Merge pull request #19234 from jdoliner/patch-1

Remove Pachyderm storage driver.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a12667d..78fba74 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -405,7 +405,7 @@
 + Docker daemon has full IPv6 support
 + The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools
 + The `docker run` command can take the `--read-only` flag to make the container’s root filesystem mounted as readonly, which can be used in combination with volumes to force a container’s processes to only write to locations that will be persisted
-+ Container total memory usage can be limited for `docker run` using the `—memory-swap` flag
++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
 * Major stability improvements for devicemapper storage driver
 * Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
 * Better integration with host system: per-container iptable rules are moved to the DOCKER chain
diff --git a/Dockerfile b/Dockerfile
index 1b29d58..d498eb6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,16 +23,15 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM ubuntu:14.04
-MAINTAINER Tianon Gravi <admwiggin@gmail.com> (@tianon)
+FROM ubuntu:trusty
 
 # add zfs ppa
-RUN	apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
-RUN	echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
+RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61
+RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list
 
 # add llvm repo
-RUN	apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 15CF4D18AF4F7421
-RUN	echo deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main > /etc/apt/sources.list.d/llvm.list
+RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 6084F3CF814B57C1CF12EFD515CF4D18AF4F7421
+RUN echo deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main > /etc/apt/sources.list.d/llvm.list
 
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
@@ -72,19 +71,28 @@
 	&& ln -snf /usr/bin/clang++-3.8 /usr/local/bin/clang++
 
 # Get lvm2 source for compiling statically
-RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+		| tar -xzC /usr/local/lvm2 --strip-components=1
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
 
 # Compile and install lvm2
 RUN cd /usr/local/lvm2 \
-	&& ./configure --enable-static_link \
+	&& ./configure \
+		--build="$(gcc -print-multiarch)" \
+		--enable-static_link \
 	&& make device-mapper \
 	&& make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
 # Install Go
-ENV GO_VERSION 1.5.2
-RUN curl -sSL  "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar -v -C /usr/local -xz
+# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
+#            will need updating, to avoid errors. Ping #docker-maintainers on IRC 
+#            with a heads-up.
+ENV GO_VERSION 1.5.3
+RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \
+	| tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
 ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
 
@@ -128,32 +136,39 @@
 # install seccomp
 # this can be changed to the ubuntu package libseccomp-dev if dockerinit is removed,
 # we need libseccomp.a (which the package does not provide) for dockerinit
-ENV SECCOMP_VERSION v2.2.3
+ENV SECCOMP_VERSION 2.2.3
 RUN set -x \
-	&& export SECCOMP_PATH=$(mktemp -d) \
-	&& git clone https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \
+	&& export SECCOMP_PATH="$(mktemp -d)" \
+	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
 	&& ( \
 		cd "$SECCOMP_PATH" \
-		&& git checkout "$SECCOMP_VERSION" \
-		&& ./autogen.sh \
-		&& ./configure --prefix=/usr \
+		&& ./configure --prefix=/usr/local \
 		&& make \
 		&& make install \
+		&& ldconfig \
 	) \
 	&& rm -rf "$SECCOMP_PATH"
 
-# Install registry
-ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
+# Install two versions of the registry. The first is an older version that
+# only supports schema1 manifests. The second is a newer version that supports
+# both. This allows integration-cli tests to cover push/pull with both schema1
+# and schema2 manifests.
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
+ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
 	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
 	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
 		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
 	&& rm -rf "$GOPATH"
 
 # Install notary server
-ENV NOTARY_VERSION docker-v1.10-2
+ENV NOTARY_VERSION docker-v1.10-3
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
@@ -165,7 +180,7 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 57512760c83fbe41302891aa51e34a86f4db74de
+ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
@@ -198,7 +213,7 @@
 # Get useful and necessary Hub images so we can "docker load" locally instead of pulling
 COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
 RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
-	busybox:latest@sha256:eb3c0d4680f9213ee5f348ea6d39489a1f85a318a2ae09e012c426f78252a6d2 \
+	busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \
 	debian:jessie@sha256:24a900d1671b269d6640b4224e7b63801880d8e3cb2bcbfaa10a5dddcf4469ed \
 	hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7
 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
@@ -206,8 +221,8 @@
 # Download man page generator
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
-	&& git clone -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
+	&& git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
+	&& git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
 	&& go get -v -d github.com/cpuguy83/go-md2man \
 	&& go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \
 	&& rm -rf "$GOPATH"
@@ -222,12 +237,12 @@
 	&& rm -rf "$GOPATH"
 
 # Build/install the tool for embedding resources in Windows binaries
-ENV RSRC_COMMIT e48dbf1b7fc464a9e85fcec450dddf80816b76e0
+ENV RSRC_VERSION v2
 RUN set -x \
-	&& git clone https://github.com/akavel/rsrc.git /go/src/github.com/akavel/rsrc \
-	&& cd /go/src/github.com/akavel/rsrc \
-	&& git checkout -q $RSRC_COMMIT \
-	&& go install -v
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \
+	&& go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \
+	&& rm -rf "$GOPATH"
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
index 95a7201..e0203b6 100644
--- a/Dockerfile.armhf
+++ b/Dockerfile.armhf
@@ -23,8 +23,7 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM ioft/armhf-ubuntu:14.04
-MAINTAINER Govinda Fichtner <govinda.fichtner@googlemail.com> (@_beagile_)
+FROM armhf/ubuntu:trusty
 
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
@@ -40,6 +39,7 @@
 	git \
 	iptables \
 	jq \
+	net-tools \
 	libapparmor-dev \
 	libcap-dev \
 	libltdl-dev \
@@ -52,38 +52,47 @@
 	python-mock \
 	python-pip \
 	python-websocket \
-	s3cmd=1.1.0* \
 	xfsprogs \
 	tar \
 	--no-install-recommends
 
 # Get lvm2 source for compiling statically
-RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+		| tar -xzC /usr/local/lvm2 --strip-components=1
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
 
 # Compile and install lvm2
 RUN cd /usr/local/lvm2 \
-	&& ./configure --enable-static_link \
+	&& ./configure \
+		--build="$(gcc -print-multiarch)" \
+		--enable-static_link \
 	&& make device-mapper \
 	&& make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
 # Install Go
-ENV GO_VERSION 1.4.3
+#ENV GO_VERSION 1.5.3
 # TODO update GO_TOOLS_COMMIT below when this updates to 1.5+
-RUN curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/local -xz
+ENV GO_VERSION 1.4.3
+RUN curl -fsSL "https://github.com/hypriot/golang-armbuilds/releases/download/v${GO_VERSION}/go${GO_VERSION}.linux-armv7.tar.gz" \
+	| tar -xzC /usr/local
+# temporarily using Hypriot's tarballs while we wait for official 1.6+
+#RUN curl -fsSL https://golang.org/dl/go${GO_VERSION}.linux-arm6.tar.gz \
+#		| tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
 ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
 
 # we're building for armhf, which is ARMv7, so let's be explicit about that
+ENV GOARCH arm
 ENV GOARM 7
 
-RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1
-
 # This has been commented out and kept as reference because we don't support compiling with older Go anymore.
 # ENV GOFMT_VERSION 1.3.3
 # RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt
 
+#ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3
 # TODO update this sha when we upgrade to Go 1.5+
 ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9
 # Grab Go's cover tool for dead-simple code coverage testing
@@ -94,6 +103,8 @@
 	&& go install -v golang.org/x/tools/cmd/cover \
 	&& go install -v golang.org/x/tools/cmd/vet
 # Grab Go's lint tool
+#ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456
+# TODO update this sha when we upgrade to Go 1.5+
 ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f
 RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
 	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
@@ -102,54 +113,55 @@
 # install seccomp
 # this can be changed to the ubuntu package libseccomp-dev if dockerinit is removed,
 # we need libseccomp.a (which the package does not provide) for dockerinit
-ENV SECCOMP_VERSION v2.2.3
+ENV SECCOMP_VERSION 2.2.3
 RUN set -x \
-	&& export SECCOMP_PATH=$(mktemp -d) \
-	&& git clone https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \
+	&& export SECCOMP_PATH="$(mktemp -d)" \
+	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
 	&& ( \
 		cd "$SECCOMP_PATH" \
-		&& git checkout "$SECCOMP_VERSION" \
-		&& ./autogen.sh \
-		&& ./configure --prefix=/usr \
+		&& ./configure --prefix=/usr/local \
 		&& make \
 		&& make install \
+		&& ldconfig \
 	) \
 	&& rm -rf "$SECCOMP_PATH"
 
-# Install registry
-ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
+# Install two versions of the registry. The first is an older version that
+# only supports schema1 manifests. The second is a newer version that supports
+# both. This allows integration-cli tests to cover push/pull with both schema1
+# and schema2 manifests.
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
+ENV REGISTRY_COMMIT a7ae88da459b98b481a245e5b1750134724ac67d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
 	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
 	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
 		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
 	&& rm -rf "$GOPATH"
 
 # Install notary server
-# commented Notary temporary as we are waiting for an update of jose2go: https://github.com/docker/notary/issues/239
-#
-# ENV NOTARY_COMMIT 8e8122eb5528f621afcd4e2854c47302f17392f7
-# RUN set -x \
-# 	&& export GOPATH="$(mktemp -d)" \
-# 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
-# 	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_COMMIT") \
-# 	&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
-# 		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
-# 	&& rm -rf "$GOPATH"
+ENV NOTARY_VERSION docker-v1.10-2
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
+	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
+	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 139850f3f3b17357bab5ba3edfb745fb14043764
+ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
-	&& git checkout -q $DOCKER_PY_COMMIT
-
-# Setup s3cmd config
-RUN { \
-		echo '[default]'; \
-		echo 'access_key=$AWS_ACCESS_KEY'; \
-		echo 'secret_key=$AWS_SECRET_KEY'; \
-	} > ~/.s3cfg
+	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install -r test-requirements.txt
 
 # Set user.email so crosbymichael's in-container merge commits go smoothly
 RUN git config --global user.email 'docker-dummy@example.com'
@@ -171,16 +183,16 @@
 # Get useful and necessary Hub images so we can "docker load" locally instead of pulling
 COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
 RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
-	hypriot/armhf-busybox:latest@sha256:b0fc94dac9793ce3c35607b15012b4c7deca300963a7cc38ab440189ec81e2e7 \
-	hypriot/armhf-hello-world:latest@sha256:b618ec0cc3acf683e8d77ad6c5ec81546cddde2036eda9a78f628effdeca74cd \
-	hypriot/armhf-unshare:latest@sha256:8fede091760d2fb8b2d14cedffdd681c4575b02b1abeeb18dd79b754c62327db
+	armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \
+	armhf/debian:jessie@sha256:094687129906d2a43cb4e5946ea379b5619c9ca8e4e27b3ba28b40f237a4150c \
+	armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791
 # see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
 
 # Download man page generator
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
-	&& git clone -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
+	&& git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
+	&& git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
 	&& go get -v -d github.com/cpuguy83/go-md2man \
 	&& go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \
 	&& rm -rf "$GOPATH"
@@ -195,12 +207,12 @@
 	&& rm -rf "$GOPATH"
 
 # Build/install the tool for embedding resources in Windows binaries
-ENV RSRC_COMMIT e48dbf1b7fc464a9e85fcec450dddf80816b76e0
+ENV RSRC_VERSION v2
 RUN set -x \
-	&& git clone https://github.com/akavel/rsrc.git /go/src/github.com/akavel/rsrc \
-	&& cd /go/src/github.com/akavel/rsrc \
-	&& git checkout -q $RSRC_COMMIT \
-	&& go install -v
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \
+	&& go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \
+	&& rm -rf "$GOPATH"
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
diff --git a/Dockerfile.gccgo b/Dockerfile.gccgo
index 1ae1af2..6d184f0 100644
--- a/Dockerfile.gccgo
+++ b/Dockerfile.gccgo
@@ -61,7 +61,7 @@
 ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 139850f3f3b17357bab5ba3edfb745fb14043764
+ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT
diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
index 7520b5d..1c3804b 100644
--- a/Dockerfile.ppc64le
+++ b/Dockerfile.ppc64le
@@ -1,10 +1,27 @@
-# This file describes the standard way to build Docker, using docker
+# This file describes the standard way to build Docker on ppc64le, using docker
 #
 # Usage:
 #
 # # Assemble the full dev environment. This is slow the first time.
 # docker build -t docker -f Dockerfile.ppc64le .
 #
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test
+#
+# # Publish a release:
+# docker run --privileged \
+#  -e AWS_S3_BUCKET=baz \
+#  -e AWS_ACCESS_KEY=foo \
+#  -e AWS_SECRET_KEY=bar \
+#  -e GPG_PASSPHRASE=gloubiboulga \
+#  docker hack/release.sh
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
 
 FROM ppc64le/gcc:5.3
 
@@ -12,37 +29,83 @@
 RUN apt-get update && apt-get install -y \
 	apparmor \
 	aufs-tools \
+	automake \
+	bash-completion \
 	btrfs-tools \
 	build-essential \
+	createrepo \
 	curl \
+	dpkg-sig \
 	git \
 	iptables \
 	jq \
 	net-tools \
 	libapparmor-dev \
 	libcap-dev \
+	libltdl-dev \
 	libsqlite3-dev \
+	libsystemd-journal-dev \
+	libtool \
 	mercurial \
-	parallel \
+	pkg-config \
 	python-dev \
 	python-mock \
 	python-pip \
 	python-websocket \
+	xfsprogs \
+	tar \
 	--no-install-recommends
 
-RUN rm -rf /usr/local/lvm2
-RUN git clone --no-checkout git://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103
-RUN curl -o /usr/local/lvm2/autoconf/config.guess 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD'
-RUN curl -o /usr/local/lvm2/autoconf/config.sub 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD'
+# Get lvm2 source for compiling statically
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+		| tar -xzC /usr/local/lvm2 --strip-components=1
+# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
+
+# fix platform enablement in lvm2 to support ppc64le properly
+RUN set -e \
+	&& for f in config.guess config.sub; do \
+		curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
+	done
+# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
 
 # Compile and install lvm2
 RUN cd /usr/local/lvm2 \
-	&& ./configure --enable-static_link \
+	&& ./configure \
+		--build="$(gcc -print-multiarch)" \
+		--enable-static_link \
 	&& make device-mapper \
 	&& make install_device-mapper
+# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
+# TODO install Go, using gccgo as GOROOT_BOOTSTRAP (Go 1.5+ supports ppc64le properly)
+# possibly a ppc64le/golang image?
+
+ENV PATH /go/bin:$PATH
 ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
 
+# This has been commented out and kept as reference because we don't support compiling with older Go anymore.
+# ENV GOFMT_VERSION 1.3.3
+# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt
+
+# TODO update this sha when we upgrade to Go 1.5+
+ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9
+# Grab Go's cover tool for dead-simple code coverage testing
+# Grab Go's vet tool for examining go code to find suspicious constructs
+# and help prevent errors that the compiler might not catch
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \
+	&& go install -v golang.org/x/tools/cmd/cover \
+	&& go install -v golang.org/x/tools/cmd/vet
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
+
+# Install registry
 ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
@@ -63,12 +126,15 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 47ab89ec2bd3bddf1221b856ffbaff333edeabb4
+ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
 	&& pip install -r test-requirements.txt
 
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
 # Add an unprivileged user to be used for tests which need it
 RUN groupadd -r docker
 RUN useradd --create-home --gid docker unprivilegeduser
@@ -77,12 +143,45 @@
 WORKDIR /go/src/github.com/docker/docker
 ENV DOCKER_BUILDTAGS apparmor selinux
 
-ENV IMAGEREPO ppc64le
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+
+# Register Docker's bash completion.
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
+
+# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
 COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
 RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
-	$IMAGEREPO/busybox:latest \
-	$IMAGEREPO/hello-world:frozen \ 
-	$IMAGEREPO/unshare:latest	
+	ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \
+	ppc64le/debian:jessie@sha256:74e06e6506b23cf8abd00250782838b2d19910824d8e7eab3d14dc1845ea10c6 \
+	ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974
+# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
+
+# Download man page generator
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
+	&& git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
+	&& go get -v -d github.com/cpuguy83/go-md2man \
+	&& go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \
+	&& rm -rf "$GOPATH"
+
+# Download toml validator
+ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \
+	&& (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \
+	&& go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \
+	&& rm -rf "$GOPATH"
+
+# Build/install the tool for embedding resources in Windows binaries
+ENV RSRC_VERSION v2
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \
+	&& go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \
+	&& rm -rf "$GOPATH"
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
index c035979..ce335c9 100644
--- a/Dockerfile.s390x
+++ b/Dockerfile.s390x
@@ -1,47 +1,111 @@
-# This file describes the standard way to build Docker, using docker
+# This file describes the standard way to build Docker on s390x, using docker
 #
 # Usage:
 #
 # # Assemble the full dev environment. This is slow the first time.
 # docker build -t docker -f Dockerfile.s390x .
 #
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test
+#
+# # Publish a release:
+# docker run --privileged \
+#  -e AWS_S3_BUCKET=baz \
+#  -e AWS_ACCESS_KEY=foo \
+#  -e AWS_SECRET_KEY=bar \
+#  -e GPG_PASSPHRASE=gloubiboulga \
+#  docker hack/release.sh
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
 
-FROM s390x/gcc:5.2
+FROM s390x/gcc:5.3
 
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
 	apparmor \
 	aufs-tools \
+	automake \
+	bash-completion \
 	btrfs-tools \
 	build-essential \
+	createrepo \
 	curl \
+	dpkg-sig \
 	git \
 	iptables \
 	jq \
 	net-tools \
 	libapparmor-dev \
 	libcap-dev \
+	libltdl-dev \
 	libsqlite3-dev \
+	libsystemd-journal-dev \
+	libtool \
 	mercurial \
-	parallel \
+	pkg-config \
 	python-dev \
 	python-mock \
 	python-pip \
 	python-websocket \
+	xfsprogs \
+	tar \
 	--no-install-recommends
 
 # Get lvm2 source for compiling statically
-RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+		| tar -xzC /usr/local/lvm2 --strip-components=1
 # see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
 
+# fix platform enablement in lvm2 to support s390x properly
+RUN set -e \
+	&& for f in config.guess config.sub; do \
+		curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
+	done
+# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
+
 # Compile and install lvm2
 RUN cd /usr/local/lvm2 \
-	&& ./configure --enable-static_link \
+	&& ./configure \
+		--build="$(gcc -print-multiarch)" \
+		--enable-static_link \
 	&& make device-mapper \
 	&& make install_device-mapper
+# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
+# Note: Go comes from the base image (gccgo, specifically)
+# We can't compile Go proper because s390x isn't an officially supported architecture yet.
+
+ENV PATH /go/bin:$PATH
 ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
 
+# This has been commented out and kept as reference because we don't support compiling with older Go anymore.
+# ENV GOFMT_VERSION 1.3.3
+# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt
+
+# TODO update this sha when we upgrade to Go 1.5+
+ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9
+# Grab Go's cover tool for dead-simple code coverage testing
+# Grab Go's vet tool for examining go code to find suspicious constructs
+# and help prevent errors that the compiler might not catch
+RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \
+	&& (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \
+	&& go install -v golang.org/x/tools/cmd/cover \
+	&& go install -v golang.org/x/tools/cmd/vet
+# Grab Go's lint tool
+ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f
+RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \
+	&& (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \
+	&& go install -v github.com/golang/lint/golint
+
+
+# Install registry
 ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
@@ -62,12 +126,15 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 47ab89ec2bd3bddf1221b856ffbaff333edeabb4
+ENV DOCKER_PY_COMMIT e2878cbcc3a7eef99917adc1be252800b0e41ece
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
 	&& pip install -r test-requirements.txt
 
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
 # Add an unprivileged user to be used for tests which need it
 RUN groupadd -r docker
 RUN useradd --create-home --gid docker unprivilegeduser
@@ -76,12 +143,45 @@
 WORKDIR /go/src/github.com/docker/docker
 ENV DOCKER_BUILDTAGS apparmor selinux
 
-ENV IMAGEREPO s390x
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+
+# Register Docker's bash completion.
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
+
+# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
 COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
 RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
-	$IMAGEREPO/busybox:latest \
-	$IMAGEREPO/hello-world:frozen \
-	$IMAGEREPO/unshare:latest 
+	s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \
+	s390x/debian:jessie@sha256:3c478e199f60c877c00306356267798d32727dc3cd38512cdb4b060659ea9d20 \
+	s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699
+# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
+
+# Download man page generator
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone --depth 1 -b v1.0.4 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \
+	&& git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \
+	&& go get -v -d github.com/cpuguy83/go-md2man \
+	&& go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \
+	&& rm -rf "$GOPATH"
+
+# Download toml validator
+ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \
+	&& (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \
+	&& go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \
+	&& rm -rf "$GOPATH"
+
+# Build/install the tool for embedding resources in Windows binaries
+ENV RSRC_VERSION v2
+RUN set -x \
+	&& export GOPATH="$(mktemp -d)" \
+	&& git clone --depth 1 -b "$RSRC_VERSION" https://github.com/akavel/rsrc.git "$GOPATH/src/github.com/akavel/rsrc" \
+	&& go build -v -o /usr/local/bin/rsrc github.com/akavel/rsrc \
+	&& rm -rf "$GOPATH"
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
diff --git a/MAINTAINERS b/MAINTAINERS
index 8132bee..43a2f87 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -27,6 +27,7 @@
 
 		people = [
 			"calavera",
+			"coolljt0725",
 			"cpuguy83",
 			"crosbymichael",
 			"duglin",
@@ -115,6 +116,11 @@
 	Email = "david.calavera@gmail.com"
 	GitHub = "calavera"
 
+	[people.coolljt0725]
+	Name = "Lei Jitang"
+	Email = "leijitang@huawei.com"
+	GitHub = "coolljt0725"
+
 	[people.cpuguy83]
 	Name = "Brian Goff"
 	Email = "cpuguy83@gmail.com"
diff --git a/Makefile b/Makefile
index 6bca1e7..7a66015 100644
--- a/Makefile
+++ b/Makefile
@@ -1,14 +1,27 @@
 .PHONY: all binary build cross default docs docs-build docs-shell shell test test-docker-py test-integration-cli test-unit validate
 
 # get OS/Arch of docker engine
-DOCKER_ENGINE_OSARCH = $(shell docker version | grep 'OS/Arch' | tail -1 | cut -d':' -f2 | tr -d '[[:space:]]')
+DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}')
 # default for linux/amd64 and others
-DOCKER_FILE = Dockerfile
+DOCKERFILE := Dockerfile
 # switch to different Dockerfile for linux/arm
-ifeq ($(DOCKER_ENGINE_OSARCH),linux/arm)
-	DOCKER_FILE = Dockerfile.armhf
+ifeq ($(DOCKER_OSARCH), linux/arm)
+	DOCKERFILE := Dockerfile.armhf
+else
+ifeq ($(DOCKER_OSARCH), linux/arm64)
+	# TODO .arm64
+	DOCKERFILE := Dockerfile.armhf
+else
+ifeq ($(DOCKER_OSARCH), linux/ppc64le)
+	DOCKERFILE := Dockerfile.ppc64le
+else
+ifeq ($(DOCKER_OSARCH), linux/s390x)
+	DOCKERFILE := Dockerfile.s390x
 endif
-export DOCKER_FILE
+endif
+endif
+endif
+export DOCKERFILE
 
 # env vars passed through directly to Docker's build scripts
 # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily
@@ -18,7 +31,7 @@
 	-e DOCKER_CLIENTONLY \
 	-e DOCKER_DEBUG \
 	-e DOCKER_EXPERIMENTAL \
-	-e DOCKER_FILE \
+	-e DOCKERFILE \
 	-e DOCKER_GRAPHDRIVER \
 	-e DOCKER_REMAP_ROOT \
 	-e DOCKER_STORAGE_OPTS \
@@ -60,7 +73,7 @@
 	$(DOCKER_RUN_DOCKER) hack/make.sh binary
 
 build: bundles
-	docker build -t "$(DOCKER_IMAGE)" -f $(DOCKER_FILE) .
+	docker build -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
 
 bundles:
 	mkdir bundles
diff --git a/VENDORING.md b/VENDORING.md
new file mode 100644
index 0000000..c6bb508
--- /dev/null
+++ b/VENDORING.md
@@ -0,0 +1,45 @@
+# Vendoring policies
+
+This document outlines recommended vendoring policies for Docker repositories.
+(For example, libnetwork is a Docker repo and logrus is not.)
+
+## Vendoring using tags
+
+Commit-ID-based vendoring provides little or no information about the updates
+being vendored. To fix this, vendored repositories are now required to use
+annotated tags alongside commit IDs to snapshot commits. Annotated tags by
+themselves are not sufficient, since the same tag can be force-updated to
+reference different commits.
+
+Each tag should:
+- Follow Semantic Versioning rules (see the "Semantic Versioning" section below)
+- Have a corresponding entry in the change tracking document.
+
+Each repo should:
+- Have a change tracking document between tags/releases, e.g. a CHANGELOG.md or
+a GitHub releases page.
+
+The goal here is for consuming repos to be able to use the tag version and
+changelog updates to determine whether the vendoring will cause any breaking or
+backward-incompatible changes. This also means that repos can declare a
+dependency on a package at a specific version or greater, up to the next major
+release, without encountering breaking changes.
+
+## Semantic Versioning
+Annotated version tags should follow Semantic Versioning policies.
+According to http://semver.org:
+
+"Given a version number MAJOR.MINOR.PATCH, increment the:
+    MAJOR version when you make incompatible API changes,
+    MINOR version when you add functionality in a backwards-compatible manner, and
+    PATCH version when you make backwards-compatible bug fixes.
+Additional labels for pre-release and build metadata are available as extensions
+to the MAJOR.MINOR.PATCH format."
+
+## Vendoring cadence
+In order to avoid huge vendoring changes, it is recommended to have a regular
+cadence for vendoring updates, e.g. monthly.
+
+## Pre-merge vendoring tests
+All related repos will be vendored into docker/docker.
+CI on docker/docker should catch any breaking changes involving multiple repos.
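
To make the MAJOR.MINOR.PATCH rules above concrete, here is a small Go sketch that classifies an upgrade between two tags (helper names are hypothetical; pre-release labels are ignored for brevity):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // bumpKind classifies an upgrade between two "MAJOR.MINOR.PATCH" tags per
    // the semver rules quoted above.
    func bumpKind(from, to string) string {
        f, t := parse(from), parse(to)
        switch {
        case t[0] != f[0]:
            return "major (may contain breaking changes)"
        case t[1] != f[1]:
            return "minor (backwards-compatible functionality)"
        case t[2] != f[2]:
            return "patch (backwards-compatible bug fixes)"
        }
        return "none"
    }

    func parse(v string) [3]int {
        var out [3]int
        for i, p := range strings.SplitN(strings.TrimPrefix(v, "v"), ".", 3) {
            out[i], _ = strconv.Atoi(p)
        }
        return out
    }

    func main() {
        fmt.Println(bumpKind("v1.4.2", "v1.5.0")) // minor (backwards-compatible functionality)
        fmt.Println(bumpKind("v1.5.0", "v2.0.0")) // major (may contain breaking changes)
    }

Under this policy a consuming repo can treat any tag with an unchanged MAJOR component as safe to vendor.
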
diff --git a/api/client/build.go b/api/client/build.go
index 048cd4a..7fb3391 100644
--- a/api/client/build.go
+++ b/api/client/build.go
@@ -22,6 +22,7 @@
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/gitutils"
 	"github.com/docker/docker/pkg/httputils"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/jsonmessage"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/progress"
@@ -34,6 +35,8 @@
 	"github.com/docker/go-units"
 )
 
+type translatorFunc func(reference.NamedTagged) (reference.Canonical, error)
+
 // CmdBuild builds a new image from the source code at a given path.
 //
 // If '-' is provided instead of a path or URL, Docker will build an image from either a Dockerfile or tar archive read from STDIN.
@@ -101,11 +104,11 @@
 
 	switch {
 	case specifiedContext == "-":
-		tempDir, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
+		context, relDockerfile, err = getContextFromReader(cli.in, *dockerfileName)
 	case urlutil.IsGitURL(specifiedContext) && hasGit:
 		tempDir, relDockerfile, err = getContextFromGitURL(specifiedContext, *dockerfileName)
 	case urlutil.IsURL(specifiedContext):
-		tempDir, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName)
+		context, relDockerfile, err = getContextFromURL(progBuff, specifiedContext, *dockerfileName)
 	default:
 		contextDir, relDockerfile, err = getContextFromLocalDir(specifiedContext, *dockerfileName)
 	}
@@ -122,67 +125,59 @@
 		contextDir = tempDir
 	}
 
-	// And canonicalize dockerfile name to a platform-independent one
-	relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
-	if err != nil {
-		return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
-	}
+	if context == nil {
+		// And canonicalize dockerfile name to a platform-independent one
+		relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
+		if err != nil {
+			return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
+		}
 
-	f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
-	if err != nil && !os.IsNotExist(err) {
-		return err
-	}
+		f, err := os.Open(filepath.Join(contextDir, ".dockerignore"))
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		}
 
-	var excludes []string
-	if err == nil {
-		excludes, err = dockerignore.ReadAll(f)
+		var excludes []string
+		if err == nil {
+			excludes, err = dockerignore.ReadAll(f)
+			if err != nil {
+				return err
+			}
+		}
+
+		if err := validateContextDirectory(contextDir, excludes); err != nil {
+			return fmt.Errorf("Error checking context: '%s'.", err)
+		}
+
+		// If .dockerignore mentions .dockerignore or the Dockerfile
+		// then make sure we send both files over to the daemon
+		// because Dockerfile is, obviously, needed no matter what, and
+		// .dockerignore is needed to know if either one needs to be
+		// removed. The daemon will remove them for us, if needed, after it
+		// parses the Dockerfile. Ignore errors here, as they will have been
+		// caught by validateContextDirectory above.
+		var includes = []string{"."}
+		keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
+		keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
+		if keepThem1 || keepThem2 {
+			includes = append(includes, ".dockerignore", relDockerfile)
+		}
+
+		context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
+			Compression:     archive.Uncompressed,
+			ExcludePatterns: excludes,
+			IncludeFiles:    includes,
+		})
 		if err != nil {
 			return err
 		}
 	}
 
-	if err := validateContextDirectory(contextDir, excludes); err != nil {
-		return fmt.Errorf("Error checking context: '%s'.", err)
-	}
-
-	// If .dockerignore mentions .dockerignore or the Dockerfile
-	// then make sure we send both files over to the daemon
-	// because Dockerfile is, obviously, needed no matter what, and
-	// .dockerignore is needed to know if either one needs to be
-	// removed. The daemon will remove them for us, if needed, after it
-	// parses the Dockerfile. Ignore errors here, as they will have been
-	// caught by validateContextDirectory above.
-	var includes = []string{"."}
-	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
-	keepThem2, _ := fileutils.Matches(relDockerfile, excludes)
-	if keepThem1 || keepThem2 {
-		includes = append(includes, ".dockerignore", relDockerfile)
-	}
-
-	context, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
-		Compression:     archive.Uncompressed,
-		ExcludePatterns: excludes,
-		IncludeFiles:    includes,
-	})
-	if err != nil {
-		return err
-	}
-
 	var resolvedTags []*resolvedTag
 	if isTrusted() {
-		// Resolve the FROM lines in the Dockerfile to trusted digest references
-		// using Notary. On a successful build, we must tag the resolved digests
-		// to the original name specified in the Dockerfile.
-		var newDockerfile *trustedDockerfile
-		newDockerfile, resolvedTags, err = rewriteDockerfileFrom(filepath.Join(contextDir, relDockerfile), cli.trustedReference)
-		if err != nil {
-			return fmt.Errorf("unable to process Dockerfile: %v", err)
-		}
-		defer newDockerfile.Close()
-
 		// Wrap the tar archive to replace the Dockerfile entry with the rewritten
 		// Dockerfile which uses trusted pulls.
-		context = replaceDockerfileTarWrapper(context, newDockerfile, relDockerfile)
+		context = replaceDockerfileTarWrapper(context, relDockerfile, cli.trustedReference, &resolvedTags)
 	}
 
 	// Setup an upload progress bar
@@ -459,41 +454,54 @@
 }
 
 // getContextFromReader will read the contents of the given reader as either a
-// Dockerfile or tar archive to be extracted to a temporary directory used as
-// the context directory. Returns the absolute path to the temporary context
-// directory, the relative path of the dockerfile in that context directory,
-// and a non-nil error on success.
-func getContextFromReader(r io.Reader, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+// Dockerfile or tar archive. Returns a tar archive used as a context and a
+// path to the Dockerfile inside the tar.
+func getContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
 	buf := bufio.NewReader(r)
 
 	magic, err := buf.Peek(archive.HeaderSize)
 	if err != nil && err != io.EOF {
-		return "", "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
+		return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
 	}
 
-	if absContextDir, err = ioutil.TempDir("", "docker-build-context-"); err != nil {
-		return "", "", fmt.Errorf("unbale to create temporary context directory: %v", err)
+	if archive.IsArchive(magic) {
+		return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
 	}
 
-	defer func(d string) {
-		if err != nil {
-			os.RemoveAll(d)
-		}
-	}(absContextDir)
-
-	if !archive.IsArchive(magic) { // Input should be read as a Dockerfile.
-		// -f option has no meaning when we're reading it from stdin,
-		// so just use our default Dockerfile name
-		relDockerfile = api.DefaultDockerfileName
-
-		return absContextDir, relDockerfile, writeToFile(buf, filepath.Join(absContextDir, relDockerfile))
+	// Input should be read as a Dockerfile.
+	tmpDir, err := ioutil.TempDir("", "docker-build-context-")
+	if err != nil {
+		return nil, "", fmt.Errorf("unbale to create temporary context directory: %v", err)
 	}
 
-	if err := archive.Untar(buf, absContextDir, nil); err != nil {
-		return "", "", fmt.Errorf("unable to extract stdin to temporary context directory: %v", err)
+	f, err := os.Create(filepath.Join(tmpDir, api.DefaultDockerfileName))
+	if err != nil {
+		return nil, "", err
+	}
+	_, err = io.Copy(f, buf)
+	if err != nil {
+		f.Close()
+		return nil, "", err
 	}
 
-	return getDockerfileRelPath(absContextDir, dockerfileName)
+	if err := f.Close(); err != nil {
+		return nil, "", err
+	}
+	if err := r.Close(); err != nil {
+		return nil, "", err
+	}
+
+	tar, err := archive.Tar(tmpDir, archive.Uncompressed)
+	if err != nil {
+		return nil, "", err
+	}
+
+	return ioutils.NewReadCloserWrapper(tar, func() error {
+		err := tar.Close()
+		os.RemoveAll(tmpDir)
+		return err
+	}), api.DefaultDockerfileName, nil
+
 }
 
 // getContextFromGitURL uses a Git URL as context for a `docker build`. The
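
The reworked getContextFromReader decides between a raw Dockerfile and a tar context by peeking at the stream's first bytes without consuming them, so the same bytes remain available to whoever reads the context next. A stdlib-only sketch of that peek-and-dispatch pattern (archive.IsArchive also recognizes compressed streams; this simplification checks only the plain POSIX tar magic, "ustar" at offset 257 of the 512-byte header block):

    package main

    import (
        "bufio"
        "bytes"
        "fmt"
        "io"
        "strings"
    )

    // isTar peeks at the first 512 bytes (one tar header block) without
    // consuming them and checks the POSIX "ustar" magic at offset 257.
    func isTar(buf *bufio.Reader) bool {
        b, err := buf.Peek(512)
        if err != nil && err != io.EOF {
            return false
        }
        return len(b) > 262 && bytes.Equal(b[257:262], []byte("ustar"))
    }

    func main() {
        r := bufio.NewReaderSize(strings.NewReader("FROM busybox\n"), 512)
        fmt.Println(isTar(r)) // false: treat the stream as a raw Dockerfile
        // The peeked bytes were not consumed and remain readable:
        line, _ := r.ReadString('\n')
        fmt.Print(line) // FROM busybox
    }
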
@@ -510,23 +518,20 @@
 }
 
 // getContextFromURL uses a remote URL as context for a `docker build`. The
-// remote resource is downloaded as either a Dockerfile or a context tar
-// archive and stored in a temporary directory used as the context directory.
-// Returns the absolute path to the temporary context directory, the relative
-// path of the dockerfile in that context directory, and a non-nil error on
-// success.
-func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (absContextDir, relDockerfile string, err error) {
+// remote resource is downloaded as either a Dockerfile or a tar archive.
+// Returns the tar archive used for the context and a path of the
+// dockerfile inside the tar.
+func getContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
 	response, err := httputils.Download(remoteURL)
 	if err != nil {
-		return "", "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
+		return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err)
 	}
-	defer response.Body.Close()
 	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true)
 
 	// Pass the response body through a progress reader.
 	progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
 
-	return getContextFromReader(progReader, dockerfileName)
+	return getContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
 }
 
 // getContextFromLocalDir uses the given local directory as context for a
@@ -548,16 +553,6 @@
 
 var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
 
-type trustedDockerfile struct {
-	*os.File
-	size int64
-}
-
-func (td *trustedDockerfile) Close() error {
-	td.File.Close()
-	return os.Remove(td.File.Name())
-}
-
 // resolvedTag records the repository, tag, and resolved digest reference
 // from a Dockerfile rewrite.
 type resolvedTag struct {
@@ -569,32 +564,9 @@
 // "FROM <image>" instructions to a digest reference. `translator` is a
 // function that takes a repository name and tag reference and returns a
 // trusted digest reference.
-func rewriteDockerfileFrom(dockerfileName string, translator func(reference.NamedTagged) (reference.Canonical, error)) (newDockerfile *trustedDockerfile, resolvedTags []*resolvedTag, err error) {
-	dockerfile, err := os.Open(dockerfileName)
-	if err != nil {
-		return nil, nil, fmt.Errorf("unable to open Dockerfile: %v", err)
-	}
-	defer dockerfile.Close()
-
+func rewriteDockerfileFrom(dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
 	scanner := bufio.NewScanner(dockerfile)
-
-	// Make a tempfile to store the rewritten Dockerfile.
-	tempFile, err := ioutil.TempFile("", "trusted-dockerfile-")
-	if err != nil {
-		return nil, nil, fmt.Errorf("unable to make temporary trusted Dockerfile: %v", err)
-	}
-
-	trustedFile := &trustedDockerfile{
-		File: tempFile,
-	}
-
-	defer func() {
-		if err != nil {
-			// Close the tempfile if there was an error during Notary lookups.
-			// Otherwise the caller should close it.
-			trustedFile.Close()
-		}
-	}()
+	buf := bytes.NewBuffer(nil)
 
 	// Scan the lines of the Dockerfile, looking for a "FROM" line.
 	for scanner.Scan() {
@@ -622,26 +594,21 @@
 			}
 		}
 
-		n, err := fmt.Fprintln(tempFile, line)
+		_, err := fmt.Fprintln(buf, line)
 		if err != nil {
 			return nil, nil, err
 		}
-
-		trustedFile.size += int64(n)
 	}
 
-	tempFile.Seek(0, os.SEEK_SET)
-
-	return trustedFile, resolvedTags, scanner.Err()
+	return buf.Bytes(), resolvedTags, scanner.Err()
 }
 
 // replaceDockerfileTarWrapper wraps the given input tar archive stream and
 // replaces the entry with the given Dockerfile name with the contents of the
 // new Dockerfile. Returns a new tar archive stream with the replaced
 // Dockerfile.
-func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, newDockerfile *trustedDockerfile, dockerfileName string) io.ReadCloser {
+func replaceDockerfileTarWrapper(inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
 	pipeReader, pipeWriter := io.Pipe()
-
 	go func() {
 		tarReader := tar.NewReader(inputTarStream)
 		tarWriter := tar.NewWriter(pipeWriter)
@@ -662,13 +629,18 @@
 			}
 
 			var content io.Reader = tarReader
-
 			if hdr.Name == dockerfileName {
 				// This entry is the Dockerfile. Since the tar archive was
 				// generated from a directory on the local filesystem, the
 				// Dockerfile will only appear once in the archive.
-				hdr.Size = newDockerfile.size
-				content = newDockerfile
+				var newDockerfile []byte
+				newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(content, translator)
+				if err != nil {
+					pipeWriter.CloseWithError(err)
+					return
+				}
+				hdr.Size = int64(len(newDockerfile))
+				content = bytes.NewBuffer(newDockerfile)
 			}
 
 			if err := tarWriter.WriteHeader(hdr); err != nil {
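
The new replaceDockerfileTarWrapper streams the build context through an io.Pipe, copying tar entries through unchanged and swapping in the rewritten Dockerfile with a corrected hdr.Size, so no temporary file is needed. A stripped-down sketch of that copy-and-replace loop (stdlib only, error handling abbreviated):

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
        "io"
    )

    // replaceEntry returns a tar stream identical to in, except that the entry
    // named name is replaced with newContent (with a fixed-up size header).
    func replaceEntry(in io.Reader, name string, newContent []byte) io.ReadCloser {
        pr, pw := io.Pipe()
        go func() {
            tr := tar.NewReader(in)
            tw := tar.NewWriter(pw)
            for {
                hdr, err := tr.Next()
                if err == io.EOF {
                    break
                }
                if err != nil {
                    pw.CloseWithError(err)
                    return
                }
                var content io.Reader = tr
                if hdr.Name == name {
                    hdr.Size = int64(len(newContent))
                    content = bytes.NewReader(newContent)
                }
                if err := tw.WriteHeader(hdr); err != nil {
                    pw.CloseWithError(err)
                    return
                }
                if _, err := io.Copy(tw, content); err != nil {
                    pw.CloseWithError(err)
                    return
                }
            }
            pw.CloseWithError(tw.Close())
        }()
        return pr
    }

    func main() {
        // Build a tiny tar with one Dockerfile entry, then rewrite it in-flight
        // (errors elided for brevity).
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 12, Mode: 0644})
        tw.Write([]byte("FROM debian\n"))
        tw.Close()

        out := replaceEntry(&buf, "Dockerfile", []byte("FROM debian@sha256:...\n"))
        tr := tar.NewReader(out)
        hdr, _ := tr.Next()
        data, _ := io.ReadAll(tr)
        fmt.Printf("%s: %s", hdr.Name, data) // Dockerfile: FROM debian@sha256:...
    }
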
diff --git a/api/client/info.go b/api/client/info.go
index b309dbf..42f0682 100644
--- a/api/client/info.go
+++ b/api/client/info.go
@@ -25,6 +25,9 @@
 	}
 
 	fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers)
+	fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning)
+	fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused)
+	fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped)
 	fmt.Fprintf(cli.out, "Images: %d\n", info.Images)
 	ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion)
 	ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver)
diff --git a/api/client/network.go b/api/client/network.go
index 393a249..56adabc 100644
--- a/api/client/network.go
+++ b/api/client/network.go
@@ -10,6 +10,7 @@
 	"github.com/docker/docker/opts"
 	flag "github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/stringid"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/engine-api/types/filters"
 	"github.com/docker/engine-api/types/network"
@@ -39,12 +40,16 @@
 	flIpamIPRange := opts.NewListOpts(nil)
 	flIpamGateway := opts.NewListOpts(nil)
 	flIpamAux := opts.NewMapOpts(nil, nil)
+	flIpamOpt := opts.NewMapOpts(nil, nil)
 
 	cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment")
 	cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range")
 	cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet")
 	cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver")
 	cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options")
+	cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options")
+
+	flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network")
 
 	cmd.Require(flag.Exact, 1)
 	err := cmd.ParseFlags(args, true)
@@ -68,9 +73,10 @@
 	nc := types.NetworkCreate{
 		Name:           cmd.Arg(0),
 		Driver:         driver,
-		IPAM:           network.IPAM{Driver: *flIpamDriver, Config: ipamCfg},
+		IPAM:           network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()},
 		Options:        flOpts.GetAll(),
 		CheckDuplicate: true,
+		Internal:       *flInternal,
 	}
 
 	resp, err := cli.client.NetworkCreate(nc)
@@ -112,6 +118,10 @@
 	cmd := Cli.Subcmd("network connect", []string{"NETWORK CONTAINER"}, "Connects a container to a network", false)
 	flIPAddress := cmd.String([]string{"-ip"}, "", "IP Address")
 	flIPv6Address := cmd.String([]string{"-ip6"}, "", "IPv6 Address")
+	flLinks := opts.NewListOpts(runconfigopts.ValidateLink)
+	cmd.Var(&flLinks, []string{"-link"}, "Add link to another container")
+	flAliases := opts.NewListOpts(nil)
+	cmd.Var(&flAliases, []string{"-alias"}, "Add network-scoped alias for the container")
 	cmd.Require(flag.Min, 2)
 	if err := cmd.ParseFlags(args, true); err != nil {
 		return err
@@ -121,6 +131,8 @@
 			IPv4Address: *flIPAddress,
 			IPv6Address: *flIPv6Address,
 		},
+		Links:   flLinks.GetAll(),
+		Aliases: flAliases.GetAll(),
 	}
 	return cli.client.NetworkConnect(cmd.Arg(0), cmd.Arg(1), epConfig)
 }
@@ -130,12 +142,13 @@
 // Usage: docker network disconnect <NETWORK> <CONTAINER>
 func (cli *DockerCli) CmdNetworkDisconnect(args ...string) error {
 	cmd := Cli.Subcmd("network disconnect", []string{"NETWORK CONTAINER"}, "Disconnects container from a network", false)
+	force := cmd.Bool([]string{"f", "-force"}, false, "Force the container to disconnect from a network")
 	cmd.Require(flag.Exact, 2)
 	if err := cmd.ParseFlags(args, true); err != nil {
 		return err
 	}
 
-	return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1))
+	return cli.client.NetworkDisconnect(cmd.Arg(0), cmd.Arg(1), *force)
 }
 
 // CmdNetworkLs lists all the networks managed by docker daemon
diff --git a/api/client/run.go b/api/client/run.go
index dcd7f01..3b3a1a2 100644
--- a/api/client/run.go
+++ b/api/client/run.go
@@ -90,8 +90,8 @@
 		os.Exit(125)
 	}
 
-	if hostConfig.OomKillDisable && hostConfig.Memory == 0 {
-		fmt.Fprintf(cli.err, "WARNING: Dangerous only disable the OOM Killer on containers but not set the '-m/--memory' option\n")
+	if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
+		fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n")
 	}
 
 	if len(hostConfig.DNS) > 0 {
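
Note the OomKillDisable check changed from a plain bool to a *bool: the pointer distinguishes "flag never set" from "explicitly false", so the warning fires only when the user actually disabled the OOM killer without also setting a memory limit. A minimal illustration of the tri-state pattern (names are hypothetical):

    package main

    import "fmt"

    // A HostConfig-style struct where nil means "flag not specified".
    type config struct {
        OomKillDisable *bool // nil: unset; pointer to true/false: explicit
        Memory         int64
    }

    func shouldWarn(c config) bool {
        return c.OomKillDisable != nil && *c.OomKillDisable && c.Memory == 0
    }

    func main() {
        t := true
        fmt.Println(shouldWarn(config{}))                                    // false: never set
        fmt.Println(shouldWarn(config{OomKillDisable: &t}))                  // true: disabled, no limit
        fmt.Println(shouldWarn(config{OomKillDisable: &t, Memory: 1 << 30})) // false: limit set
    }
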
diff --git a/api/client/trust.go b/api/client/trust.go
index f347689..d06db5d 100644
--- a/api/client/trust.go
+++ b/api/client/trust.go
@@ -284,13 +284,15 @@
 	case signed.ErrInvalidKeyType:
 		return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err)
 	case signed.ErrNoKeys:
-		return fmt.Errorf("Error: could not find signing keys for remote repository %s: %v", repoName, err)
+		return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err)
 	case signed.ErrLowVersion:
 		return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err)
-	case signed.ErrInsufficientSignatures:
+	case signed.ErrRoleThreshold:
 		return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err)
 	case client.ErrRepositoryNotExist:
-		return fmt.Errorf("Error: remote trust data repository not initialized for %s: %v", repoName, err)
+		return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err)
+	case signed.ErrInsufficientSignatures:
+		return fmt.Errorf("Error: could not produce valid signature for %s.  If Yubikey was used, was touch input provided?: %v", repoName, err)
 	}
 
 	return err
diff --git a/api/server/middleware.go b/api/server/middleware.go
index 77a4e55..c978478 100644
--- a/api/server/middleware.go
+++ b/api/server/middleware.go
@@ -1,9 +1,9 @@
 package server
 
 import (
-	"bytes"
+	"bufio"
 	"encoding/json"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"runtime"
 	"strings"
@@ -14,6 +14,7 @@
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/errors"
 	"github.com/docker/docker/pkg/authorization"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/version"
 	"golang.org/x/net/context"
 )
@@ -27,25 +28,37 @@
 	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 		logrus.Debugf("%s %s", r.Method, r.RequestURI)
 
-		if r.Method == "POST" {
-			if err := httputils.CheckForJSON(r); err == nil {
-				var buf bytes.Buffer
-				if _, err := buf.ReadFrom(r.Body); err == nil {
-					r.Body.Close()
-					r.Body = ioutil.NopCloser(&buf)
-					var postForm map[string]interface{}
-					if err := json.Unmarshal(buf.Bytes(), &postForm); err == nil {
-						if _, exists := postForm["password"]; exists {
-							postForm["password"] = "*****"
-						}
-						formStr, errMarshal := json.Marshal(postForm)
-						if errMarshal == nil {
-							logrus.Debugf("form data: %s", string(formStr))
-						} else {
-							logrus.Debugf("form data: %q", postForm)
-						}
-					}
-				}
+		if r.Method != "POST" {
+			return handler(ctx, w, r, vars)
+		}
+		if err := httputils.CheckForJSON(r); err != nil {
+			return handler(ctx, w, r, vars)
+		}
+		maxBodySize := 4096 // 4KB
+		if r.ContentLength > int64(maxBodySize) {
+			return handler(ctx, w, r, vars)
+		}
+
+		body := r.Body
+		bufReader := bufio.NewReaderSize(body, maxBodySize)
+		r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
+
+		b, err := bufReader.Peek(maxBodySize)
+		if err != io.EOF {
+			// either there was an error reading, or the buffer is full (in which case the request is too large)
+			return handler(ctx, w, r, vars)
+		}
+
+		var postForm map[string]interface{}
+		if err := json.Unmarshal(b, &postForm); err == nil {
+			if _, exists := postForm["password"]; exists {
+				postForm["password"] = "*****"
+			}
+			formStr, errMarshal := json.Marshal(postForm)
+			if errMarshal == nil {
+				logrus.Debugf("form data: %s", string(formStr))
+			} else {
+				logrus.Debugf("form data: %q", postForm)
 			}
 		}
 
@@ -169,8 +182,8 @@
 		middlewares = append(middlewares, debugRequestMiddleware)
 	}
 
-	if len(s.cfg.AuthZPluginNames) > 0 {
-		s.authZPlugins = authorization.NewPlugins(s.cfg.AuthZPluginNames)
+	if len(s.cfg.AuthorizationPluginNames) > 0 {
+		s.authZPlugins = authorization.NewPlugins(s.cfg.AuthorizationPluginNames)
 		middlewares = append(middlewares, s.authorizationMiddleware)
 	}
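The rewritten middleware peeks at small JSON bodies without consuming them, so the real handler still sees the full request. A stdlib-only sketch of that buffering pattern (using `ioutil.NopCloser` in place of `ioutils.NewReadCloserWrapper`):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"net/http/httptest"
	"strings"
)

func main() {
	const maxBodySize = 4096 // same 4KB cap as the middleware

	r := httptest.NewRequest("POST", "/auth", strings.NewReader(`{"password":"secret"}`))

	bufReader := bufio.NewReaderSize(r.Body, maxBodySize)

	// Peek returns buffered bytes without advancing the reader; io.EOF
	// here means the whole body fit in the buffer, which is the only
	// case the middleware is willing to log.
	b, err := bufReader.Peek(maxBodySize)
	if err != nil && err != io.EOF {
		panic(err)
	}
	fmt.Printf("peeked %d bytes: %s\n", len(b), b)

	// Re-wrap the body so downstream handlers read through the buffer
	// and still see everything that was peeked.
	r.Body = ioutil.NopCloser(bufReader)
	rest, _ := ioutil.ReadAll(r.Body)
	fmt.Printf("handler still sees: %s\n", rest)
}
```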
 
diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go
index ee2f1ef..4e2ffca 100644
--- a/api/server/router/container/container_routes.go
+++ b/api/server/router/container/container_routes.go
@@ -1,6 +1,7 @@
 package container
 
 import (
+	"encoding/json"
 	"fmt"
 	"io"
 	"net/http"
@@ -332,11 +333,17 @@
 		return err
 	}
 
-	_, hostConfig, _, err := runconfig.DecodeContainerConfig(r.Body)
-	if err != nil {
+	var updateConfig container.UpdateConfig
+
+	decoder := json.NewDecoder(r.Body)
+	if err := decoder.Decode(&updateConfig); err != nil {
 		return err
 	}
 
+	hostConfig := &container.HostConfig{
+		Resources: updateConfig.Resources,
+	}
+
 	name := vars["name"]
 	warnings, err := s.backend.ContainerUpdate(name, hostConfig)
 	if err != nil {
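The update route now decodes only the resource fields rather than a full container config. A minimal sketch with simplified stand-ins for the engine-api types (the request body shown is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Simplified stand-ins for the engine-api container types.
type Resources struct {
	Memory    int64 `json:"Memory"`
	CPUShares int64 `json:"CpuShares"`
}

type UpdateConfig struct {
	Resources // only resource limits are updatable here
}

func main() {
	// A hypothetical update request body: bump the memory limit and
	// CPU shares of a running container.
	body := strings.NewReader(`{"Memory": 314572800, "CpuShares": 512}`)

	var updateConfig UpdateConfig
	if err := json.NewDecoder(body).Decode(&updateConfig); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", updateConfig.Resources)
}
```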
diff --git a/api/server/router/network/backend.go b/api/server/router/network/backend.go
index 60461de..c6ea0ad 100644
--- a/api/server/router/network/backend.go
+++ b/api/server/router/network/backend.go
@@ -13,10 +13,10 @@
 	GetNetworksByID(partialID string) []libnetwork.Network
 	GetAllNetworks() []libnetwork.Network
 	CreateNetwork(name, driver string, ipam network.IPAM,
-		options map[string]string) (libnetwork.Network, error)
+		options map[string]string, internal bool) (libnetwork.Network, error)
 	ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
 	DisconnectContainerFromNetwork(containerName string,
-		network libnetwork.Network) error
+		network libnetwork.Network, force bool) error
 	NetworkControllerEnabled() bool
 	DeleteNetwork(name string) error
 }
diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go
index 1f91b72..85fa88d 100644
--- a/api/server/router/network/network_routes.go
+++ b/api/server/router/network/network_routes.go
@@ -92,7 +92,7 @@
 		warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID())
 	}
 
-	nw, err = n.backend.CreateNetwork(create.Name, create.Driver, create.IPAM, create.Options)
+	nw, err = n.backend.CreateNetwork(create.Name, create.Driver, create.IPAM, create.Options, create.Internal)
 	if err != nil {
 		return err
 	}
@@ -144,7 +144,7 @@
 		return err
 	}
 
-	return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw)
+	return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw, disconnect.Force)
 }
 
 func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
@@ -182,12 +182,19 @@
 }
 
 func buildIpamResources(r *types.NetworkResource, nw libnetwork.Network) {
-	id, _, ipv4conf, ipv6conf := nw.Info().IpamConfig()
+	id, opts, ipv4conf, ipv6conf := nw.Info().IpamConfig()
+
+	ipv4Info, ipv6Info := nw.Info().IpamInfo()
 
 	r.IPAM.Driver = id
 
+	r.IPAM.Options = opts
+
 	r.IPAM.Config = []network.IPAMConfig{}
 	for _, ip4 := range ipv4conf {
+		if ip4.PreferredPool == "" {
+			continue
+		}
 		iData := network.IPAMConfig{}
 		iData.Subnet = ip4.PreferredPool
 		iData.IPRange = ip4.SubPool
@@ -196,7 +203,21 @@
 		r.IPAM.Config = append(r.IPAM.Config, iData)
 	}
 
+	if len(r.IPAM.Config) == 0 {
+		for _, ip4Info := range ipv4Info {
+			iData := network.IPAMConfig{}
+			iData.Subnet = ip4Info.IPAMData.Pool.String()
+			iData.Gateway = ip4Info.IPAMData.Gateway.String()
+			r.IPAM.Config = append(r.IPAM.Config, iData)
+		}
+	}
+
+	hasIpv6Conf := false
 	for _, ip6 := range ipv6conf {
+		if ip6.PreferredPool == "" {
+			continue
+		}
+		hasIpv6Conf = true
 		iData := network.IPAMConfig{}
 		iData.Subnet = ip6.PreferredPool
 		iData.IPRange = ip6.SubPool
@@ -204,6 +225,15 @@
 		iData.AuxAddress = ip6.AuxAddresses
 		r.IPAM.Config = append(r.IPAM.Config, iData)
 	}
+
+	if !hasIpv6Conf {
+		for _, ip6Info := range ipv6Info {
+			iData := network.IPAMConfig{}
+			iData.Subnet = ip6Info.IPAMData.Pool.String()
+			iData.Gateway = ip6Info.IPAMData.Gateway.String()
+			r.IPAM.Config = append(r.IPAM.Config, iData)
+		}
+	}
 }
 
 func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource {
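The net effect of the `buildIpamResources` changes above is a precedence rule: user-configured pools win, and driver-allocated pools are reported only when nothing was configured. A stdlib-only sketch of that rule in isolation:

```go
package main

import "fmt"

// subnets applies the same precedence as buildIpamResources: skip empty
// preferred pools, and fall back to driver-allocated pools only when no
// pool was configured at all.
func subnets(configured, allocated []string) []string {
	var out []string
	for _, pool := range configured {
		if pool == "" {
			continue
		}
		out = append(out, pool)
	}
	if len(out) == 0 {
		out = append(out, allocated...)
	}
	return out
}

func main() {
	// No configured pools: report what the driver actually handed out.
	fmt.Println(subnets([]string{""}, []string{"172.18.0.0/16"}))
	// Configured pools take precedence over allocated ones.
	fmt.Println(subnets([]string{"10.0.0.0/24"}, []string{"172.18.0.0/16"}))
}
```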
diff --git a/api/server/router_swapper.go b/api/server/router_swapper.go
new file mode 100644
index 0000000..b5f1d06
--- /dev/null
+++ b/api/server/router_swapper.go
@@ -0,0 +1,30 @@
+package server
+
+import (
+	"net/http"
+	"sync"
+
+	"github.com/gorilla/mux"
+)
+
+// routerSwapper is an http.Handler that allows you to swap
+// mux routers.
+type routerSwapper struct {
+	mu     sync.Mutex
+	router *mux.Router
+}
+
+// Swap changes the old router with the new one.
+func (rs *routerSwapper) Swap(newRouter *mux.Router) {
+	rs.mu.Lock()
+	rs.router = newRouter
+	rs.mu.Unlock()
+}
+
+// ServeHTTP implements the http.Handler interface for the routerSwapper.
+func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	rs.mu.Lock()
+	router := rs.router
+	rs.mu.Unlock()
+	router.ServeHTTP(w, r)
+}
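The same swap-under-mutex pattern, sketched over a plain `http.Handler` so it runs without gorilla/mux, with a usage example that swaps the handler while the server is live:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"sync"
)

// handlerSwapper mirrors routerSwapper but over plain http.Handler, so
// the sketch needs no external dependencies.
type handlerSwapper struct {
	mu sync.Mutex
	h  http.Handler
}

func (hs *handlerSwapper) Swap(h http.Handler) {
	hs.mu.Lock()
	hs.h = h
	hs.mu.Unlock()
}

func (hs *handlerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	hs.mu.Lock()
	h := hs.h
	hs.mu.Unlock()
	h.ServeHTTP(w, r)
}

func get(url string) string {
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := ioutil.ReadAll(resp.Body)
	return string(b)
}

func main() {
	hs := &handlerSwapper{h: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "old router")
	})}

	srv := httptest.NewServer(hs)
	defer srv.Close()

	fmt.Println(get(srv.URL)) // old router

	// Swap the handler while the server keeps serving; in the daemon
	// this happens when Reload rebuilds the mux after a --debug toggle.
	hs.Swap(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "new router")
	}))
	fmt.Println(get(srv.URL)) // new router
}
```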
diff --git a/api/server/server.go b/api/server/server.go
index 8b3c41a..f312f23 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -4,7 +4,6 @@
 	"crypto/tls"
 	"net"
 	"net/http"
-	"os"
 	"strings"
 
 	"github.com/Sirupsen/logrus"
@@ -30,22 +29,23 @@
 
 // Config provides the configuration for the API server
 type Config struct {
-	Logging          bool
-	EnableCors       bool
-	CorsHeaders      string
-	AuthZPluginNames []string
-	Version          string
-	SocketGroup      string
-	TLSConfig        *tls.Config
-	Addrs            []Addr
+	Logging                  bool
+	EnableCors               bool
+	CorsHeaders              string
+	AuthorizationPluginNames []string
+	Version                  string
+	SocketGroup              string
+	TLSConfig                *tls.Config
+	Addrs                    []Addr
 }
 
 // Server contains instance details for the server
 type Server struct {
-	cfg          *Config
-	servers      []*HTTPServer
-	routers      []router.Router
-	authZPlugins []authorization.Plugin
+	cfg           *Config
+	servers       []*HTTPServer
+	routers       []router.Router
+	authZPlugins  []authorization.Plugin
+	routerSwapper *routerSwapper
 }
 
 // Addr contains string representation of address and its protocol (tcp, unix...).
@@ -80,12 +80,14 @@
 	}
 }
 
-// ServeAPI loops through all initialized servers and spawns goroutine
-// with Server method for each. It sets CreateMux() as Handler also.
-func (s *Server) ServeAPI() error {
+// serveAPI loops through all initialized servers and spawns a goroutine
+// for each to serve API requests. It also installs the routerSwapper as the Handler.
+func (s *Server) serveAPI() error {
+	s.initRouterSwapper()
+
 	var chErrors = make(chan error, len(s.servers))
 	for _, srv := range s.servers {
-		srv.srv.Handler = s.CreateMux()
+		srv.srv.Handler = s.routerSwapper
 		go func(srv *HTTPServer) {
 			var err error
 			logrus.Infof("API listen on %s", srv.l.Addr())
@@ -186,11 +188,11 @@
 	s.routers = append(s.routers, r)
 }
 
-// CreateMux initializes the main router the server uses.
+// createMux initializes the main router the server uses.
 // we keep enableCors just for legacy usage, need to be removed in the future
-func (s *Server) CreateMux() *mux.Router {
+func (s *Server) createMux() *mux.Router {
 	m := mux.NewRouter()
-	if os.Getenv("DEBUG") != "" {
+	if utils.IsDebugEnabled() {
 		profilerSetup(m, "/debug/")
 	}
 
@@ -207,3 +209,36 @@
 
 	return m
 }
+
+// Wait blocks the server goroutine until it exits.
+// It sends an error message if there is any error during
+// the API execution.
+func (s *Server) Wait(waitChan chan error) {
+	if err := s.serveAPI(); err != nil {
+		logrus.Errorf("ServeAPI error: %v", err)
+		waitChan <- err
+		return
+	}
+	waitChan <- nil
+}
+
+func (s *Server) initRouterSwapper() {
+	s.routerSwapper = &routerSwapper{
+		router: s.createMux(),
+	}
+}
+
+// Reload reads configuration changes and modifies the
+// server according to those changes.
+// Currently, only the --debug configuration is taken into account.
+func (s *Server) Reload(config *daemon.Config) {
+	debugEnabled := utils.IsDebugEnabled()
+	switch {
+	case debugEnabled && !config.Debug: // disable debug
+		utils.DisableDebug()
+		s.routerSwapper.Swap(s.createMux())
+	case config.Debug && !debugEnabled: // enable debug
+		utils.EnableDebug()
+		s.routerSwapper.Swap(s.createMux())
+	}
+}
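`Reload` only rebuilds the router when the effective debug state actually flips; repeated reloads with the same setting are no-ops. A toy model of that switch:

```go
package main

import "fmt"

// server models just enough of Server.Reload to show the toggle: the
// router is rebuilt only when the effective debug state changes.
type server struct {
	debug   bool
	rebuilt int
}

func (s *server) reload(wantDebug bool) {
	switch {
	case s.debug && !wantDebug: // disable debug
		s.debug = false
		s.rebuilt++ // swap in a mux without the /debug/ profiler routes
	case wantDebug && !s.debug: // enable debug
		s.debug = true
		s.rebuilt++ // swap in a mux with the /debug/ profiler routes
	}
}

func main() {
	s := &server{}
	s.reload(true)
	s.reload(true) // same state: no swap
	s.reload(false)
	fmt.Println("router rebuilt", s.rebuilt, "times") // 2
}
```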
diff --git a/container/container.go b/container/container.go
index 9974a70..ea083c5 100644
--- a/container/container.go
+++ b/container/container.go
@@ -232,6 +232,9 @@
 // Resize changes the TTY of the process running inside the container
 // to the given height and width. The container must be running.
 func (container *Container) Resize(h, w int) error {
+	if container.Command.ProcessConfig.Terminal == nil {
+		return fmt.Errorf("Container %s does not have a terminal ready", container.ID)
+	}
 	if err := container.Command.ProcessConfig.Terminal.Resize(h, w); err != nil {
 		return err
 	}
diff --git a/container/container_unix.go b/container/container_unix.go
index 4b8296b..282d889 100644
--- a/container/container_unix.go
+++ b/container/container_unix.go
@@ -18,6 +18,7 @@
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/system"
+	runconfigopts "github.com/docker/docker/runconfig/opts"
 	"github.com/docker/docker/utils"
 	"github.com/docker/docker/volume"
 	"github.com/docker/engine-api/types/container"
@@ -33,8 +34,8 @@
 // DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container
 const DefaultSHMSize int64 = 67108864
 
-// Container holds the fields specific to unixen implementations. See
-// CommonContainer for standard fields common to all containers.
+// Container holds the fields specific to unixen implementations.
+// See CommonContainer for standard fields common to all containers.
 type Container struct {
 	CommonContainer
 
@@ -193,6 +194,7 @@
 	if _, ok := networkSettings.Networks[n.Name()]; !ok {
 		networkSettings.Networks[n.Name()] = new(network.EndpointSettings)
 	}
+	networkSettings.Networks[n.Name()].NetworkID = n.ID()
 	networkSettings.Networks[n.Name()].EndpointID = ep.ID()
 
 	iface := epInfo.Iface()
@@ -247,6 +249,21 @@
 	return nil
 }
 
+// BuildJoinOptions builds endpoint Join options from a given network.
+func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) {
+	var joinOptions []libnetwork.EndpointOption
+	if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok {
+		for _, str := range epConfig.Links {
+			name, alias, err := runconfigopts.ParseLink(str)
+			if err != nil {
+				return nil, err
+			}
+			joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias))
+		}
+	}
+	return joinOptions, nil
+}
+
 // BuildCreateEndpointOptions builds endpoint options from a given network.
 func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) {
 	var (
@@ -267,6 +284,10 @@
 			createOptions = append(createOptions,
 				libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), nil))
 		}
+
+		for _, alias := range epConfig.Aliases {
+			createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
+		}
 	}
 
 	if !container.HostConfig.NetworkMode.IsUserDefined() {
@@ -686,7 +707,7 @@
 			return err
 		}
 		if len(srcList) == 0 {
-			// If the source volume is empty copy files from the root into the volume
+			// If the source volume is empty, copy files from the root into the volume
 			if err := chrootarchive.CopyWithTar(source, destination); err != nil {
 				return err
 			}
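`BuildJoinOptions` above depends on link parsing of the form `container[:alias]`. A local approximation of `runconfigopts.ParseLink` (the real function may validate more strictly):

```go
package main

import (
	"fmt"
	"strings"
)

// parseLink approximates runconfigopts.ParseLink: links have the form
// "container" or "container:alias"; with no alias, the name is reused.
func parseLink(val string) (string, string, error) {
	if val == "" {
		return "", "", fmt.Errorf("empty string specified for links")
	}
	parts := strings.SplitN(val, ":", 2)
	if len(parts) == 1 {
		return parts[0], parts[0], nil
	}
	return parts[0], parts[1], nil
}

func main() {
	name, alias, _ := parseLink("db:database")
	fmt.Println(name, alias) // db database
	name, alias, _ = parseLink("cache")
	fmt.Println(name, alias) // cache cache
}
```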
diff --git a/container/monitor.go b/container/monitor.go
index 0010a76..2f3368f 100644
--- a/container/monitor.go
+++ b/container/monitor.go
@@ -80,7 +80,6 @@
 // StartMonitor initializes a containerMonitor for this container with the provided supervisor and restart policy
 // and starts the container's process.
 func (container *Container) StartMonitor(s supervisor, policy container.RestartPolicy) error {
-	container.Lock()
 	container.monitor = &containerMonitor{
 		supervisor:    s,
 		container:     container,
@@ -89,7 +88,6 @@
 		stopChan:      make(chan struct{}),
 		startSignal:   make(chan struct{}),
 	}
-	container.Unlock()
 
 	return container.monitor.wait()
 }
@@ -159,8 +157,6 @@
 		}
 		m.Close()
 	}()
-
-	m.container.Lock()
 	// reset stopped flag
 	if m.container.HasBeenManuallyStopped {
 		m.container.HasBeenManuallyStopped = false
@@ -175,20 +171,16 @@
 		if err := m.supervisor.StartLogging(m.container); err != nil {
 			m.resetContainer(false)
 
-			m.container.Unlock()
 			return err
 		}
 
 		pipes := execdriver.NewPipes(m.container.Stdin(), m.container.Stdout(), m.container.Stderr(), m.container.Config.OpenStdin)
-		m.container.Unlock()
 
 		m.logEvent("start")
 
 		m.lastStartTime = time.Now()
 
-		// don't lock Run because m.callback has own lock
 		if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
-			m.container.Lock()
 			// if we receive an internal error from the initial start of a container then lets
 			// return it instead of entering the restart loop
 			// set to 127 for container cmd not found/does not exist)
@@ -198,7 +190,6 @@
 				if m.container.RestartCount == 0 {
 					m.container.ExitCode = 127
 					m.resetContainer(false)
-					m.container.Unlock()
 					return derr.ErrorCodeCmdNotFound
 				}
 			}
@@ -207,7 +198,6 @@
 				if m.container.RestartCount == 0 {
 					m.container.ExitCode = 126
 					m.resetContainer(false)
-					m.container.Unlock()
 					return derr.ErrorCodeCmdCouldNotBeInvoked
 				}
 			}
@@ -216,13 +206,11 @@
 				m.container.ExitCode = -1
 				m.resetContainer(false)
 
-				m.container.Unlock()
 				return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err))
 			}
 
-			m.container.Unlock()
 			logrus.Errorf("Error running container: %s", err)
-		} // end if
+		}
 
 		// here container.Lock is already lost
 		afterRun = true
@@ -243,14 +231,13 @@
 			if m.shouldStop {
 				return err
 			}
-			m.container.Lock()
 			continue
 		}
 
 		m.logEvent("die")
 		m.resetContainer(true)
 		return err
-	} // end for
+	}
 }
 
 // resetMonitor resets the stateful fields on the containerMonitor based on the
@@ -331,7 +318,7 @@
 		}
 	}
 
-	m.container.SetRunningLocking(pid)
+	m.container.SetRunning(pid)
 
 	// signal that the process has started
 	// close channel only if not closed
diff --git a/container/state.go b/container/state.go
index d36ade9..138d798 100644
--- a/container/state.go
+++ b/container/state.go
@@ -179,13 +179,6 @@
 	return res
 }
 
-// SetRunningLocking locks container and sets it to "running"
-func (s *State) SetRunningLocking(pid int) {
-	s.Lock()
-	s.SetRunning(pid)
-	s.Unlock()
-}
-
 // SetRunning sets the state of the container to "running".
 func (s *State) SetRunning(pid int) {
 	s.Error = ""
@@ -199,7 +192,7 @@
 	s.waitChan = make(chan struct{})
 }
 
-// SetStoppedLocking locks the container state and sets it to "stopped".
+// SetStoppedLocking locks the container state and sets it to "stopped".
 func (s *State) SetStoppedLocking(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
 	s.SetStopped(exitStatus)
diff --git a/contrib/builder/deb/debian-jessie/Dockerfile b/contrib/builder/deb/debian-jessie/Dockerfile
index e92b213..2c142cc 100644
--- a/contrib/builder/deb/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/debian-jessie/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev  libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/debian-stretch/Dockerfile b/contrib/builder/deb/debian-stretch/Dockerfile
index efc9e00..5ea789a 100644
--- a/contrib/builder/deb/debian-stretch/Dockerfile
+++ b/contrib/builder/deb/debian-stretch/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/debian-wheezy/Dockerfile b/contrib/builder/deb/debian-wheezy/Dockerfile
index 67d6d43..df2a38c 100644
--- a/contrib/builder/deb/debian-wheezy/Dockerfile
+++ b/contrib/builder/deb/debian-wheezy/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools/wheezy-backports build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev  libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ubuntu-precise/Dockerfile b/contrib/builder/deb/ubuntu-precise/Dockerfile
index daf7668..9bc3d31 100644
--- a/contrib/builder/deb/ubuntu-precise/Dockerfile
+++ b/contrib/builder/deb/ubuntu-precise/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion  build-essential curl ca-certificates debhelper dh-apparmor  git libapparmor-dev  libltdl-dev libsqlite3-dev  --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ubuntu-trusty/Dockerfile b/contrib/builder/deb/ubuntu-trusty/Dockerfile
index 61aaa2f..426f6e3 100644
--- a/contrib/builder/deb/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev  libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ubuntu-wily/Dockerfile b/contrib/builder/deb/ubuntu-wily/Dockerfile
index 9414832..f6a8a64 100644
--- a/contrib/builder/deb/ubuntu-wily/Dockerfile
+++ b/contrib/builder/deb/ubuntu-wily/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/centos-7/Dockerfile b/contrib/builder/rpm/centos-7/Dockerfile
index 0679ea2..8725d07 100644
--- a/contrib/builder/rpm/centos-7/Dockerfile
+++ b/contrib/builder/rpm/centos-7/Dockerfile
@@ -8,7 +8,7 @@
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
 RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/fedora-22/Dockerfile b/contrib/builder/rpm/fedora-22/Dockerfile
index ff88ce1..912be90 100644
--- a/contrib/builder/rpm/fedora-22/Dockerfile
+++ b/contrib/builder/rpm/fedora-22/Dockerfile
@@ -28,7 +28,7 @@
 ) \
 && rm -rf "$SECCOMP_PATH"
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/fedora-23/Dockerfile b/contrib/builder/rpm/fedora-23/Dockerfile
index 1596714..bfa7061 100644
--- a/contrib/builder/rpm/fedora-23/Dockerfile
+++ b/contrib/builder/rpm/fedora-23/Dockerfile
@@ -28,7 +28,7 @@
 ) \
 && rm -rf "$SECCOMP_PATH"
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/opensuse-13.2/Dockerfile b/contrib/builder/rpm/opensuse-13.2/Dockerfile
index 2c4e7c6..2f5f7cb 100644
--- a/contrib/builder/rpm/opensuse-13.2/Dockerfile
+++ b/contrib/builder/rpm/opensuse-13.2/Dockerfile
@@ -7,7 +7,7 @@
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
 RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/oraclelinux-7/Dockerfile b/contrib/builder/rpm/oraclelinux-7/Dockerfile
index 7a129a7..153495d 100644
--- a/contrib/builder/rpm/oraclelinux-7/Dockerfile
+++ b/contrib/builder/rpm/oraclelinux-7/Dockerfile
@@ -7,7 +7,7 @@
 RUN yum groupinstall -y "Development Tools"
 RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel selinux-policy selinux-policy-devel sqlite-devel tar
 
-ENV GO_VERSION 1.5.2
+ENV GO_VERSION 1.5.3
 RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
index e8c7211..3a04365 100755
--- a/contrib/check-config.sh
+++ b/contrib/check-config.sh
@@ -38,28 +38,31 @@
 	zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
 }
 
-# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
-declare -A colors=(
-	[black]=30
-	[red]=31
-	[green]=32
-	[yellow]=33
-	[blue]=34
-	[magenta]=35
-	[cyan]=36
-	[white]=37
-)
 color() {
-	color=()
+	local codes=()
 	if [ "$1" = 'bold' ]; then
-		color+=( '1' )
+		codes=( "${codes[@]}" '1' )
 		shift
 	fi
-	if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then
-		color+=( "${colors[$1]}" )
+	if [ "$#" -gt 0 ]; then
+		local code=
+		case "$1" in
+			# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
+			black) code=30 ;;
+			red) code=31 ;;
+			green) code=32 ;;
+			yellow) code=33 ;;
+			blue) code=34 ;;
+			magenta) code=35 ;;
+			cyan) code=36 ;;
+			white) code=37 ;;
+		esac
+		if [ "$code" ]; then
+			codes=( "${codes[@]}" "$code" )
+		fi
 	fi
 	local IFS=';'
-	echo -en '\033['"${color[*]}"m
+	echo -en '\033['"${codes[*]}"'m'
 }
 wrap_color() {
 	text="$1"
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 7a3543e..4b12ce6 100644
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -340,6 +340,25 @@
 	" -- "$cur" ) )
 }
 
+__docker_complete_detach-keys() {
+	case "$prev" in
+		--detach-keys)
+			case "$cur" in
+				*,)
+					COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) )
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) )
+					;;
+			esac
+
+			__docker_nospace
+			return
+			;;
+	esac
+	return 1
+}
+
 __docker_complete_isolation() {
 	COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) )
 }
@@ -513,12 +532,14 @@
 }
 
 _docker_attach() {
-	case "$cur" in
+	__docker_complete_detach-keys && return
+
+ 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help --no-stdin --sig-proxy" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy" -- "$cur" ) )
 			;;
 		*)
-			local counter="$(__docker_pos_first_nonflag)"
+			local counter=$(__docker_pos_first_nonflag '--detach-keys')
 			if [ $cword -eq $counter ]; then
 				__docker_complete_containers_running
 			fi
@@ -685,7 +706,7 @@
 	local options_with_args="
 		$global_options_with_args
 		--api-cors-header
-		--authz-plugin
+		--authorization-plugin
 		--bip
 		--bridge -b
 		--cgroup-parent
@@ -717,7 +738,7 @@
 	"
 
 	case "$prev" in
-		--authz-plugin)
+		--authorization-plugin)
 			__docker_complete_plugins Authorization
 			return
 			;;
@@ -901,6 +922,8 @@
 }
 
 _docker_exec() {
+	__docker_complete_detach-keys && return
+
 	case "$prev" in
 		--user|-u)
 			return
@@ -909,7 +932,7 @@
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach -d --detach-keys --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) )
 			;;
 		*)
 			__docker_complete_containers_running
@@ -1139,7 +1162,7 @@
 			if [ $cword -eq $counter ]; then
 				__docker_complete_networks
 			elif [ $cword -eq $(($counter + 1)) ]; then
-				__docker_complete_containers_running
+				__docker_complete_containers_all
 			fi
 			;;
 	esac
@@ -1508,13 +1531,20 @@
 		--tty -t
 	"
 
+	if [ "$command" = "run" ] ; then
+		options_with_args="$options_with_args
+			--detach-keys
+		"
+		boolean_options="$boolean_options
+			--detach -d
+			--rm
+			--sig-proxy=false
+		"
+		__docker_complete_detach-keys && return
+	fi
+
 	local all_options="$options_with_args $boolean_options"
 
-	[ "$command" = "run" ] && all_options="$all_options
-		--detach -d
-		--rm
-		--sig-proxy=false
-	"
 
 	case "$prev" in
 		--add-host)
@@ -1701,9 +1731,11 @@
 }
 
 _docker_start() {
+	__docker_complete_detach-keys && return
+
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--attach -a --help --interactive -i" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--attach -a --detach-keys --help --interactive -i" -- "$cur" ) )
 			;;
 		*)
 			__docker_complete_containers_stopped
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index fd6790d..df344a0 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -443,26 +443,29 @@
 }
 
 __docker_subcommand() {
-    local -a _command_args opts_help opts_cpumemlimit opts_create
+    local -a _command_args opts_help opts_build_create_run opts_build_create_run_update opts_create_run opts_create_run_update
     local expl help="--help"
     integer ret=1
 
     opts_help=("(: -)--help[Print usage]")
-    opts_cpumemlimit=(
-        "($help)--cpu-shares=[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)"
+    opts_build_create_run=(
         "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: "
+        "($help)--isolation=[]:isolation:(default hyperv process)"
+        "($help)*--shm-size=[Size of '/dev/shm'. The format is '<number><unit>'. Default is '64m'.]:shm size: "
+        "($help)*--ulimit=[ulimit options]:ulimit: "
+    )
+    opts_build_create_run_update=(
+        "($help)--cpu-shares=[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)"
         "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: "
         "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: "
         "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: "
         "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: "
         "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: "
         "($help)--memory-swap=[Total memory limit with swap]:Memory limit: "
-        "($help)*--ulimit=[ulimit options]:ulimit: "
     )
-    opts_create=(
+    opts_create_run=(
         "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)"
         "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: "
-        "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)"
         "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: "
         "($help)*--cap-add=[Add Linux capabilities]:capability: "
         "($help)*--cap-drop=[Drop Linux capabilities]:capability: "
@@ -483,7 +486,6 @@
         "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts"
         "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]"
         "($help)--ipc=[IPC namespace to use]:IPC namespace: "
-        "($help)--kernel-memory[Kernel memory limit in bytes.]:Memory limit: "
         "($help)*--link=[Add link to another container]:link:->link"
         "($help)*"{-l=,--label=}"[Set meta data on a container]:label: "
         "($help)--log-driver=[Default driver for container logs]:Logging driver:(json-file syslog journald gelf fluentd awslogs splunk none)"
@@ -502,12 +504,17 @@
         "($help)*--security-opt=[Security options]:security option: "
         "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]"
         "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users"
-        "($help)--tmpfs[mount tmpfs] "
+        "($help)--tmpfs[mount tmpfs]"
         "($help)*-v[Bind mount a volume]:volume: "
         "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)"
         "($help)*--volumes-from=[Mount volumes from the specified container]:volume: "
         "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories"
     )
+    opts_create_run_update=(
+        "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)"
+        "($help)--kernel-memory=[Kernel memory limit in bytes.]:Memory limit: "
+        "($help)--memory-reservation=[Memory soft limit]:Memory limit: "
+    )
 
     case "$words[1]" in
         (attach)
@@ -520,7 +527,8 @@
         (build)
             _arguments $(__docker_arguments) \
                 $opts_help \
-                $opts_cpumemlimit \
+                $opts_build_create_run \
+                $opts_build_create_run_update \
                 "($help)*--build-arg[Set build-time variables]:<varname>=<value>: " \
                 "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \
                 "($help)--force-rm[Always remove intermediate containers]" \
@@ -559,8 +567,10 @@
         (create)
             _arguments $(__docker_arguments) \
                 $opts_help \
-                $opts_cpumemlimit \
-                $opts_create \
+                $opts_build_create_run \
+                $opts_build_create_run_update \
+                $opts_create_run \
+                $opts_create_run_update \
                 "($help -): :__docker_images" \
                 "($help -):command: _command_names -e" \
                 "($help -)*::arguments: _normal" && ret=0
@@ -580,7 +590,7 @@
             _arguments $(__docker_arguments) \
                 $opts_help \
                 "($help)--api-cors-header=[Set CORS headers in the remote API]:CORS headers: " \
-                "($help)*--authz-plugin=[Set authorization plugins to load]" \
+                "($help)*--authorization-plugin=[Set authorization plugins to load]" \
                 "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \
                 "($help)--bip=[Specify network bridge IP]" \
                 "($help)--cgroup-parent=[Set parent cgroup for all containers]:cgroup: " \
@@ -624,6 +634,7 @@
                 "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g "*.(pem|crt)"" \
                 "($help)--tlskey=[Path to TLS key file]:Key file:_files -g "*.(pem|key)"" \
                 "($help)--tlsverify[Use TLS and verify the remote]" \
+                "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \
                 "($help)--userland-proxy[Use userland proxy for loopback traffic]" && ret=0
 
             case $state in
@@ -643,6 +654,14 @@
                         _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0
                     fi
                     ;;
+                (users-groups)
+                    if compset -P '*:'; then
+                        _groups && ret=0
+                    else
+                        _describe -t userns-default "default Docker user management" '(default)' && ret=0
+                        _users && ret=0
+                    fi
+                    ;;
             esac
             ;;
         (diff)
@@ -851,8 +870,10 @@
         (run)
             _arguments $(__docker_arguments) \
                 $opts_help \
-                $opts_cpumemlimit \
-                $opts_create \
+                $opts_build_create_run \
+                $opts_build_create_run_update \
+                $opts_create_run \
+                $opts_create_run_update \
                 "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \
                 "($help)--rm[Remove intermediate containers when it exits]" \
                 "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \
@@ -919,6 +940,23 @@
             esac
 
             ;;
+        (update)
+            _arguments $(__docker_arguments) \
+                $opts_help \
+                $opts_create_run_update \
+                $opts_build_create_run_update \
+                "($help -)*: :->values" && ret=0
+
+            case $state in
+                (values)
+                    if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then
+                        __docker_stoppedcontainers && ret=0
+                    else
+                        __docker_containers && ret=0
+                    fi
+                    ;;
+            esac
+            ;;
         (volume)
             local curcontext="$curcontext" state
             _arguments $(__docker_arguments) \
diff --git a/daemon/config.go b/daemon/config.go
index 8e46713..a75178f 100644
--- a/daemon/config.go
+++ b/daemon/config.go
@@ -1,9 +1,19 @@
 package daemon
 
 import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/discovery"
 	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/engine-api/types/container"
+	"github.com/imdario/mergo"
 )
 
 const (
@@ -11,42 +21,69 @@
 	disableNetworkBridge = "none"
 )
 
+// LogConfig represents the default log configuration.
+// It includes json tags to deserialize configuration from a file
+// using the same names as the command-line flags.
+type LogConfig struct {
+	Type   string            `json:"log-driver,omitempty"`
+	Config map[string]string `json:"log-opts,omitempty"`
+}
+
+// CommonTLSOptions defines TLS configuration for the daemon server.
+// It includes json tags to deserialize configuration from a file
+// using the same names as the command-line flags.
+type CommonTLSOptions struct {
+	CAFile   string `json:"tlscacert,omitempty"`
+	CertFile string `json:"tlscert,omitempty"`
+	KeyFile  string `json:"tlskey,omitempty"`
+}
+
 // CommonConfig defines the configuration of a docker daemon which are
 // common across platforms.
+// It includes json tags to deserialize configuration from a file
+// using the same names as the command-line flags.
 type CommonConfig struct {
-	AuthZPlugins  []string // AuthZPlugins holds list of authorization plugins
-	AutoRestart   bool
-	Bridge        bridgeConfig // Bridge holds bridge network specific configuration.
-	Context       map[string][]string
-	DisableBridge bool
-	DNS           []string
-	DNSOptions    []string
-	DNSSearch     []string
-	ExecOptions   []string
-	ExecRoot      string
-	GraphDriver   string
-	GraphOptions  []string
-	Labels        []string
-	LogConfig     container.LogConfig
-	Mtu           int
-	Pidfile       string
-	RemappedRoot  string
-	Root          string
-	TrustKeyPath  string
+	AuthorizationPlugins []string            `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds the list of authorization plugins
+	AutoRestart          bool                `json:"-"`
+	Bridge               bridgeConfig        `json:"-"` // Bridge holds bridge network specific configuration.
+	Context              map[string][]string `json:"-"`
+	DisableBridge        bool                `json:"-"`
+	DNS                  []string            `json:"dns,omitempty"`
+	DNSOptions           []string            `json:"dns-opts,omitempty"`
+	DNSSearch            []string            `json:"dns-search,omitempty"`
+	ExecOptions          []string            `json:"exec-opts,omitempty"`
+	ExecRoot             string              `json:"exec-root,omitempty"`
+	GraphDriver          string              `json:"storage-driver,omitempty"`
+	GraphOptions         []string            `json:"storage-opts,omitempty"`
+	Labels               []string            `json:"labels,omitempty"`
+	LogConfig            LogConfig           `json:"log-config,omitempty"`
+	Mtu                  int                 `json:"mtu,omitempty"`
+	Pidfile              string              `json:"pidfile,omitempty"`
+	Root                 string              `json:"graph,omitempty"`
+	TrustKeyPath         string              `json:"-"`
 
 	// ClusterStore is the storage backend used for the cluster information. It is used by both
 	// multihost networking (to store networks and endpoints information) and by the node discovery
 	// mechanism.
-	ClusterStore string
+	ClusterStore string `json:"cluster-store,omitempty"`
 
 	// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
 	// as TLS configuration settings.
-	ClusterOpts map[string]string
+	ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
 
 	// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
 	// discovery. This should be a 'host:port' combination on which that daemon instance is
 	// reachable by other hosts.
-	ClusterAdvertise string
+	ClusterAdvertise string `json:"cluster-advertise,omitempty"`
+
+	Debug      bool             `json:"debug,omitempty"`
+	Hosts      []string         `json:"hosts,omitempty"`
+	LogLevel   string           `json:"log-level,omitempty"`
+	TLS        bool             `json:"tls,omitempty"`
+	TLSVerify  bool             `json:"tls-verify,omitempty"`
+	TLSOptions CommonTLSOptions `json:"tls-opts,omitempty"`
+
+	reloadLock sync.Mutex
 }
 
 // InstallCommonFlags adds command-line options to the top-level flag parser for
@@ -54,9 +91,9 @@
 // Subsequent calls to `flag.Parse` will populate config with values parsed
 // from the command-line.
 func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) {
-	cmd.Var(opts.NewListOptsRef(&config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options"))
-	cmd.Var(opts.NewListOptsRef(&config.AuthZPlugins, nil), []string{"-authz-plugin"}, usageFn("List authorization plugins in order from first evaluator to last"))
-	cmd.Var(opts.NewListOptsRef(&config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options"))
+	cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options"))
+	cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last"))
+	cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options"))
 	cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file"))
 	cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime"))
 	cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", usageFn("Root of the Docker execdriver"))
@@ -65,12 +102,131 @@
 	cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
-	cmd.Var(opts.NewListOptsRef(&config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use"))
+	cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use"))
 	cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
-	cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
+	cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
 	cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs"))
-	cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))
+	cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))
 	cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise"))
 	cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store"))
-	cmd.Var(opts.NewMapOpts(config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options"))
+	cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options"))
+}
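The switch from `NewListOptsRef` to `NewNamedListOptsRef` gives each flag a configuration-file name, which is what makes the flag/file conflict detection below possible. A self-contained sketch of the idea (the `NamedOption` interface here mirrors the one assumed by `findConfigurationConflicts`; the values are hypothetical):

```go
package main

import "fmt"

// NamedOption exposes the configuration key a flag maps to, so conflicts
// can be detected by name ("storage-opts") instead of by flag spelling
// ("--storage-opt").
type NamedOption interface {
	Name() string
}

type namedList struct {
	name   string
	values *[]string
}

var _ NamedOption = (*namedList)(nil)

func (n *namedList) Name() string { return n.name }
func (n *namedList) Set(v string) error {
	*n.values = append(*n.values, v)
	return nil
}
func (n *namedList) String() string { return fmt.Sprint(*n.values) }

func main() {
	var storageOpts []string
	opt := &namedList{name: "storage-opts", values: &storageOpts}
	opt.Set("dm.thinpooldev=/dev/mapper/thin")

	fileConfig := map[string]interface{}{"storage-opts": []string{"dm.basesize=20G"}}
	if v, ok := fileConfig[opt.Name()]; ok {
		fmt.Printf("conflict on %q: flag=%s file=%v\n", opt.Name(), opt, v)
	}
}
```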
+
+func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
+	if clusterAdvertise == "" {
+		return "", errDiscoveryDisabled
+	}
+	if clusterStore == "" {
+		return "", fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
+	}
+
+	advertise, err := discovery.ParseAdvertise(clusterAdvertise)
+	if err != nil {
+		return "", fmt.Errorf("discovery advertise parsing failed (%v)", err)
+	}
+	return advertise, nil
+}
+
+// ReloadConfiguration reads the configuration file on the host and reloads the daemon and server.
+func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) {
+	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
+	newConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		logrus.Error(err)
+	} else {
+		reload(newConfig)
+	}
+}
+
+// MergeDaemonConfigurations reads a configuration file,
+// loads the file configuration in an isolated structure,
+// and merges the configuration provided from flags on top
+// if there are no conflicts.
+func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) {
+	fileConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		return nil, err
+	}
+
+	// merge flags configuration on top of the file configuration
+	if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
+		return nil, err
+	}
+
+	return fileConfig, nil
+}
+
+// getConflictFreeConfiguration loads the configuration from a JSON file.
+// It compares that configuration with the one provided by the flags,
+// and returns an error if there are conflicts.
+func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) {
+	b, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		return nil, err
+	}
+
+	var reader io.Reader
+	if flags != nil {
+		var jsonConfig map[string]interface{}
+		reader = bytes.NewReader(b)
+		if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
+			return nil, err
+		}
+
+		if err := findConfigurationConflicts(jsonConfig, flags); err != nil {
+			return nil, err
+		}
+	}
+
+	var config Config
+	reader = bytes.NewReader(b)
+	err = json.NewDecoder(reader).Decode(&config)
+	return &config, err
+}
+
+// findConfigurationConflicts iterates over the provided flags searching for
+// duplicated configurations. It returns an error with all the conflicts if
+// it finds any.
+func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error {
+	var conflicts []string
+	flatten := make(map[string]interface{})
+	for k, v := range config {
+		if m, ok := v.(map[string]interface{}); ok {
+			for km, vm := range m {
+				flatten[km] = vm
+			}
+		} else {
+			flatten[k] = v
+		}
+	}
+
+	printConflict := func(name string, flagValue, fileValue interface{}) string {
+		return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
+	}
+
+	collectConflicts := func(f *flag.Flag) {
+		// search option name in the json configuration payload if the value is a named option
+		if namedOption, ok := f.Value.(opts.NamedOption); ok {
+			if optsValue, ok := flatten[namedOption.Name()]; ok {
+				conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
+			}
+		} else {
+		// search flag name in the json configuration payload without leading dashes
+			for _, name := range f.Names {
+				name = strings.TrimLeft(name, "-")
+
+				if value, ok := flatten[name]; ok {
+					conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
+					break
+				}
+			}
+		}
+	}
+
+	flags.Visit(collectConflicts)
+
+	if len(conflicts) > 0 {
+		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
+	}
+	return nil
 }
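`findConfigurationConflicts` flattens nested objects one level so keys such as `tlscacert` inside `tls-opts` can be matched against flag names directly. A standalone rerun of that flattening step (the example file content is hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// flatten mirrors the first loop of findConfigurationConflicts: keys of
// nested objects are hoisted one level so they can be looked up by name.
func flatten(config map[string]interface{}) map[string]interface{} {
	flat := make(map[string]interface{})
	for k, v := range config {
		if m, ok := v.(map[string]interface{}); ok {
			for km, vm := range m {
				flat[km] = vm
			}
		} else {
			flat[k] = v
		}
	}
	return flat
}

func main() {
	raw := `{"debug": true, "tls-opts": {"tlscacert": "/etc/certs/ca.pem"}}`
	var config map[string]interface{}
	if err := json.NewDecoder(strings.NewReader(raw)).Decode(&config); err != nil {
		panic(err)
	}
	flat := flatten(config)
	for _, name := range []string{"debug", "tlscacert"} {
		if v, ok := flat[name]; ok {
			fmt.Printf("file sets %s=%v\n", name, v)
		}
	}
}
```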
diff --git a/daemon/config_experimental.go b/daemon/config_experimental.go
index f1c4bb9..ceb7c38 100644
--- a/daemon/config_experimental.go
+++ b/daemon/config_experimental.go
@@ -2,118 +2,7 @@
 
 package daemon
 
-import (
-	"fmt"
-	"strconv"
-	"strings"
-
-	"github.com/docker/docker/pkg/idtools"
-	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/opencontainers/runc/libcontainer/user"
-)
+import flag "github.com/docker/docker/pkg/mflag"
 
 func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) {
-	cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces"))
-}
-
-const (
-	defaultIDSpecifier string = "default"
-	defaultRemappedID  string = "dockremap"
-)
-
-// Parse the remapped root (user namespace) option, which can be one of:
-//   username            - valid username from /etc/passwd
-//   username:groupname  - valid username; valid groupname from /etc/group
-//   uid                 - 32-bit unsigned int valid Linux UID value
-//   uid:gid             - uid value; 32-bit unsigned int Linux GID value
-//
-//  If no groupname is specified, and a username is specified, an attempt
-//  will be made to lookup a gid for that username as a groupname
-//
-//  If names are used, they are verified to exist in passwd/group
-func parseRemappedRoot(usergrp string) (string, string, error) {
-
-	var (
-		userID, groupID     int
-		username, groupname string
-	)
-
-	idparts := strings.Split(usergrp, ":")
-	if len(idparts) > 2 {
-		return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
-	}
-
-	if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
-		// must be a uid; take it as valid
-		userID = int(uid)
-		luser, err := user.LookupUid(userID)
-		if err != nil {
-			return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
-		}
-		username = luser.Name
-		if len(idparts) == 1 {
-			// if the uid was numeric and no gid was specified, take the uid as the gid
-			groupID = userID
-			lgrp, err := user.LookupGid(groupID)
-			if err != nil {
-				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
-			}
-			groupname = lgrp.Name
-		}
-	} else {
-		lookupName := idparts[0]
-		// special case: if the user specified "default", they want Docker to create or
-		// use (after creation) the "dockremap" user/group for root remapping
-		if lookupName == defaultIDSpecifier {
-			lookupName = defaultRemappedID
-		}
-		luser, err := user.LookupUser(lookupName)
-		if err != nil && idparts[0] != defaultIDSpecifier {
-			// error if the name requested isn't the special "dockremap" ID
-			return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
-		} else if err != nil {
-			// special case-- if the username == "default", then we have been asked
-			// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
-			// ranges will be used for the user and group mappings in user namespaced containers
-			_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
-			if err == nil {
-				return defaultRemappedID, defaultRemappedID, nil
-			}
-			return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
-		}
-		userID = luser.Uid
-		username = luser.Name
-		if len(idparts) == 1 {
-			// we only have a string username, and no group specified; look up gid from username as group
-			group, err := user.LookupGroup(lookupName)
-			if err != nil {
-				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
-			}
-			groupID = group.Gid
-			groupname = group.Name
-		}
-	}
-
-	if len(idparts) == 2 {
-		// groupname or gid is separately specified and must be resolved
-		// to a unsigned 32-bit gid
-		if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
-			// must be a gid, take it as valid
-			groupID = int(gid)
-			lgrp, err := user.LookupGid(groupID)
-			if err != nil {
-				return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err)
-			}
-			groupname = lgrp.Name
-		} else {
-			// not a number; attempt a lookup
-			group, err := user.LookupGroup(idparts[1])
-			if err != nil {
-				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", idparts[1], err)
-			}
-			groupID = group.Gid
-			groupname = idparts[1]
-		}
-	}
-	return username, groupname, nil
 }
diff --git a/daemon/config_test.go b/daemon/config_test.go
new file mode 100644
index 0000000..69a199e
--- /dev/null
+++ b/daemon/config_test.go
@@ -0,0 +1,177 @@
+package daemon
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/mflag"
+)
+
+func TestDaemonConfigurationMerge(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"debug": true}`))
+	f.Close()
+
+	c := &Config{
+		CommonConfig: CommonConfig{
+			AutoRestart: true,
+			LogConfig: LogConfig{
+				Type:   "syslog",
+				Config: map[string]string{"tag": "test"},
+			},
+		},
+	}
+
+	cc, err := MergeDaemonConfigurations(c, nil, configFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cc.Debug {
+		t.Fatalf("expected %v, got %v\n", true, cc.Debug)
+	}
+	if !cc.AutoRestart {
+		t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart)
+	}
+	if cc.LogConfig.Type != "syslog" {
+		t.Fatalf("expected syslog config, got %q\n", cc.LogConfig)
+	}
+}
+
+func TestDaemonConfigurationNotFound(t *testing.T) {
+	_, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker")
+	if err == nil || !os.IsNotExist(err) {
+		t.Fatalf("expected does not exist error, got %v", err)
+	}
+}
+
+func TestDaemonBrokenConfiguration(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"Debug": tru`))
+	f.Close()
+
+	_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
+	if err == nil {
+		t.Fatalf("expected error, got %v", err)
+	}
+}
+
+func TestParseClusterAdvertiseSettings(t *testing.T) {
+	_, err := parseClusterAdvertiseSettings("something", "")
+	if err != errDiscoveryDisabled {
+		t.Fatalf("expected discovery disabled error, got %v\n", err)
+	}
+
+	_, err = parseClusterAdvertiseSettings("", "something")
+	if err == nil {
+		t.Fatalf("expected discovery store error, got %v\n", err)
+	}
+
+	_, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFindConfigurationConflicts(t *testing.T) {
+	config := map[string]interface{}{"authorization-plugins": "foobar"}
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+
+	err := findConfigurationConflicts(config, flags)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	flags.String([]string{"authorization-plugins"}, "", "")
+	if err := flags.Set("authorization-plugins", "asdf"); err != nil {
+		t.Fatal(err)
+	}
+
+	err = findConfigurationConflicts(config, flags)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "authorization-plugins") {
+		t.Fatalf("expected authorization-plugins conflict, got %v", err)
+	}
+}
+
+func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) {
+	config := map[string]interface{}{"hosts": []string{"qwer"}}
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+
+	var hosts []string
+	flags.Var(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to")
+	if err := flags.Set("-host", "tcp://127.0.0.1:4444"); err != nil {
+		t.Fatal(err)
+	}
+	if err := flags.Set("H", "unix:///var/run/docker.sock"); err != nil {
+		t.Fatal(err)
+	}
+
+	err := findConfigurationConflicts(config, flags)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "hosts") {
+		t.Fatalf("expected hosts conflict, got %v", err)
+	}
+}
+
+func TestDaemonConfigurationMergeConflicts(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"debug": true}`))
+	f.Close()
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	flags.Bool([]string{"debug"}, false, "")
+	flags.Set("debug", "false")
+
+	_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "debug") {
+		t.Fatalf("expected debug conflict, got %v", err)
+	}
+}
+
+func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`))
+	f.Close()
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	flags.String([]string{"tlscacert"}, "", "")
+	flags.Set("tlscacert", "~/.docker/ca.pem")
+
+	_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "tlscacert") {
+		t.Fatalf("expected tlscacert conflict, got %v", err)
+	}
+}
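
Note: the configuration files these tests write use the daemon's flag names as JSON keys. A slightly fuller example of the kind of file being merged (values are hypothetical; the "userns-remap" and "selinux-enabled" keys come from the json tags added to Config in the next file):

    {
        "debug": true,
        "tlscacert": "/etc/certificates/ca.pem",
        "userns-remap": "default",
        "selinux-enabled": true
    }

Any key that is also set on the command line triggers the conflict error exercised by TestDaemonConfigurationMergeConflicts above.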
diff --git a/daemon/config_unix.go b/daemon/config_unix.go
index ce14f9f..60fb3a9 100644
--- a/daemon/config_unix.go
+++ b/daemon/config_unix.go
@@ -18,18 +18,20 @@
 )
 
 // Config defines the configuration of a docker daemon.
+// It includes json tags to deserialize configuration from a file
+// using the same names as the command-line flags.
 type Config struct {
 	CommonConfig
 
 	// Fields below here are platform specific.
 
-	CorsHeaders          string
-	EnableCors           bool
-	EnableSelinuxSupport bool
-	RemappedRoot         string
-	SocketGroup          string
-	CgroupParent         string
-	Ulimits              map[string]*units.Ulimit
+	CorsHeaders          string                   `json:"api-cors-headers,omitempty"`
+	EnableCors           bool                     `json:"api-enable-cors,omitempty"`
+	EnableSelinuxSupport bool                     `json:"selinux-enabled,omitempty"`
+	RemappedRoot         string                   `json:"userns-remap,omitempty"`
+	SocketGroup          string                   `json:"group,omitempty"`
+	CgroupParent         string                   `json:"cgroup-parent,omitempty"`
+	Ulimits              map[string]*units.Ulimit `json:"default-ulimits,omitempty"`
 }
 
 // bridgeConfig stores all the bridge driver specific
@@ -79,6 +81,7 @@
 	cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header"))
 	cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API"))
 	cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers"))
+	cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces"))
 
 	config.attachExperimentalFlags(cmd, usageFn)
 }
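
Note: because the json tags mirror the flag names, a single struct can drive both flag registration and config-file deserialization. A minimal self-contained sketch of that mechanism (simplified field set; not the real Config type):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // config is a sketch: file keys deliberately match the flag names via json tags.
    type config struct {
    	RemappedRoot string `json:"userns-remap,omitempty"`
    	SocketGroup  string `json:"group,omitempty"`
    }

    func main() {
    	data := []byte(`{"userns-remap": "default", "group": "docker"}`)
    	var c config
    	if err := json.Unmarshal(data, &c); err != nil {
    		panic(err)
    	}
    	fmt.Println(c.RemappedRoot, c.SocketGroup) // default docker
    }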
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index 9840fa2..0a361a6 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -37,40 +37,36 @@
 
 func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
 	var env []string
-	children, err := daemon.children(container.Name)
-	if err != nil {
-		return nil, err
-	}
+	children := daemon.children(container)
 
 	bridgeSettings := container.NetworkSettings.Networks["bridge"]
 	if bridgeSettings == nil {
 		return nil, nil
 	}
 
-	if len(children) > 0 {
-		for linkAlias, child := range children {
-			if !child.IsRunning() {
-				return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias)
-			}
+	for linkAlias, child := range children {
+		if !child.IsRunning() {
+			return nil, derr.ErrorCodeLinkNotRunning.WithArgs(child.Name, linkAlias)
+		}
 
-			childBridgeSettings := child.NetworkSettings.Networks["bridge"]
-			if childBridgeSettings == nil {
-				return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
-			}
+		childBridgeSettings := child.NetworkSettings.Networks["bridge"]
+		if childBridgeSettings == nil {
+			return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID)
+		}
 
-			link := links.NewLink(
-				bridgeSettings.IPAddress,
-				childBridgeSettings.IPAddress,
-				linkAlias,
-				child.Config.Env,
-				child.Config.ExposedPorts,
-			)
+		link := links.NewLink(
+			bridgeSettings.IPAddress,
+			childBridgeSettings.IPAddress,
+			linkAlias,
+			child.Config.Env,
+			child.Config.ExposedPorts,
+		)
 
-			for _, envVar := range link.ToEnv() {
-				env = append(env, envVar)
-			}
+		for _, envVar := range link.ToEnv() {
+			env = append(env, envVar)
 		}
 	}
+
 	return env, nil
 }
 
@@ -213,7 +209,7 @@
 		BlkioThrottleWriteBpsDevice:  writeBpsDevice,
 		BlkioThrottleReadIOpsDevice:  readIOpsDevice,
 		BlkioThrottleWriteIOpsDevice: writeIOpsDevice,
-		OomKillDisable:               c.HostConfig.OomKillDisable,
+		OomKillDisable:               *c.HostConfig.OomKillDisable,
 		MemorySwappiness:             -1,
 	}
 
@@ -242,6 +238,14 @@
 	}
 	uidMap, gidMap := daemon.GetUIDGIDMaps()
 
+	if !daemon.seccompEnabled {
+		if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
+			return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.")
+		}
+		logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.")
+		c.SeccompProfile = "unconfined"
+	}
+
 	defaultCgroupParent := "/docker"
 	if daemon.configStore.CgroupParent != "" {
 		defaultCgroupParent = daemon.configStore.CgroupParent
@@ -419,11 +423,7 @@
 
 	var childEndpoints, parentEndpoints []string
 
-	children, err := daemon.children(container.Name)
-	if err != nil {
-		return nil, err
-	}
-
+	children := daemon.children(container)
 	for linkAlias, child := range children {
 		if !isLinkable(child) {
 			return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name)
@@ -443,23 +443,20 @@
 	}
 
 	bridgeSettings := container.NetworkSettings.Networks["bridge"]
-	refs := daemon.containerGraph().RefPaths(container.ID)
-	for _, ref := range refs {
-		if ref.ParentID == "0" {
+	for alias, parent := range daemon.parents(container) {
+		if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() {
 			continue
 		}
 
-		c, err := daemon.GetContainer(ref.ParentID)
-		if err != nil {
-			logrus.Error(err)
-		}
-
-		if c != nil && !daemon.configStore.DisableBridge && container.HostConfig.NetworkMode.IsPrivate() {
-			logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", c.ID, ref.Name, bridgeSettings.IPAddress)
-			sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(c.ID, ref.Name, bridgeSettings.IPAddress))
-			if ep.ID() != "" {
-				parentEndpoints = append(parentEndpoints, ep.ID())
-			}
+		_, alias = path.Split(alias)
+		logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress)
+		sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate(
+			parent.ID,
+			alias,
+			bridgeSettings.IPAddress,
+		))
+		if ep.ID() != "" {
+			parentEndpoints = append(parentEndpoints, ep.ID())
 		}
 	}
 
@@ -471,7 +468,6 @@
 	}
 
 	sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions))
-
 	return sboxOptions, nil
 }
 
@@ -590,7 +586,7 @@
 	if container.NetworkSettings == nil {
 		container.NetworkSettings = &network.Settings{}
 	}
-	if endpointsConfig != nil {
+	if len(endpointsConfig) > 0 {
 		container.NetworkSettings.Networks = endpointsConfig
 	}
 	if container.NetworkSettings.Networks == nil {
@@ -708,13 +704,43 @@
 	es.MacAddress = ""
 }
 
+func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrName string, updateSettings bool) (libnetwork.Network, error) {
+	if container.HostConfig.NetworkMode.IsContainer() {
+		return nil, runconfig.ErrConflictSharedNetwork
+	}
+
+	if containertypes.NetworkMode(idOrName).IsBridge() &&
+		daemon.configStore.DisableBridge {
+		container.Config.NetworkDisabled = true
+		return nil, nil
+	}
+
+	n, err := daemon.FindNetwork(idOrName)
+	if err != nil {
+		return nil, err
+	}
+
+	if updateSettings {
+		if err := daemon.updateNetworkSettings(container, n); err != nil {
+			return nil, err
+		}
+	}
+	return n, nil
+}
+
 // ConnectToNetwork connects a container to a network
 func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error {
 	if !container.Running {
-		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
-	}
-	if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil {
-		return err
+		if container.RemovalInProgress || container.Dead {
+			return derr.ErrorCodeRemovalContainer.WithArgs(container.ID)
+		}
+		if _, err := daemon.updateNetworkConfig(container, idOrName, true); err != nil {
+			return err
+		}
+	} else {
+		if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil {
+			return err
+		}
 	}
 	if err := container.ToDiskLocking(); err != nil {
 		return fmt.Errorf("Error saving container to disk: %v", err)
@@ -723,63 +749,45 @@
 }
 
 func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) {
-	if container.HostConfig.NetworkMode.IsContainer() {
-		return runconfig.ErrConflictSharedNetwork
+	n, err := daemon.updateNetworkConfig(container, idOrName, updateSettings)
+	if err != nil {
+		return err
+	}
+	if n == nil {
+		return nil
 	}
 
 	if !containertypes.NetworkMode(idOrName).IsUserDefined() && hasUserDefinedIPAddress(endpointConfig) {
 		return runconfig.ErrUnsupportedNetworkAndIP
 	}
 
-	if containertypes.NetworkMode(idOrName).IsBridge() &&
-		daemon.configStore.DisableBridge {
-		container.Config.NetworkDisabled = true
-		return nil
+	if !containertypes.NetworkMode(idOrName).IsUserDefined() && len(endpointConfig.Aliases) > 0 {
+		return runconfig.ErrUnsupportedNetworkAndAlias
 	}
 
 	controller := daemon.netController
 
-	n, err := daemon.FindNetwork(idOrName)
-	if err != nil {
-		return err
-	}
-
 	if err := validateNetworkingConfig(n, endpointConfig); err != nil {
 		return err
 	}
 
-	if updateSettings {
-		if err := daemon.updateNetworkSettings(container, n); err != nil {
-			return err
-		}
-	}
-
 	if endpointConfig != nil {
 		container.NetworkSettings.Networks[n.Name()] = endpointConfig
 	}
 
-	ep, err := container.GetEndpointInNetwork(n)
-	if err == nil {
-		return fmt.Errorf("Conflict. A container with name %q is already connected to network %s.", strings.TrimPrefix(container.Name, "/"), idOrName)
-	}
-
-	if _, ok := err.(libnetwork.ErrNoSuchEndpoint); !ok {
-		return err
-	}
-
 	createOptions, err := container.BuildCreateEndpointOptions(n)
 	if err != nil {
 		return err
 	}
 
 	endpointName := strings.TrimPrefix(container.Name, "/")
-	ep, err = n.CreateEndpoint(endpointName, createOptions...)
+	ep, err := n.CreateEndpoint(endpointName, createOptions...)
 	if err != nil {
 		return err
 	}
 	defer func() {
 		if err != nil {
-			if e := ep.Delete(); e != nil {
+			if e := ep.Delete(false); e != nil {
 				logrus.Warnf("Could not rollback container connection to network %s", idOrName)
 			}
 		}
@@ -803,7 +811,12 @@
 		container.UpdateSandboxNetworkSettings(sb)
 	}
 
-	if err := ep.Join(sb); err != nil {
+	joinOptions, err := container.BuildJoinOptions(n)
+	if err != nil {
+		return err
+	}
+
+	if err := ep.Join(sb, joinOptions...); err != nil {
 		return err
 	}
 
@@ -815,18 +828,33 @@
 	return nil
 }
 
-// DisconnectFromNetwork disconnects container from network n.
-func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network) error {
-	if !container.Running {
-		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
+// ForceEndpointDelete deletes an endpoint from a network forcefully
+func (daemon *Daemon) ForceEndpointDelete(name string, n libnetwork.Network) error {
+	ep, err := n.EndpointByName(name)
+	if err != nil {
+		return err
 	}
+	return ep.Delete(true)
+}
 
+// DisconnectFromNetwork disconnects container from network n.
+func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error {
 	if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() {
 		return runconfig.ErrConflictHostNetwork
 	}
-
-	if err := disconnectFromNetwork(container, n); err != nil {
-		return err
+	if !container.Running {
+		if container.RemovalInProgress || container.Dead {
+			return derr.ErrorCodeRemovalContainer.WithArgs(container.ID)
+		}
+		if _, ok := container.NetworkSettings.Networks[n.Name()]; ok {
+			delete(container.NetworkSettings.Networks, n.Name())
+		} else {
+			return fmt.Errorf("container %s is not connected to the network %s", container.ID, n.Name())
+		}
+	} else {
+		if err := disconnectFromNetwork(container, n, false); err != nil {
+			return err
+		}
 	}
 
 	if err := container.ToDiskLocking(); err != nil {
@@ -840,7 +868,7 @@
 	return nil
 }
 
-func disconnectFromNetwork(container *container.Container, n libnetwork.Network) error {
+func disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error {
 	var (
 		ep   libnetwork.Endpoint
 		sbox libnetwork.Sandbox
@@ -862,6 +890,15 @@
 	}
 	n.WalkEndpoints(s)
 
+	if ep == nil && force {
+		epName := strings.TrimPrefix(container.Name, "/")
+		ep, err := n.EndpointByName(epName)
+		if err != nil {
+			return err
+		}
+		return ep.Delete(force)
+	}
+
 	if ep == nil {
 		return fmt.Errorf("container %s is not connected to the network", container.ID)
 	}
@@ -870,7 +907,7 @@
 		return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
 	}
 
-	if err := ep.Delete(); err != nil {
+	if err := ep.Delete(false); err != nil {
 		return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
 	}
 
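Note: connectToNetwork above leans on Go's named error return so the deferred cleanup can observe failures from later steps (BuildJoinOptions, Join) and delete the endpoint it just created. A minimal self-contained sketch of that rollback idiom, using a hypothetical resource type rather than the libnetwork API:

    package main

    import (
    	"errors"
    	"fmt"
    )

    type endpoint struct{ name string }

    func (e *endpoint) Delete() { fmt.Println("rolled back", e.name) }

    // connect creates a resource and then runs a step that may fail; the deferred
    // closure sees the final value of err because err is a named return value.
    func connect(failJoin bool) (err error) {
    	ep := &endpoint{name: "web"}
    	defer func() {
    		if err != nil {
    			ep.Delete()
    		}
    	}()
    	if failJoin {
    		return errors.New("join failed")
    	}
    	return nil
    }

    func main() {
    	fmt.Println(connect(true))  // prints the rollback, then "join failed"
    	fmt.Println(connect(false)) // succeeds; no rollback
    }
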
diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go
index d713541..8c3ae27 100644
--- a/daemon/container_operations_windows.go
+++ b/daemon/container_operations_windows.go
@@ -32,8 +32,13 @@
 	return nil
 }
 
+// ForceEndpointDelete deletes an endpoint from a network forcefully
+func (daemon *Daemon) ForceEndpointDelete(name string, n libnetwork.Network) error {
+	return nil
+}
+
 // DisconnectFromNetwork disconnects a container from the network.
-func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network) error {
+func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error {
 	return nil
 }
 
diff --git a/daemon/create_unix.go b/daemon/create_unix.go
index 1701fcf..e10f9e4 100644
--- a/daemon/create_unix.go
+++ b/daemon/create_unix.go
@@ -23,6 +23,10 @@
 	}
 	defer daemon.Unmount(container)
 
+	if err := container.SetupWorkingDirectory(); err != nil {
+		return err
+	}
+
 	for spec := range config.Volumes {
 		name := stringid.GenerateNonCryptoID()
 		destination := filepath.Clean(spec)
diff --git a/daemon/daemon.go b/daemon/daemon.go
index b23543a..9e0e77e 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -12,9 +12,9 @@
 	"io/ioutil"
 	"net"
 	"os"
+	"path"
 	"path/filepath"
 	"runtime"
-	"strings"
 	"sync"
 	"syscall"
 	"time"
@@ -46,13 +46,13 @@
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/migrate/v1"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/discovery"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/namesgenerator"
 	"github.com/docker/docker/pkg/progress"
+	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/stringid"
@@ -147,7 +147,6 @@
 	trustKey                  libtrust.PrivateKey
 	idIndex                   *truncindex.TruncIndex
 	configStore               *Config
-	containerGraphDB          *graphdb.Database
 	execDriver                execdriver.Driver
 	statsCollector            *statsCollector
 	defaultLogConfig          containertypes.LogConfig
@@ -155,13 +154,16 @@
 	EventsService             *events.Events
 	netController             libnetwork.NetworkController
 	volumes                   *store.VolumeStore
-	discoveryWatcher          discovery.Watcher
+	discoveryWatcher          discoveryReloader
 	root                      string
+	seccompEnabled            bool
 	shutdown                  bool
 	uidMaps                   []idtools.IDMap
 	gidMaps                   []idtools.IDMap
 	layerStore                layer.Store
 	imageStore                image.Store
+	nameIndex                 *registrar.Registrar
+	linkIndex                 *linkIndex
 }
 
 // GetContainer looks for a container using the provided information, which could be
@@ -245,8 +247,7 @@
 			logrus.Errorf("Error saving container name to disk: %v", err)
 		}
 	}
-
-	return nil
+	return daemon.nameIndex.Reserve(container.Name, container.ID)
 }
 
 // Register makes a container object usable by the daemon as <container.ID>
@@ -257,10 +258,8 @@
 	} else {
 		container.NewNopInputPipe()
 	}
-	daemon.containers.Add(container.ID, container)
 
-	// don't update the Suffixarray if we're starting up
-	// we'll waste time if we update it for every container
+	daemon.containers.Add(container.ID, container)
 	daemon.idIndex.Add(container.ID)
 
 	if container.IsRunning() {
@@ -291,15 +290,10 @@
 }
 
 func (daemon *Daemon) restore() error {
-	type cr struct {
-		container  *container.Container
-		registered bool
-	}
-
 	var (
-		debug         = os.Getenv("DEBUG") != ""
+		debug         = utils.IsDebugEnabled()
 		currentDriver = daemon.GraphDriverName()
-		containers    = make(map[string]*cr)
+		containers    = make(map[string]*container.Container)
 	)
 
 	if !debug {
@@ -332,63 +326,70 @@
 		if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
 			logrus.Debugf("Loaded container %v", container.ID)
 
-			containers[container.ID] = &cr{container: container}
+			containers[container.ID] = container
 		} else {
 			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
 	}
 
-	if entities := daemon.containerGraphDB.List("/", -1); entities != nil {
-		for _, p := range entities.Paths() {
-			if !debug && logrus.GetLevel() == logrus.InfoLevel {
-				fmt.Print(".")
-			}
+	var migrateLegacyLinks bool
+	restartContainers := make(map[*container.Container]chan struct{})
+	for _, c := range containers {
+		if err := daemon.registerName(c); err != nil {
+			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
+			continue
+		}
+		if err := daemon.Register(c); err != nil {
+			logrus.Errorf("Failed to register container %s: %s", c.ID, err)
+			continue
+		}
 
-			e := entities[p]
+		// get list of containers we need to restart
+		if daemon.configStore.AutoRestart && c.ShouldRestart() {
+			restartContainers[c] = make(chan struct{})
+		}
 
-			if c, ok := containers[e.ID()]; ok {
-				c.registered = true
-			}
+		// if c.HostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
+		if c.HostConfig != nil && c.HostConfig.Links == nil {
+			migrateLegacyLinks = true
 		}
 	}
 
-	restartContainers := make(map[*container.Container]chan struct{})
-	for _, c := range containers {
-		if !c.registered {
-			// Try to set the default name for a container if it exists prior to links
-			c.container.Name, err = daemon.generateNewName(c.container.ID)
-			if err != nil {
-				logrus.Debugf("Setting default id - %s", err)
-			}
-			if err := daemon.registerName(c.container); err != nil {
-				logrus.Errorf("Failed to register container %s: %s", c.container.ID, err)
-				continue
-			}
+	// migrate any legacy links from sqlite
+	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
+	var legacyLinkDB *graphdb.Database
+	if migrateLegacyLinks {
+		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
+		if err != nil {
+			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
 		}
+		defer legacyLinkDB.Close()
+	}
 
-		if err := daemon.Register(c.container); err != nil {
-			logrus.Errorf("Failed to register container %s: %s", c.container.ID, err)
-			continue
+	// Now that all the containers are registered, register the links
+	for _, c := range containers {
+		if migrateLegacyLinks {
+			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
+				return err
+			}
 		}
-		// get list of containers we need to restart
-		if daemon.configStore.AutoRestart && c.container.ShouldRestart() {
-			restartContainers[c.container] = make(chan struct{})
+		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
+			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
 		}
 	}
 
 	group := sync.WaitGroup{}
 	for c, notifier := range restartContainers {
 		group.Add(1)
-		go func(container *container.Container, chNotify chan struct{}) {
+
+		go func(c *container.Container, chNotify chan struct{}) {
 			defer group.Done()
-			logrus.Debugf("Starting container %s", container.ID)
+
+			logrus.Debugf("Starting container %s", c.ID)
 
 			// ignore errors here as this is a best effort to wait for children to be
 			//   running before we try to start the container
-			children, err := daemon.children(container.Name)
-			if err != nil {
-				logrus.Warnf("error getting children for %s: %v", container.Name, err)
-			}
+			children := daemon.children(c)
 			timeout := time.After(5 * time.Second)
 			for _, child := range children {
 				if notifier, exists := restartContainers[child]; exists {
@@ -398,11 +399,12 @@
 					}
 				}
 			}
-			if err := daemon.containerStart(container); err != nil {
-				logrus.Errorf("Failed to start container %s: %s", container.ID, err)
+			if err := daemon.containerStart(c); err != nil {
+				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
 			}
 			close(chNotify)
 		}(c, notifier)
+
 	}
 	group.Wait()
 
@@ -452,28 +454,28 @@
 	if !validContainerNamePattern.MatchString(name) {
 		return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)
 	}
-
 	if name[0] != '/' {
 		name = "/" + name
 	}
 
-	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
-		if !graphdb.IsNonUniqueNameError(err) {
-			return "", err
+	if err := daemon.nameIndex.Reserve(name, id); err != nil {
+		if err == registrar.ErrNameReserved {
+			id, err := daemon.nameIndex.Get(name)
+			if err != nil {
+				logrus.Errorf("got unexpected error while looking up reserved name: %v", err)
+				return "", err
+			}
+			return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id)
 		}
-
-		conflictingContainer, err := daemon.GetByName(name)
-		if err != nil {
-			return "", err
-		}
-		return "", fmt.Errorf(
-			"Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", strings.TrimPrefix(name, "/"),
-			stringid.TruncateID(conflictingContainer.ID))
-
+		return "", fmt.Errorf("error reserving name: %s, error: %v", name, err)
 	}
 	return name, nil
 }
 
+func (daemon *Daemon) releaseName(name string) {
+	daemon.nameIndex.Release(name)
+}
+
 func (daemon *Daemon) generateNewName(id string) (string, error) {
 	var name string
 	for i := 0; i < 6; i++ {
@@ -482,17 +484,17 @@
 			name = "/" + name
 		}
 
-		if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
-			if !graphdb.IsNonUniqueNameError(err) {
-				return "", err
+		if err := daemon.nameIndex.Reserve(name, id); err != nil {
+			if err == registrar.ErrNameReserved {
+				continue
 			}
-			continue
+			return "", err
 		}
 		return name, nil
 	}
 
 	name = "/" + stringid.TruncateID(id)
-	if _, err := daemon.containerGraphDB.Set(name, id); err != nil {
+	if err := daemon.nameIndex.Reserve(name, id); err != nil {
 		return "", err
 	}
 	return name, nil
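
Note: the nameIndex used here is a pkg/registrar.Registrar. Judging only from the Reserve, Release, and Get calls visible in this diff, a minimal approximation of such a registrar could look like the following (a sketch under those assumptions, not the actual implementation):

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    var errNameReserved = errors.New("name is reserved")

    // registrar is a sketch of a name-to-ID reservation table.
    type registrar struct {
    	mu    sync.Mutex
    	names map[string]string // name -> owning container ID
    }

    func newRegistrar() *registrar {
    	return &registrar{names: make(map[string]string)}
    }

    // Reserve claims name for id; reserving a taken name for another id fails.
    func (r *registrar) Reserve(name, id string) error {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	if owner, ok := r.names[name]; ok && owner != id {
    		return errNameReserved
    	}
    	r.names[name] = id
    	return nil
    }

    // Release frees a name so it can be reserved again.
    func (r *registrar) Release(name string) {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	delete(r.names, name)
    }

    // Get returns the ID that currently holds name.
    func (r *registrar) Get(name string) (string, error) {
    	r.mu.Lock()
    	defer r.mu.Unlock()
    	id, ok := r.names[name]
    	if !ok {
    		return "", errors.New("name not reserved")
    	}
    	return id, nil
    }

    func main() {
    	r := newRegistrar()
    	fmt.Println(r.Reserve("/web", "c1")) // <nil>
    	fmt.Println(r.Reserve("/web", "c2")) // name is reserved
    	r.Release("/web")
    	fmt.Println(r.Reserve("/web", "c2")) // <nil>
    }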
@@ -542,32 +544,19 @@
 	return base, err
 }
 
-// GetFullContainerName returns a constructed container name. I think
-// it has to do with the fact that a container is a file on disk and
-// this is sort of just creating a file name.
-func GetFullContainerName(name string) (string, error) {
-	if name == "" {
-		return "", fmt.Errorf("Container name cannot be empty")
-	}
-	if name[0] != '/' {
-		name = "/" + name
-	}
-	return name, nil
-}
-
 // GetByName returns a container given a name.
 func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
-	fullName, err := GetFullContainerName(name)
-	if err != nil {
-		return nil, err
+	fullName := name
+	if name[0] != '/' {
+		fullName = "/" + name
 	}
-	entity := daemon.containerGraphDB.Get(fullName)
-	if entity == nil {
+	id, err := daemon.nameIndex.Get(fullName)
+	if err != nil {
 		return nil, fmt.Errorf("Could not find entity for %s", name)
 	}
-	e := daemon.containers.Get(entity.ID())
+	e := daemon.containers.Get(id)
 	if e == nil {
-		return nil, fmt.Errorf("Could not find container for entity id %s", entity.ID())
+		return nil, fmt.Errorf("Could not find container for entity id %s", id)
 	}
 	return e, nil
 }
@@ -584,48 +573,37 @@
 	daemon.EventsService.Evict(listener)
 }
 
-// children returns all child containers of the container with the
-// given name. The containers are returned as a map from the container
-// name to a pointer to Container.
-func (daemon *Daemon) children(name string) (map[string]*container.Container, error) {
-	name, err := GetFullContainerName(name)
-	if err != nil {
-		return nil, err
+// GetLabels for a container or image id
+func (daemon *Daemon) GetLabels(id string) map[string]string {
+	// TODO: TestCase
+	container := daemon.containers.Get(id)
+	if container != nil {
+		return container.Config.Labels
 	}
-	children := make(map[string]*container.Container)
 
-	err = daemon.containerGraphDB.Walk(name, func(p string, e *graphdb.Entity) error {
-		c, err := daemon.GetContainer(e.ID())
-		if err != nil {
-			return err
-		}
-		children[p] = c
-		return nil
-	}, 0)
-
-	if err != nil {
-		return nil, err
+	img, err := daemon.GetImage(id)
+	if err == nil {
+		return img.ContainerConfig.Labels
 	}
-	return children, nil
+	return nil
+}
+
+func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
+	return daemon.linkIndex.children(c)
 }
 
 // parents returns the names of the parent containers of the container
 // with the given name.
-func (daemon *Daemon) parents(name string) ([]string, error) {
-	name, err := GetFullContainerName(name)
-	if err != nil {
-		return nil, err
-	}
-
-	return daemon.containerGraphDB.Parents(name)
+func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
+	return daemon.linkIndex.parents(c)
 }
 
 func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
-	fullName := filepath.Join(parent.Name, alias)
-	if !daemon.containerGraphDB.Exists(fullName) {
-		_, err := daemon.containerGraphDB.Set(fullName, child.ID)
+	fullName := path.Join(parent.Name, alias)
+	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
 		return err
 	}
+	daemon.linkIndex.link(parent, child, fullName)
 	return nil
 }
 
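Note: daemon.children and daemon.parents now consult an in-memory linkIndex rather than the sqlite graph database. Based only on the link/children/parents calls visible in this diff, a plausible sketch of such an index (an approximation, not the real type):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type ctr struct{ name string } // stand-in for *container.Container

    // linkIndex keeps both directions of a link so children and parents
    // lookups are single map reads.
    type linkIndex struct {
    	mu       sync.Mutex
    	children map[*ctr]map[string]*ctr // parent -> link name -> child
    	parents  map[*ctr]map[string]*ctr // child -> link name -> parent
    }

    func newLinkIndex() *linkIndex {
    	return &linkIndex{
    		children: make(map[*ctr]map[string]*ctr),
    		parents:  make(map[*ctr]map[string]*ctr),
    	}
    }

    func (l *linkIndex) link(parent, child *ctr, name string) {
    	l.mu.Lock()
    	defer l.mu.Unlock()
    	if l.children[parent] == nil {
    		l.children[parent] = make(map[string]*ctr)
    	}
    	if l.parents[child] == nil {
    		l.parents[child] = make(map[string]*ctr)
    	}
    	l.children[parent][name] = child
    	l.parents[child][name] = parent
    }

    func main() {
    	idx := newLinkIndex()
    	web, db := &ctr{"web"}, &ctr{"db"}
    	idx.link(web, db, "/web/db")
    	fmt.Println(idx.children[web]["/web/db"].name) // db
    	fmt.Println(idx.parents[db]["/web/db"].name)   // web
    }
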
@@ -793,19 +771,8 @@
 
 	// Discovery is only enabled when the daemon is launched with an address to advertise.  When
 	// initialized, the daemon is registered and we can store the discovery backend as its read-only
-	// DiscoveryWatcher version.
-	if config.ClusterStore != "" && config.ClusterAdvertise != "" {
-		advertise, err := discovery.ParseAdvertise(config.ClusterStore, config.ClusterAdvertise)
-		if err != nil {
-			return nil, fmt.Errorf("discovery advertise parsing failed (%v)", err)
-		}
-		config.ClusterAdvertise = advertise
-		d.discoveryWatcher, err = initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
-		if err != nil {
-			return nil, fmt.Errorf("discovery initialization failed (%v)", err)
-		}
-	} else if config.ClusterAdvertise != "" {
-		return nil, fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
+	if err := d.initDiscovery(config); err != nil {
+		return nil, err
 	}
 
 	d.netController, err = d.initNetworkController(config)
@@ -813,14 +780,6 @@
 		return nil, fmt.Errorf("Error initializing network controller: %v", err)
 	}
 
-	graphdbPath := filepath.Join(config.Root, "linkgraph.db")
-	graph, err := graphdb.NewSqliteConn(graphdbPath)
-	if err != nil {
-		return nil, err
-	}
-
-	d.containerGraphDB = graph
-
 	sysInfo := sysinfo.New(false)
 	// Check if Devices cgroup is mounted, it is hard requirement for container security,
 	// on Linux/FreeBSD.
@@ -844,18 +803,24 @@
 	d.configStore = config
 	d.execDriver = ed
 	d.statsCollector = d.newStatsCollector(1 * time.Second)
-	d.defaultLogConfig = config.LogConfig
+	d.defaultLogConfig = containertypes.LogConfig{
+		Type:   config.LogConfig.Type,
+		Config: config.LogConfig.Config,
+	}
 	d.RegistryService = registryService
 	d.EventsService = eventsService
 	d.volumes = volStore
 	d.root = config.Root
 	d.uidMaps = uidMaps
 	d.gidMaps = gidMaps
+	d.seccompEnabled = sysInfo.Seccomp
+
+	d.nameIndex = registrar.NewRegistrar()
+	d.linkIndex = newLinkIndex()
 
 	if err := d.cleanupMounts(); err != nil {
 		return nil, err
 	}
-
 	go d.execCommandGC()
 
 	if err := d.restore(); err != nil {
@@ -933,12 +898,6 @@
 		daemon.netController.Stop()
 	}
 
-	if daemon.containerGraphDB != nil {
-		if err := daemon.containerGraphDB.Close(); err != nil {
-			logrus.Errorf("Error during container graph.Close(): %v", err)
-		}
-	}
-
 	if daemon.layerStore != nil {
 		if err := daemon.layerStore.Cleanup(); err != nil {
 			logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
@@ -1340,10 +1299,6 @@
 	return daemon.execDriver
 }
 
-func (daemon *Daemon) containerGraph() *graphdb.Database {
-	return daemon.containerGraphDB
-}
-
 // GetUIDGIDMaps returns the current daemon's user namespace settings
 // for the full uid and gid maps which will be applied to containers
 // started in this instance.
@@ -1420,9 +1375,14 @@
 		return err
 	}
 
+	// make sure links is not nil
+	// this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links
+	if hostConfig.Links == nil {
+		hostConfig.Links = []string{}
+	}
+
 	container.HostConfig = hostConfig
-	container.ToDisk()
-	return nil
+	return container.ToDisk()
 }
 
 func (daemon *Daemon) setupInitLayer(initPath string) error {
@@ -1552,6 +1512,76 @@
 	return container.NewBaseContainer(id, daemon.containerRoot(id))
 }
 
+// initDiscovery initializes the discovery watcher for this daemon.
+func (daemon *Daemon) initDiscovery(config *Config) error {
+	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
+	if err != nil {
+		if err == errDiscoveryDisabled {
+			return nil
+		}
+		return err
+	}
+
+	config.ClusterAdvertise = advertise
+	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
+	if err != nil {
+		return fmt.Errorf("discovery initialization failed (%v)", err)
+	}
+
+	daemon.discoveryWatcher = discoveryWatcher
+	return nil
+}
+
+// Reload reads configuration changes and modifies the
+// daemon according to those changes.
+// These are the settings that Reload changes:
+// - Daemon labels.
+// - Cluster discovery (reconfigure and restart).
+func (daemon *Daemon) Reload(config *Config) error {
+	daemon.configStore.reloadLock.Lock()
+	defer daemon.configStore.reloadLock.Unlock()
+
+	daemon.configStore.Labels = config.Labels
+	return daemon.reloadClusterDiscovery(config)
+}
+
+func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
+	newAdvertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
+	if err != nil && err != errDiscoveryDisabled {
+		return err
+	}
+
+	// check discovery modifications
+	if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, config.ClusterStore, config.ClusterOpts) {
+		return nil
+	}
+
+	// enable discovery for the first time if it was not previously enabled
+	if daemon.discoveryWatcher == nil {
+		discoveryWatcher, err := initDiscovery(config.ClusterStore, newAdvertise, config.ClusterOpts)
+		if err != nil {
+			return fmt.Errorf("discovery initialization failed (%v)", err)
+		}
+		daemon.discoveryWatcher = discoveryWatcher
+	} else {
+		if err == errDiscoveryDisabled {
+			// disable discovery if it was previously enabled and it's disabled now
+			daemon.discoveryWatcher.Stop()
+		} else {
+			// reload discovery
+			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
+				return err
+			}
+		}
+	}
+
+	daemon.configStore.ClusterStore = config.ClusterStore
+	daemon.configStore.ClusterOpts = config.ClusterOpts
+	daemon.configStore.ClusterAdvertise = newAdvertise
+
+	return nil
+}
+
 func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface {
 	n := &libcontainer.NetworkInterface{Name: name}
 	n.RxBytes = stats.RxBytes
diff --git a/daemon/daemon_experimental.go b/daemon/daemon_experimental.go
index cc3852c..3fd0e76 100644
--- a/daemon/daemon_experimental.go
+++ b/daemon/daemon_experimental.go
@@ -2,88 +2,8 @@
 
 package daemon
 
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/engine-api/types/container"
-)
-
-func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
-	if runtime.GOOS != "linux" && config.RemappedRoot != "" {
-		return nil, nil, fmt.Errorf("User namespaces are only supported on Linux")
-	}
-
-	// if the daemon was started with remapped root option, parse
-	// the config option to the int uid,gid values
-	var (
-		uidMaps, gidMaps []idtools.IDMap
-	)
-	if config.RemappedRoot != "" {
-		username, groupname, err := parseRemappedRoot(config.RemappedRoot)
-		if err != nil {
-			return nil, nil, err
-		}
-		if username == "root" {
-			// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
-			// effectively
-			logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
-			return uidMaps, gidMaps, nil
-		}
-		logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
-		// update remapped root setting now that we have resolved them to actual names
-		config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
-
-		uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname)
-		if err != nil {
-			return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err)
-		}
-	}
-	return uidMaps, gidMaps, nil
-}
-
-func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
-	config.Root = rootDir
-	// the docker root metadata directory needs to have execute permissions for all users (o+x)
-	// so that syscalls executing as non-root, operating on subdirectories of the graph root
-	// (e.g. mounted layers of a container) can traverse this path.
-	// The user namespace support will create subdirectories for the remapped root host uid:gid
-	// pair owned by that same uid:gid pair for proper write access to those needed metadata and
-	// layer content subtrees.
-	if _, err := os.Stat(rootDir); err == nil {
-		// root current exists; verify the access bits are correct by setting them
-		if err = os.Chmod(rootDir, 0701); err != nil {
-			return err
-		}
-	} else if os.IsNotExist(err) {
-		// no root exists yet, create it 0701 with root:root ownership
-		if err := os.MkdirAll(rootDir, 0701); err != nil {
-			return err
-		}
-	}
-
-	// if user namespaces are enabled we will create a subtree underneath the specified root
-	// with any/all specified remapped root uid/gid options on the daemon creating
-	// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
-	// `chdir()` to work for containers namespaced to that uid/gid)
-	if config.RemappedRoot != "" {
-		config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID))
-		logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
-		// Create the root directory if it doesn't exists
-		if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil {
-			return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
-		}
-	}
-	return nil
-}
+import "github.com/docker/engine-api/types/container"
 
 func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
-	if hostConfig.Privileged && daemon.configStore.RemappedRoot != "" {
-		return nil, fmt.Errorf("Privileged mode is incompatible with user namespace mappings")
-	}
 	return nil, nil
 }
diff --git a/daemon/daemon_stub.go b/daemon/daemon_stub.go
index d60f063..40e8ddc 100644
--- a/daemon/daemon_stub.go
+++ b/daemon/daemon_stub.go
@@ -2,26 +2,7 @@
 
 package daemon
 
-import (
-	"os"
-
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/engine-api/types/container"
-)
-
-func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
-	return nil, nil, nil
-}
-
-func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
-	config.Root = rootDir
-	// Create the root directory if it doesn't exists
-	if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
-		return err
-	}
-	return nil
-}
+import "github.com/docker/engine-api/types/container"
 
 func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
 	return nil, nil
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index 0d927f7..26e9c2f 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -3,12 +3,15 @@
 import (
 	"io/ioutil"
 	"os"
-	"path"
 	"path/filepath"
+	"reflect"
 	"testing"
+	"time"
 
 	"github.com/docker/docker/container"
-	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/discovery"
+	_ "github.com/docker/docker/pkg/discovery/memory"
+	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/volume"
 	volumedrivers "github.com/docker/docker/volume/drivers"
@@ -75,23 +78,18 @@
 	index.Add(c4.ID)
 	index.Add(c5.ID)
 
-	daemonTestDbPath := path.Join(os.TempDir(), "daemon_test.db")
-	graph, err := graphdb.NewSqliteConn(daemonTestDbPath)
-	if err != nil {
-		t.Fatalf("Failed to create daemon test sqlite database at %s", daemonTestDbPath)
-	}
-	graph.Set(c1.Name, c1.ID)
-	graph.Set(c2.Name, c2.ID)
-	graph.Set(c3.Name, c3.ID)
-	graph.Set(c4.Name, c4.ID)
-	graph.Set(c5.Name, c5.ID)
-
 	daemon := &Daemon{
-		containers:       store,
-		idIndex:          index,
-		containerGraphDB: graph,
+		containers: store,
+		idIndex:    index,
+		nameIndex:  registrar.NewRegistrar(),
 	}
 
+	daemon.reserveName(c1.ID, c1.Name)
+	daemon.reserveName(c2.ID, c2.Name)
+	daemon.reserveName(c3.ID, c3.Name)
+	daemon.reserveName(c4.ID, c4.Name)
+	daemon.reserveName(c5.ID, c5.Name)
+
 	if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 {
 		t.Fatal("Should explicitly match full container IDs")
 	}
@@ -120,8 +118,6 @@
 	if _, err := daemon.GetContainer("nothing"); err == nil {
 		t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID")
 	}
-
-	os.Remove(daemonTestDbPath)
 }
 
 func initDaemonWithVolumeStore(tmp string) (*Daemon, error) {
@@ -206,19 +202,6 @@
 	}
 }
 
-func TestGetFullName(t *testing.T) {
-	name, err := GetFullContainerName("testing")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if name != "/testing" {
-		t.Fatalf("Expected /testing got %s", name)
-	}
-	if _, err := GetFullContainerName(""); err == nil {
-		t.Fatal("Error should not be nil")
-	}
-}
-
 func TestValidContainerNames(t *testing.T) {
 	invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"}
 	validNames := []string{"word-word", "word_word", "1weoid"}
@@ -392,3 +375,118 @@
 		}
 	}
 }
+
+func TestDaemonReloadLabels(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &Config{
+		CommonConfig: CommonConfig{
+			Labels: []string{"foo:bar"},
+		},
+	}
+
+	newConfig := &Config{
+		CommonConfig: CommonConfig{
+			Labels: []string{"foo:baz"},
+		},
+	}
+
+	daemon.Reload(newConfig)
+	label := daemon.configStore.Labels[0]
+	if label != "foo:baz" {
+		t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
+	}
+}
+
+func TestDaemonDiscoveryReload(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     "memory://127.0.0.1",
+			ClusterAdvertise: "127.0.0.1:3333",
+		},
+	}
+
+	if err := daemon.initDiscovery(daemon.configStore); err != nil {
+		t.Fatal(err)
+	}
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "3333"},
+	}
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+
+	newConfig := &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     "memory://127.0.0.1:2222",
+			ClusterAdvertise: "127.0.0.1:5555",
+		},
+	}
+
+	expected = discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+	ch, errCh = daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
+
+func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &Config{}
+
+	newConfig := &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     "memory://127.0.0.1:2222",
+			ClusterAdvertise: "127.0.0.1:5555",
+		},
+	}
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index 3cff028..4233a7e 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -7,6 +7,7 @@
 	"net"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strconv"
 	"strings"
 	"syscall"
@@ -33,6 +34,7 @@
 	"github.com/docker/libnetwork/types"
 	blkiodev "github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/opencontainers/runc/libcontainer/label"
+	"github.com/opencontainers/runc/libcontainer/user"
 )
 
 const (
@@ -42,6 +44,9 @@
 	platformSupported = true
 	// It's not kernel limit, we want this 4M limit to supply a reasonable functional container
 	linuxMinMemory = 4194304
+	// constants for remapped root settings
+	defaultIDSpecifier string = "default"
+	defaultRemappedID  string = "dockremap"
 )
 
 func getBlkioWeightDevices(config *containertypes.HostConfig) ([]*blkiodev.WeightDevice, error) {
@@ -205,6 +210,10 @@
 		defaultSwappiness := int64(-1)
 		hostConfig.MemorySwappiness = &defaultSwappiness
 	}
+	if hostConfig.OomKillDisable == nil {
+		defaultOomKillDisable := false
+		hostConfig.OomKillDisable = &defaultOomKillDisable
+	}
 
 	return nil
 }
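Note: OomKillDisable becomes a *bool so the daemon can tell "not set" apart from "explicitly false"; the defaulting above and the warning logic later in this file both depend on that distinction. The pattern in isolation, as a self-contained sketch:

    package main

    import "fmt"

    type resources struct {
    	OomKillDisable *bool // nil means the caller did not specify a value
    }

    // applyDefaults fills in a default only when the field was left unset.
    func applyDefaults(r *resources) {
    	if r.OomKillDisable == nil {
    		def := false
    		r.OomKillDisable = &def
    	}
    }

    func main() {
    	var unset resources
    	applyDefaults(&unset)
    	fmt.Println(*unset.OomKillDisable) // false (defaulted)

    	t := true
    	set := resources{OomKillDisable: &t}
    	applyDefaults(&set)
    	fmt.Println(*set.OomKillDisable) // true (caller's choice preserved)
    }
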
@@ -265,9 +274,14 @@
 		warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
 		logrus.Warnf("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
 	}
-	if resources.OomKillDisable && !sysInfo.OomKillDisable {
-		resources.OomKillDisable = false
-		return warnings, fmt.Errorf("Your kernel does not support oom kill disable.")
+	if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
+		// only produce warnings if the setting was to *disable* the OOM killer; no point
+		// warning the caller if they already wanted the feature to be off
+		if *resources.OomKillDisable {
+			warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.")
+			logrus.Warnf("Your kernel does not support OomKillDisable, OomKillDisable discarded.")
+		}
+		resources.OomKillDisable = nil
 	}
 
 	// cpu subsystem checks and adjustments
@@ -375,6 +389,24 @@
 		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
 		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
 	}
+	// check for various conflicting options with user namespaces
+	if daemon.configStore.RemappedRoot != "" {
+		if hostConfig.Privileged {
+			return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces.")
+		}
+		if hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsContainer() {
+			return warnings, fmt.Errorf("Cannot share the host or a container's network namespace when user namespaces are enabled.")
+		}
+		if hostConfig.PidMode.IsHost() {
+			return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled.")
+		}
+		if hostConfig.IpcMode.IsContainer() {
+			return warnings, fmt.Errorf("Cannot share a container's IPC namespace when user namespaces are enabled.")
+		}
+		if hostConfig.ReadonlyRootfs {
+			return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled.")
+		}
+	}
 	return warnings, nil
 }
 
@@ -528,12 +560,16 @@
 		netOption[bridge.DefaultBindingIP] = config.Bridge.DefaultIP.String()
 	}
 
-	ipamV4Conf := libnetwork.IpamConf{}
+	var (
+		ipamV4Conf *libnetwork.IpamConf
+		ipamV6Conf *libnetwork.IpamConf
+	)
 
-	ipamV4Conf.AuxAddresses = make(map[string]string)
+	ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
 
-	if nw, _, err := ipamutils.ElectInterfaceAddresses(bridgeName); err == nil {
-		ipamV4Conf.PreferredPool = nw.String()
+	nw, nw6List, err := ipamutils.ElectInterfaceAddresses(bridgeName)
+	if err == nil {
+		ipamV4Conf.PreferredPool = types.GetIPNetCanonical(nw).String()
 		hip, _ := types.GetHostPartIP(nw.IP, nw.Mask)
 		if hip.IsGlobalUnicast() {
 			ipamV4Conf.Gateway = nw.IP.String()
@@ -564,10 +600,7 @@
 		ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.Bridge.DefaultGatewayIPv4.String()
 	}
 
-	var (
-		ipamV6Conf     *libnetwork.IpamConf
-		deferIPv6Alloc bool
-	)
+	var deferIPv6Alloc bool
 	if config.Bridge.FixedCIDRv6 != "" {
 		_, fCIDRv6, err := net.ParseCIDR(config.Bridge.FixedCIDRv6)
 		if err != nil {
@@ -587,6 +620,16 @@
 			ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
 		}
 		ipamV6Conf.PreferredPool = fCIDRv6.String()
+
+		// If --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
+		// address belongs to the same network, we need to inform libnetwork about
+		// it so that it can be reserved with IPAM and not given away to somebody else
+		for _, nw6 := range nw6List {
+			if fCIDRv6.Contains(nw6.IP) {
+				ipamV6Conf.Gateway = nw6.IP.String()
+				break
+			}
+		}
 	}
 
 	if config.Bridge.DefaultGatewayIPv6 != nil {
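Note: the loop above reserves the bridge's existing IPv6 address as the gateway when it falls inside the --fixed-cidr-v6 pool; the containment check itself is plain net.IPNet behavior. A quick self-contained illustration with hypothetical addresses:

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// hypothetical pool and bridge address, for illustration only
    	_, pool, err := net.ParseCIDR("2001:db8::/64")
    	if err != nil {
    		panic(err)
    	}
    	bridgeIP := net.ParseIP("2001:db8::1")
    	if pool.Contains(bridgeIP) {
    		fmt.Println("reserve", bridgeIP, "as the IPv6 gateway")
    	}
    }
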
@@ -596,13 +639,13 @@
 		ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.Bridge.DefaultGatewayIPv6.String()
 	}
 
-	v4Conf := []*libnetwork.IpamConf{&ipamV4Conf}
+	v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
 	v6Conf := []*libnetwork.IpamConf{}
 	if ipamV6Conf != nil {
 		v6Conf = append(v6Conf, ipamV6Conf)
 	}
 	// Initialize default network on "bridge" with the same name
-	_, err := controller.NewNetwork("bridge", "bridge",
+	_, err = controller.NewNetwork("bridge", "bridge",
 		libnetwork.NetworkOptionGeneric(options.Generic{
 			netlabel.GenericData: netOption,
 			netlabel.EnableIPv6:  config.Bridge.EnableIPv6,
@@ -674,9 +717,174 @@
 	return nil
 }
 
+// Parse the remapped root (user namespace) option, which can be one of:
+//   username            - valid username from /etc/passwd
+//   username:groupname  - valid username; valid groupname from /etc/group
+//   uid                 - 32-bit unsigned int valid Linux UID value
+//   uid:gid             - uid value; 32-bit unsigned int Linux GID value
+//
+//  If no groupname is specified, and a username is specified, an attempt
+//  will be made to look up a gid for that username as a groupname
+//
+//  If names are used, they are verified to exist in passwd/group
+func parseRemappedRoot(usergrp string) (string, string, error) {
+
+	var (
+		userID, groupID     int
+		username, groupname string
+	)
+
+	idparts := strings.Split(usergrp, ":")
+	if len(idparts) > 2 {
+		return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
+	}
+
+	if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
+		// must be a uid; take it as valid
+		userID = int(uid)
+		luser, err := user.LookupUid(userID)
+		if err != nil {
+			return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
+		}
+		username = luser.Name
+		if len(idparts) == 1 {
+			// if the uid was numeric and no gid was specified, take the uid as the gid
+			groupID = userID
+			lgrp, err := user.LookupGid(groupID)
+			if err != nil {
+				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
+			}
+			groupname = lgrp.Name
+		}
+	} else {
+		lookupName := idparts[0]
+		// special case: if the user specified "default", they want Docker to create or
+		// use (after creation) the "dockremap" user/group for root remapping
+		if lookupName == defaultIDSpecifier {
+			lookupName = defaultRemappedID
+		}
+		luser, err := user.LookupUser(lookupName)
+		if err != nil && idparts[0] != defaultIDSpecifier {
+			// error out if the lookup failed and the requested name was not the special "default" specifier
+			return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
+		} else if err != nil {
+			// special case: if the username == "default", then we have been asked
+			// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
+			// ranges will be used for the user and group mappings in user namespaced containers
+			_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
+			if err == nil {
+				return defaultRemappedID, defaultRemappedID, nil
+			}
+			return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
+		}
+		userID = luser.Uid
+		username = luser.Name
+		if len(idparts) == 1 {
+			// we only have a string username, and no group specified; look up gid from username as group
+			group, err := user.LookupGroup(lookupName)
+			if err != nil {
+				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
+			}
+			groupID = group.Gid
+			groupname = group.Name
+		}
+	}
+
+	if len(idparts) == 2 {
+		// groupname or gid is separately specified and must be resolved
+		// to an unsigned 32-bit gid
+		if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
+			// must be a gid; take it as valid
+			groupID = int(gid)
+			lgrp, err := user.LookupGid(groupID)
+			if err != nil {
+				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
+			}
+			groupname = lgrp.Name
+		} else {
+			// not a number; attempt a lookup
+			group, err := user.LookupGroup(idparts[1])
+			if err != nil {
+				return "", "", fmt.Errorf("Error during gid lookup for %q: %v", idparts[1], err)
+			}
+			groupID = group.Gid
+			groupname = idparts[1]
+		}
+	}
+	return username, groupname, nil
+}
+
+func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
+	if runtime.GOOS != "linux" && config.RemappedRoot != "" {
+		return nil, nil, fmt.Errorf("User namespaces are only supported on Linux")
+	}
+
+	// if the daemon was started with remapped root option, parse
+	// the config option to the int uid,gid values
+	var (
+		uidMaps, gidMaps []idtools.IDMap
+	)
+	if config.RemappedRoot != "" {
+		username, groupname, err := parseRemappedRoot(config.RemappedRoot)
+		if err != nil {
+			return nil, nil, err
+		}
+		if username == "root" {
+			// Cannot set up user namespaces with a 1-to-1 mapping;
+			// "--root=0:0" is effectively a no-op
+			logrus.Warnf("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
+			return uidMaps, gidMaps, nil
+		}
+		logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
+		// update remapped root setting now that we have resolved them to actual names
+		config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
+
+		uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname)
+		if err != nil {
+			return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err)
+		}
+	}
+	return uidMaps, gidMaps, nil
+}
+
+func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
+	config.Root = rootDir
+	// the docker root metadata directory needs to have execute permissions for all users (o+x)
+	// so that syscalls executing as non-root, operating on subdirectories of the graph root
+	// (e.g. mounted layers of a container) can traverse this path.
+	// The user namespace support will create subdirectories for the remapped root host uid:gid
+	// pair owned by that same uid:gid pair for proper write access to those needed metadata and
+	// layer content subtrees.
+	if _, err := os.Stat(rootDir); err == nil {
+		// root currently exists; verify the access bits are correct by setting them
+		if err = os.Chmod(rootDir, 0701); err != nil {
+			return err
+		}
+	} else if os.IsNotExist(err) {
+		// no root exists yet, create it 0701 with root:root ownership
+		if err := os.MkdirAll(rootDir, 0701); err != nil {
+			return err
+		}
+	}
+
+	// if user namespaces are enabled, we create a subtree underneath the specified
+	// root using the remapped root uid/gid options given to the daemon: a new
+	// subdirectory with ownership set to the remapped uid/gid (so as to allow
+	// `chdir()` to work for containers namespaced to that uid/gid)
+	if config.RemappedRoot != "" {
+		config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID))
+		logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
+		// Create the root directory if it doesn't exist
+		if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil {
+			return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
+		}
+	}
+	return nil
+}
+
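
With remapping active, the daemon root gains a uid.gid-named subdirectory under the 0701 graph root; a quick sketch of the resulting path, assuming a remapped pair of 100000:100000:

	package main

	import (
		"fmt"
		"path/filepath"
	)

	func main() {
		rootDir := "/var/lib/docker" // the o+x (0701) graph root, traversable by all users
		rootUID, rootGID := 100000, 100000
		// subdirectory created 0700 and owned by the remapped pair via idtools.MkdirAllAs
		fmt.Println(filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)))
		// /var/lib/docker/100000.100000
	}
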
 // registerLinks writes the links to a file.
 func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	if hostConfig == nil || hostConfig.Links == nil {
+	if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
 		return nil
 	}
 
@@ -707,12 +915,7 @@
 
 	// After we load all the links into the daemon
 	// set them to nil on the hostconfig
-	hostConfig.Links = nil
-	if err := container.WriteHostConfig(); err != nil {
-		return err
-	}
-
-	return nil
+	return container.WriteHostConfig()
 }
 
 // conditionalMountOnStart is a platform specific helper function during the
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
index 1e36892..3b571b6 100644
--- a/daemon/daemon_windows.go
+++ b/daemon/daemon_windows.go
@@ -4,6 +4,7 @@
 	"encoding/json"
 	"errors"
 	"fmt"
+	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
@@ -18,6 +19,7 @@
 	containertypes "github.com/docker/engine-api/types/container"
 	// register the windows graph driver
 	"github.com/docker/docker/daemon/graphdriver/windows"
+	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/libnetwork"
 	blkiodev "github.com/opencontainers/runc/libcontainer/configs"
@@ -135,6 +137,19 @@
 	return nil
 }
 
+func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, nil
+}
+
+func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
+	config.Root = rootDir
+	// Create the root directory if it doesn't exist
+	if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) {
+		return err
+	}
+	return nil
+}
+
 // conditionalMountOnStart is a platform specific helper function during the
 // container start to call mount.
 func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
diff --git a/daemon/delete.go b/daemon/delete.go
index af9903f..c8be1bc 100644
--- a/daemon/delete.go
+++ b/daemon/delete.go
@@ -1,8 +1,10 @@
 package daemon
 
 import (
+	"fmt"
 	"os"
 	"path"
+	"strings"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/container"
@@ -38,11 +40,10 @@
 	}
 
 	if config.RemoveLink {
-		return daemon.rmLink(name)
+		return daemon.rmLink(container, name)
 	}
 
 	if err := daemon.cleanupContainer(container, config.ForceRemove); err != nil {
-		// return derr.ErrorCodeCantDestroy.WithArgs(name, utils.GetErrorMessage(err))
 		return err
 	}
 
@@ -53,32 +54,29 @@
 	return nil
 }
 
-// rmLink removes link by name from other containers
-func (daemon *Daemon) rmLink(name string) error {
-	name, err := GetFullContainerName(name)
-	if err != nil {
-		return err
+func (daemon *Daemon) rmLink(container *container.Container, name string) error {
+	if name[0] != '/' {
+		name = "/" + name
 	}
 	parent, n := path.Split(name)
 	if parent == "/" {
-		return derr.ErrorCodeDefaultName
-	}
-	pe := daemon.containerGraph().Get(parent)
-	if pe == nil {
-		return derr.ErrorCodeNoParent.WithArgs(parent, name)
+		return fmt.Errorf("Conflict, cannot remove the default name of the container")
 	}
 
-	if err := daemon.containerGraph().Delete(name); err != nil {
-		return err
+	parent = strings.TrimSuffix(parent, "/")
+	pe, err := daemon.nameIndex.Get(parent)
+	if err != nil {
+		return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
 	}
 
-	parentContainer, _ := daemon.GetContainer(pe.ID())
+	daemon.releaseName(name)
+	parentContainer, _ := daemon.GetContainer(pe)
 	if parentContainer != nil {
+		daemon.linkIndex.unlink(name, container, parentContainer)
 		if err := daemon.updateNetwork(parentContainer); err != nil {
 			logrus.Debugf("Could not update network to remove link %s: %v", n, err)
 		}
 	}
-
 	return nil
 }
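
The link name handled by rmLink encodes the parent container and the alias as path segments; a standalone sketch of the split performed above:

	package main

	import (
		"fmt"
		"path"
		"strings"
	)

	func main() {
		name := "/webapp/db" // parent container "webapp", link alias "db"
		parent, alias := path.Split(name)
		parent = strings.TrimSuffix(parent, "/")
		fmt.Println(parent, alias) // /webapp db
	}
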
 
@@ -116,9 +114,8 @@
 	// indexes even if removal failed.
 	defer func() {
 		if err == nil || forceRemove {
-			if _, err := daemon.containerGraphDB.Purge(container.ID); err != nil {
-				logrus.Debugf("Unable to remove container from link graph: %s", err)
-			}
+			daemon.nameIndex.Delete(container.ID)
+			daemon.linkIndex.delete(container)
 			selinuxFreeLxcContexts(container.ProcessLabel)
 			daemon.idIndex.Delete(container.ID)
 			daemon.containers.Delete(container.ID)
@@ -139,7 +136,6 @@
 	if err = daemon.execDriver.Clean(container.ID); err != nil {
 		return derr.ErrorCodeRmExecDriver.WithArgs(container.ID, err)
 	}
-
 	return nil
 }
 
diff --git a/daemon/discovery.go b/daemon/discovery.go
index ef9307d..6c4bcc4 100644
--- a/daemon/discovery.go
+++ b/daemon/discovery.go
@@ -1,7 +1,9 @@
 package daemon
 
 import (
+	"errors"
 	"fmt"
+	"reflect"
 	"strconv"
 	"time"
 
@@ -19,6 +21,24 @@
 	defaultDiscoveryTTLFactor = 3
 )
 
+var errDiscoveryDisabled = errors.New("discovery is disabled")
+
+type discoveryReloader interface {
+	discovery.Watcher
+	Stop()
+	Reload(backend, address string, clusterOpts map[string]string) error
+}
+
+type daemonDiscoveryReloader struct {
+	backend discovery.Backend
+	ticker  *time.Ticker
+	term    chan bool
+}
+
+func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+	return d.backend.Watch(stopCh)
+}
+
 func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) {
 	var (
 		heartbeat = defaultDiscoveryHeartbeat
@@ -57,36 +77,94 @@
 
 // initDiscovery initializes the node discovery subsystem by connecting to the specified backend
 // and starts a registration loop to advertise the current node under the specified address.
-func initDiscovery(backend, address string, clusterOpts map[string]string) (discovery.Backend, error) {
-
-	heartbeat, ttl, err := discoveryOpts(clusterOpts)
+func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) {
+	heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
 	if err != nil {
 		return nil, err
 	}
 
-	discoveryBackend, err := discovery.New(backend, heartbeat, ttl, clusterOpts)
-	if err != nil {
-		return nil, err
+	reloader := &daemonDiscoveryReloader{
+		backend: backend,
+		ticker:  time.NewTicker(heartbeat),
+		term:    make(chan bool),
 	}
-
 	// We call Register() on the discovery backend in a loop for the whole lifetime of the daemon,
 	// but we never actually Watch() for nodes appearing and disappearing for the moment.
-	go registrationLoop(discoveryBackend, address, heartbeat)
-	return discoveryBackend, nil
+	reloader.advertise(advertiseAddress)
+	return reloader, nil
 }
 
-func registerAddr(backend discovery.Backend, addr string) {
-	if err := backend.Register(addr); err != nil {
+func (d *daemonDiscoveryReloader) advertise(address string) {
+	d.registerAddr(address)
+	go d.advertiseHeartbeat(address)
+}
+
+func (d *daemonDiscoveryReloader) registerAddr(addr string) {
+	if err := d.backend.Register(addr); err != nil {
 		log.Warnf("Registering as %q in discovery failed: %v", addr, err)
 	}
 }
 
-// registrationLoop registers the current node against the discovery backend using the specified
+// advertiseHeartbeat registers the current node against the discovery backend using the specified
 // address. The function never returns, as registration against the backend comes with a TTL and
 // requires regular heartbeats.
-func registrationLoop(discoveryBackend discovery.Backend, address string, heartbeat time.Duration) {
-	registerAddr(discoveryBackend, address)
-	for range time.Tick(heartbeat) {
-		registerAddr(discoveryBackend, address)
+func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
+	for {
+		select {
+		case <-d.ticker.C:
+			d.registerAddr(address)
+		case <-d.term:
+			return
+		}
 	}
 }
+
+// Reload stops the watcher from advertising and reconfigures it to advertise at a new address.
+func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
+	d.Stop()
+
+	heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
+	if err != nil {
+		return err
+	}
+
+	d.backend = backend
+	d.ticker = time.NewTicker(heartbeat)
+
+	d.advertise(advertiseAddress)
+	return nil
+}
+
+// Stop terminates the discovery advertising.
+func (d *daemonDiscoveryReloader) Stop() {
+	d.ticker.Stop()
+	d.term <- true
+}
+
+func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) {
+	heartbeat, ttl, err := discoveryOpts(clusterOpts)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts)
+	if err != nil {
+		return 0, nil, err
+	}
+	return heartbeat, backend, nil
+}
+
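
For reference, the cluster options consumed by discoveryOpts carry the heartbeat and TTL as string values; a sketch of the expected map (the key names follow the daemon's discovery documentation and should be treated as assumptions here):

	package main

	import "fmt"

	func main() {
		clusterOpts := map[string]string{
			"discovery.heartbeat": "20", // seconds between Register() heartbeats
			"discovery.ttl":       "60", // registration lifetime; must outlive the heartbeat
		}
		fmt.Println(clusterOpts)
	}
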
+// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
+func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
+	if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
+		return true
+	}
+
+	if (config.ClusterOpts == nil && clusterOpts == nil) ||
+		(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
+		(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
+		return false
+	}
+
+	return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
+}
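
The nil/empty special-casing above exists because reflect.DeepEqual distinguishes a nil map from an empty one; a standalone illustration:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		var nilMap map[string]string
		emptyMap := map[string]string{}
		fmt.Println(reflect.DeepEqual(nilMap, emptyMap)) // false: DeepEqual separates nil from empty
		fmt.Println(len(nilMap) == len(emptyMap))        // true: both hold zero entries
	}
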
diff --git a/daemon/discovery_test.go b/daemon/discovery_test.go
index e65aecb..c761a69 100644
--- a/daemon/discovery_test.go
+++ b/daemon/discovery_test.go
@@ -89,3 +89,64 @@
 		t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl)
 	}
 }
+
+func TestModifiedDiscoverySettings(t *testing.T) {
+	cases := []struct {
+		current  *Config
+		modified *Config
+		expected bool
+	}{
+		{
+			current:  discoveryConfig("foo", "bar", map[string]string{}),
+			modified: discoveryConfig("foo", "bar", map[string]string{}),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
+			modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", map[string]string{}),
+			modified: discoveryConfig("foo", "bar", nil),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("foo", "bar", map[string]string{}),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("baz", "bar", nil),
+			expected: true,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("foo", "baz", nil),
+			expected: true,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
+			expected: true,
+		},
+	}
+
+	for _, c := range cases {
+		got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts)
+		if c.expected != got {
+			t.Fatalf("expected %v, got %v: current config %q, new config %q", c.expected, got, c.current, c.modified)
+		}
+	}
+}
+
+func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config {
+	return &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     backendAddr,
+			ClusterAdvertise: advertiseAddr,
+			ClusterOpts:      opts,
+		},
+	}
+}
diff --git a/daemon/execdriver/native/apparmor.go b/daemon/execdriver/native/apparmor.go
index dffc6d3..5bbfef6 100644
--- a/daemon/execdriver/native/apparmor.go
+++ b/daemon/execdriver/native/apparmor.go
@@ -44,7 +44,11 @@
   file,
   umount,
 
-  deny @{PROC}/{*,**^[0-9]*,sys/kernel/shm*} wkx,
+  deny @{PROC}/* w,   # deny write for all files directly in /proc (not in a subdir)
+  # deny write to files not in /proc/<number>/** or /proc/sys/**
+  deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w,
+  deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
+  deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
   deny @{PROC}/sysrq-trigger rwklx,
   deny @{PROC}/mem rwklx,
   deny @{PROC}/kmem rwklx,
diff --git a/daemon/execdriver/native/seccomp.go b/daemon/execdriver/native/seccomp.go
index c34651a..8263012 100644
--- a/daemon/execdriver/native/seccomp.go
+++ b/daemon/execdriver/native/seccomp.go
@@ -5,32 +5,26 @@
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 
+	"github.com/docker/engine-api/types"
 	"github.com/opencontainers/runc/libcontainer/configs"
 	"github.com/opencontainers/runc/libcontainer/seccomp"
-	"github.com/opencontainers/specs"
 )
 
 func getDefaultSeccompProfile() *configs.Seccomp {
 	return defaultSeccompProfile
 }
 
-func loadSeccompProfile(path string) (*configs.Seccomp, error) {
-	f, err := ioutil.ReadFile(path)
-	if err != nil {
-		return nil, fmt.Errorf("Opening seccomp profile failed: %v", err)
-	}
-
-	var config specs.Seccomp
-	if err := json.Unmarshal(f, &config); err != nil {
+func loadSeccompProfile(body string) (*configs.Seccomp, error) {
+	var config types.Seccomp
+	if err := json.Unmarshal([]byte(body), &config); err != nil {
 		return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err)
 	}
 
 	return setupSeccomp(&config)
 }
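
loadSeccompProfile now takes the profile body directly rather than a file path. A sketch of the JSON shape it decodes, using locally defined stand-ins for the engine-api types (field names assumed from the published profile format):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// local stand-ins for the engine-api Seccomp types
	type syscallRule struct {
		Name   string `json:"name"`
		Action string `json:"action"`
	}

	type seccompProfile struct {
		DefaultAction string        `json:"defaultAction"`
		Syscalls      []syscallRule `json:"syscalls"`
	}

	func main() {
		body := `{
			"defaultAction": "SCMP_ACT_ERRNO",
			"syscalls": [
				{"name": "clone", "action": "SCMP_ACT_ALLOW"},
				{"name": "cacheflush", "action": "SCMP_ACT_ALLOW"}
			]
		}`
		var p seccompProfile
		if err := json.Unmarshal([]byte(body), &p); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", p)
	}
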
 
-func setupSeccomp(config *specs.Seccomp) (newConfig *configs.Seccomp, err error) {
+func setupSeccomp(config *types.Seccomp) (newConfig *configs.Seccomp, err error) {
 	if config == nil {
 		return nil, nil
 	}
diff --git a/daemon/execdriver/native/seccomp_default.go b/daemon/execdriver/native/seccomp_default.go
index 542ee95..b1c353a 100644
--- a/daemon/execdriver/native/seccomp_default.go
+++ b/daemon/execdriver/native/seccomp_default.go
@@ -1570,5 +1570,21 @@
 			Action: configs.Allow,
 			Args:   []*configs.Arg{},
 		},
+		// arm specific syscalls
+		{
+			Name:   "breakpoint",
+			Action: configs.Allow,
+			Args:   []*configs.Arg{},
+		},
+		{
+			Name:   "cacheflush",
+			Action: configs.Allow,
+			Args:   []*configs.Arg{},
+		},
+		{
+			Name:   "set_tls",
+			Action: configs.Allow,
+			Args:   []*configs.Arg{},
+		},
 	},
 }
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index d9c3618..d852234 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -35,7 +35,7 @@
 var (
 	defaultDataLoopbackSize     int64  = 100 * 1024 * 1024 * 1024
 	defaultMetaDataLoopbackSize int64  = 2 * 1024 * 1024 * 1024
-	defaultBaseFsSize           uint64 = 100 * 1024 * 1024 * 1024
+	defaultBaseFsSize           uint64 = 10 * 1024 * 1024 * 1024
 	defaultThinpBlockSize       uint32 = 128 // 64K = 128 512b sectors
 	defaultUdevSyncOverride            = false
 	maxDeviceID                        = 0xffffff // 24 bit, pool limit
@@ -47,6 +47,7 @@
 	driverDeferredRemovalSupport = false
 	enableDeferredRemoval        = false
 	enableDeferredDeletion       = false
+	userBaseSize                 = false
 )
 
 const deviceSetMetaFile string = "deviceset-metadata"
@@ -1056,6 +1057,80 @@
 	return nil
 }
 
+func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error {
+
+	if !userBaseSize {
+		return nil
+	}
+
+	if devices.baseFsSize < devices.getBaseDeviceSize() {
+		return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize())))
+	}
+
+	if devices.baseFsSize == devices.getBaseDeviceSize() {
+		return nil
+	}
+
+	info.lock.Lock()
+	defer info.lock.Unlock()
+
+	devices.Lock()
+	defer devices.Unlock()
+
+	info.Size = devices.baseFsSize
+
+	if err := devices.saveMetadata(info); err != nil {
+		// Try to remove unused device
+		delete(devices.Devices, info.Hash)
+		return err
+	}
+
+	return devices.growFS(info)
+}
+
+func (devices *DeviceSet) growFS(info *devInfo) error {
+	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
+		return fmt.Errorf("Error activating devmapper device: %s", err)
+	}
+
+	defer devices.deactivateDevice(info)
+
+	fsMountPoint := "/run/docker/mnt"
+	if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) {
+		if err := os.MkdirAll(fsMountPoint, 0700); err != nil {
+			return err
+		}
+		defer os.RemoveAll(fsMountPoint)
+	}
+
+	options := ""
+	if devices.BaseDeviceFilesystem == "xfs" {
+		// XFS needs nouuid or it can't mount filesystems with the same UUID
+		options = joinMountOptions(options, "nouuid")
+	}
+	options = joinMountOptions(options, devices.mountOptions)
+
+	if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
+		return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err)
+	}
+
+	defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH)
+
+	switch devices.BaseDeviceFilesystem {
+	case "ext4":
+		if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
+			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+		}
+	case "xfs":
+		if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
+			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
+		}
+	default:
+		return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem)
+	}
+	return nil
+}
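
Growth only runs when the operator set dm.basesize explicitly (tracked by the userBaseSize flag set in the option parsing below). The size string is parsed with units.RAMInBytes, which uses binary multiples; a quick check (import path assumed for the units helpers used above):

	package main

	import (
		"fmt"

		"github.com/docker/go-units"
	)

	func main() {
		size, err := units.RAMInBytes("20G") // binary multiples: 20 * 1024^3
		if err != nil {
			panic(err)
		}
		fmt.Println(size) // 21474836480
	}
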
+
 func (devices *DeviceSet) setupBaseImage() error {
 	oldInfo, _ := devices.lookupDeviceWithLock("")
 
@@ -1069,9 +1144,8 @@
 				return err
 			}
 
-			if devices.baseFsSize != defaultBaseFsSize && devices.baseFsSize != devices.getBaseDeviceSize() {
-				logrus.Warnf("devmapper: Base device is already initialized to size %s, new value of base device size %s will not take effect",
-					units.HumanSize(float64(devices.getBaseDeviceSize())), units.HumanSize(float64(devices.baseFsSize)))
+			if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil {
+				return err
 			}
 
 			return nil
@@ -1801,6 +1875,7 @@
 		if info.Deleted {
 			devices.nrDeletedDevices--
 		}
+		devices.markDeviceIDFree(info.DeviceID)
 	} else {
 		if err := devices.markForDeferredDeletion(info); err != nil {
 			return err
@@ -1855,8 +1930,6 @@
 		return err
 	}
 
-	devices.markDeviceIDFree(info.DeviceID)
-
 	return nil
 }
 
@@ -2379,6 +2452,7 @@
 			if err != nil {
 				return nil, err
 			}
+			userBaseSize = true
 			devices.baseFsSize = uint64(size)
 		case "dm.loopdatasize":
 			size, err := units.RAMInBytes(val)
diff --git a/daemon/image_delete.go b/daemon/image_delete.go
index 5de9f55..b6773f8 100644
--- a/daemon/image_delete.go
+++ b/daemon/image_delete.go
@@ -12,6 +12,17 @@
 	"github.com/docker/engine-api/types"
 )
 
+type conflictType int
+
+const (
+	conflictDependentChild conflictType = (1 << iota)
+	conflictRunningContainer
+	conflictActiveReference
+	conflictStoppedContainer
+	conflictHard = conflictDependentChild | conflictRunningContainer
+	conflictSoft = conflictActiveReference | conflictStoppedContainer
+)
+
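
The mask is composed with Go's bit operators; notably `&^` (AND NOT) clears bits, which is how the single-reference path below skips the active-reference check when not forcing. A standalone illustration:

	package main

	import "fmt"

	const (
		conflictDependentChild = 1 << iota // 0001
		conflictRunningContainer           // 0010
		conflictActiveReference            // 0100
		conflictStoppedContainer           // 1000

		conflictHard = conflictDependentChild | conflictRunningContainer
		conflictSoft = conflictActiveReference | conflictStoppedContainer
	)

	func main() {
		force := false
		c := conflictHard
		if !force {
			// add the soft conflicts, but clear the active-reference bit
			c |= conflictSoft &^ conflictActiveReference
		}
		fmt.Printf("%04b\n", c) // 1011: stopped|running|child set, active-reference clear
	}
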
 // ImageDelete deletes the image referenced by the given imageRef from this
 // daemon. The given imageRef can be an image ID, ID prefix, or a repository
 // reference (with an optional tag or digest, defaulting to the tag name
@@ -90,8 +101,34 @@
 		daemon.LogImageEvent(imgID.String(), imgID.String(), "untag")
 		records = append(records, untaggedRecord)
 
-		// If has remaining references then untag finishes the remove
-		if len(repoRefs) > 1 {
+		repoRefs = daemon.referenceStore.References(imgID)
+
+		// If this is a tag reference and all the remaining references
+		// to this image are digest references, delete the remaining
+		// references so that they don't prevent removal of the image.
+		if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical {
+			foundTagRef := false
+			for _, repoRef := range repoRefs {
+				if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical {
+					foundTagRef = true
+					break
+				}
+			}
+			if !foundTagRef {
+				for _, repoRef := range repoRefs {
+					if _, err := daemon.removeImageRef(repoRef); err != nil {
+						return records, err
+					}
+
+					untaggedRecord := types.ImageDelete{Untagged: repoRef.String()}
+					records = append(records, untaggedRecord)
+				}
+				repoRefs = []reference.Named{}
+			}
+		}
+
+		// If the image still has references remaining, the untag alone finishes the removal
+		if len(repoRefs) > 0 {
 			return records, nil
 		}
 
@@ -102,6 +139,14 @@
 		// remove that reference.
 		// FIXME: Is this the behavior we want?
 		if len(repoRefs) == 1 {
+			c := conflictHard
+			if !force {
+				c |= conflictSoft &^ conflictActiveReference
+			}
+			if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
+				return nil, conflict
+			}
+
 			parsedRef, err := daemon.removeImageRef(repoRefs[0])
 			if err != nil {
 				return nil, err
@@ -215,7 +260,11 @@
 func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error {
 	// First, determine if this image has any conflicts. Ignore soft conflicts
 	// if force is true.
-	if conflict := daemon.checkImageDeleteConflict(imgID, force); conflict != nil {
+	c := conflictHard
+	if !force {
+		c |= conflictSoft
+	}
+	if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil {
 		if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) {
 			// Ignore conflicts UNLESS the image is "dangling" or not being used in
 			// which case we want the user to know.
@@ -267,24 +316,9 @@
 // using the image. A soft conflict is any tags/digest referencing the given
 // image or any stopped container using the image. If ignoreSoftConflicts is
 // true, this function will not check for soft conflict conditions.
-func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, ignoreSoftConflicts bool) *imageDeleteConflict {
-	// Check for hard conflicts first.
-	if conflict := daemon.checkImageDeleteHardConflict(imgID); conflict != nil {
-		return conflict
-	}
-
-	// Then check for soft conflicts.
-	if ignoreSoftConflicts {
-		// Don't bother checking for soft conflicts.
-		return nil
-	}
-
-	return daemon.checkImageDeleteSoftConflict(imgID)
-}
-
-func (daemon *Daemon) checkImageDeleteHardConflict(imgID image.ID) *imageDeleteConflict {
+func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict {
 	// Check if the image has any descendent images.
-	if len(daemon.imageStore.Children(imgID)) > 0 {
+	if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 {
 		return &imageDeleteConflict{
 			hard:    true,
 			imgID:   imgID,
@@ -292,47 +326,47 @@
 		}
 	}
 
-	// Check if any running container is using the image.
-	for _, container := range daemon.List() {
-		if !container.IsRunning() {
-			// Skip this until we check for soft conflicts later.
-			continue
-		}
+	if mask&conflictRunningContainer != 0 {
+		// Check if any running container is using the image.
+		for _, container := range daemon.List() {
+			if !container.IsRunning() {
+				// Skip this until we check for soft conflicts later.
+				continue
+			}
 
-		if container.ImageID == imgID {
-			return &imageDeleteConflict{
-				imgID:   imgID,
-				hard:    true,
-				used:    true,
-				message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
+			if container.ImageID == imgID {
+				return &imageDeleteConflict{
+					imgID:   imgID,
+					hard:    true,
+					used:    true,
+					message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)),
+				}
 			}
 		}
 	}
 
-	return nil
-}
-
-func (daemon *Daemon) checkImageDeleteSoftConflict(imgID image.ID) *imageDeleteConflict {
 	// Check if any repository tags/digest reference this image.
-	if len(daemon.referenceStore.References(imgID)) > 0 {
+	if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 {
 		return &imageDeleteConflict{
 			imgID:   imgID,
 			message: "image is referenced in one or more repositories",
 		}
 	}
 
-	// Check if any stopped containers reference this image.
-	for _, container := range daemon.List() {
-		if container.IsRunning() {
-			// Skip this as it was checked above in hard conflict conditions.
-			continue
-		}
+	if mask&conflictStoppedContainer != 0 {
+		// Check if any stopped containers reference this image.
+		for _, container := range daemon.List() {
+			if container.IsRunning() {
+				// Skip this as it was checked above in hard conflict conditions.
+				continue
+			}
 
-		if container.ImageID == imgID {
-			return &imageDeleteConflict{
-				imgID:   imgID,
-				used:    true,
-				message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
+			if container.ImageID == imgID {
+				return &imageDeleteConflict{
+					imgID:   imgID,
+					used:    true,
+					message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)),
+				}
 			}
 		}
 	}
diff --git a/daemon/images.go b/daemon/images.go
index 2b2a994..e4c3797 100644
--- a/daemon/images.go
+++ b/daemon/images.go
@@ -57,7 +57,6 @@
 			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
 		}
 	}
-
 	if danglingOnly {
 		allImages = daemon.imageStore.Heads()
 	} else {
@@ -124,6 +123,11 @@
 		}
 		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
 			if all || len(daemon.imageStore.Children(id)) == 0 {
+
+				if imageFilters.Include("dangling") && !danglingOnly {
+					// dangling=false case, so skip dangling images
+					continue
+				}
 				if filter != "" { // skip images with no references if filtering by tag
 					continue
 				}
diff --git a/daemon/info.go b/daemon/info.go
index d060a14..804d6e4 100644
--- a/daemon/info.go
+++ b/daemon/info.go
@@ -54,9 +54,24 @@
 	initPath := utils.DockerInitPath("")
 	sysInfo := sysinfo.New(true)
 
+	var cRunning, cPaused, cStopped int
+	for _, c := range daemon.List() {
+		switch c.StateString() {
+		case "paused":
+			cPaused++
+		case "running":
+			cRunning++
+		default:
+			cStopped++
+		}
+	}
+
 	v := &types.Info{
 		ID:                 daemon.ID,
 		Containers:         len(daemon.List()),
+		ContainersRunning:  cRunning,
+		ContainersPaused:   cPaused,
+		ContainersStopped:  cStopped,
 		Images:             len(daemon.imageStore.Map()),
 		Driver:             daemon.GraphDriverName(),
 		DriverStatus:       daemon.layerStore.DriverStatus(),
@@ -64,7 +79,7 @@
 		IPv4Forwarding:     !sysInfo.IPv4ForwardingDisabled,
 		BridgeNfIptables:   !sysInfo.BridgeNfCallIptablesDisabled,
 		BridgeNfIP6tables:  !sysInfo.BridgeNfCallIP6tablesDisabled,
-		Debug:              os.Getenv("DEBUG") != "",
+		Debug:              utils.IsDebugEnabled(),
 		NFd:                fileutils.GetTotalUsedFds(),
 		NGoroutines:        runtime.NumGoroutine(),
 		SystemTime:         time.Now().Format(time.RFC3339Nano),
@@ -142,7 +157,7 @@
 		pluginsInfo.Network = append(pluginsInfo.Network, nd)
 	}
 
-	pluginsInfo.Authorization = daemon.configStore.AuthZPlugins
+	pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins
 
 	return pluginsInfo
 }
diff --git a/daemon/inspect.go b/daemon/inspect.go
index 098d43d..feb7de2 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -102,11 +102,12 @@
 	// make a copy to play with
 	hostConfig := *container.HostConfig
 
-	if children, err := daemon.children(container.Name); err == nil {
-		for linkAlias, child := range children {
-			hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
-		}
+	children := daemon.children(container)
+	hostConfig.Links = nil // do not expose the internal structure
+	for linkAlias, child := range children {
+		hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias))
 	}
+
 	// we need this trick to preserve empty log driver, so
 	// container will use daemon defaults even if daemon change them
 	if hostConfig.LogConfig.Type == "" {
diff --git a/daemon/links.go b/daemon/links.go
new file mode 100644
index 0000000..aaf1917
--- /dev/null
+++ b/daemon/links.go
@@ -0,0 +1,128 @@
+package daemon
+
+import (
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/graphdb"
+)
+
+// linkIndex stores link relationships between containers, including their specified alias.
+// The alias is the name the parent uses to reference the child.
+type linkIndex struct {
+	// idx maps a parent->alias->child relationship
+	idx map[*container.Container]map[string]*container.Container
+	// childIdx maps child->parent->aliases
+	childIdx map[*container.Container]map[*container.Container]map[string]struct{}
+	mu       sync.Mutex
+}
+
+func newLinkIndex() *linkIndex {
+	return &linkIndex{
+		idx:      make(map[*container.Container]map[string]*container.Container),
+		childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}),
+	}
+}
+
+// link adds indexes for the passed in parent/child/alias relationships
+func (l *linkIndex) link(parent, child *container.Container, alias string) {
+	l.mu.Lock()
+
+	if l.idx[parent] == nil {
+		l.idx[parent] = make(map[string]*container.Container)
+	}
+	l.idx[parent][alias] = child
+	if l.childIdx[child] == nil {
+		l.childIdx[child] = make(map[*container.Container]map[string]struct{})
+	}
+	if l.childIdx[child][parent] == nil {
+		l.childIdx[child][parent] = make(map[string]struct{})
+	}
+	l.childIdx[child][parent][alias] = struct{}{}
+
+	l.mu.Unlock()
+}
+
+// unlink removes the requested alias for the given parent/child
+func (l *linkIndex) unlink(alias string, child, parent *container.Container) {
+	l.mu.Lock()
+	delete(l.idx[parent], alias)
+	delete(l.childIdx[child], parent)
+	l.mu.Unlock()
+}
+
+// children maps all the alias->child pairs for the passed-in parent.
+// The aliases here are the names the parent uses to refer to the child.
+func (l *linkIndex) children(parent *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+	children := l.idx[parent]
+	l.mu.Unlock()
+	return children
+}
+
+// parents maps all the alias->parent pairs for the passed-in child.
+// The aliases here are the names the parents use to refer to the child.
+func (l *linkIndex) parents(child *container.Container) map[string]*container.Container {
+	l.mu.Lock()
+
+	parents := make(map[string]*container.Container)
+	for parent, aliases := range l.childIdx[child] {
+		for alias := range aliases {
+			parents[alias] = parent
+		}
+	}
+
+	l.mu.Unlock()
+	return parents
+}
+
+// delete deletes all link relationships referencing this container
+func (l *linkIndex) delete(container *container.Container) {
+	l.mu.Lock()
+	for _, child := range l.idx[container] {
+		delete(l.childIdx[child], container)
+	}
+	delete(l.idx, container)
+	delete(l.childIdx, container)
+	l.mu.Unlock()
+}
+
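
A sketch of how the index is meant to be exercised, with a stand-in container type (the real code passes *container.Container):

	package main

	import "fmt"

	// ctr is a stand-in for *container.Container
	type ctr struct{ name string }

	func main() {
		parent := &ctr{"webapp"}
		child := &ctr{"db"}

		// parent -> alias -> child, as in linkIndex.idx
		idx := map[*ctr]map[string]*ctr{}
		if idx[parent] == nil {
			idx[parent] = map[string]*ctr{}
		}
		idx[parent]["db"] = child

		for alias, c := range idx[parent] {
			fmt.Printf("%s -> %s (alias %q)\n", parent.name, c.name, alias)
		}
	}
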
+// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig
+// when sqlite links were used, hostConfig.Links was set to nil
+func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error {
+	// if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped
+	if container.HostConfig == nil || container.HostConfig.Links != nil {
+		return nil
+	}
+
+	logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID)
+
+	fullName := container.Name
+	if fullName[0] != '/' {
+		fullName = "/" + fullName
+	}
+
+	// don't use a nil slice; this ensures the check above skips the migration once it has completed
+	links := []string{}
+	children, err := db.Children(fullName, 0)
+	if err != nil {
+		if !strings.Contains(err.Error(), "Cannot find child for") {
+			return err
+		}
+		// else continue... it's ok if we didn't find any children, it'll just be nil and we can continue the migration
+	}
+
+	for _, child := range children {
+		c, err := daemon.GetContainer(child.Entity.ID())
+		if err != nil {
+			return err
+		}
+
+		links = append(links, c.Name+":"+child.Edge.Name)
+	}
+
+	container.HostConfig.Links = links
+	return container.WriteHostConfig()
+}
diff --git a/daemon/links_test.go b/daemon/links_test.go
new file mode 100644
index 0000000..79a6415
--- /dev/null
+++ b/daemon/links_test.go
@@ -0,0 +1,101 @@
+package daemon
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/graphdb"
+	"github.com/docker/docker/pkg/stringid"
+	containertypes "github.com/docker/engine-api/types/container"
+)
+
+func TestMigrateLegacySqliteLinks(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "legacy-sqlite-links-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	name1 := "test1"
+	c1 := &container.Container{
+		CommonContainer: container.CommonContainer{
+			ID:         stringid.GenerateNonCryptoID(),
+			Name:       name1,
+			HostConfig: &containertypes.HostConfig{},
+		},
+	}
+	c1.Root = tmpDir
+
+	name2 := "test2"
+	c2 := &container.Container{
+		CommonContainer: container.CommonContainer{
+			ID:   stringid.GenerateNonCryptoID(),
+			Name: name2,
+		},
+	}
+
+	store := &contStore{
+		s: map[string]*container.Container{
+			c1.ID: c1,
+			c2.ID: c2,
+		},
+	}
+
+	d := &Daemon{root: tmpDir, containers: store}
+	db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := db.Set("/"+name1, c1.ID); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := db.Set("/"+name2, c2.ID); err != nil {
+		t.Fatal(err)
+	}
+
+	alias := "hello"
+	if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := d.migrateLegacySqliteLinks(db, c1); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(c1.HostConfig.Links) != 1 {
+		t.Fatal("expected links to be populated but is empty")
+	}
+
+	expected := name2 + ":" + alias
+	actual := c1.HostConfig.Links[0]
+	if actual != expected {
+		t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual)
+	}
+
+	// ensure this is persisted
+	b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	type hc struct {
+		Links []string
+	}
+	var cfg hc
+	if err := json.Unmarshal(b, &cfg); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(cfg.Links) != 1 {
+		t.Fatalf("expected one entry in links, got: %d", len(cfg.Links))
+	}
+	if cfg.Links[0] != expected { // same expected as above
+		t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0])
+	}
+}
diff --git a/daemon/list.go b/daemon/list.go
index c5765e3..f262fbe 100644
--- a/daemon/list.go
+++ b/daemon/list.go
@@ -9,9 +9,9 @@
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/engine-api/types/filters"
+	networktypes "github.com/docker/engine-api/types/network"
 	"github.com/docker/go-connections/nat"
 )
 
@@ -197,12 +197,6 @@
 		})
 	}
 
-	names := make(map[string][]string)
-	daemon.containerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
-		names[e.ID()] = append(names[e.ID()], p)
-		return nil
-	}, 1)
-
 	if config.Before != "" && beforeContFilter == nil {
 		beforeContFilter, err = daemon.GetContainer(config.Before)
 		if err != nil {
@@ -220,12 +214,12 @@
 	return &listContext{
 		filters:          psFilters,
 		ancestorFilter:   ancestorFilter,
-		names:            names,
 		images:           imagesFilter,
 		exitAllowed:      filtExited,
 		beforeFilter:     beforeContFilter,
 		sinceFilter:      sinceContFilter,
 		ContainersConfig: config,
+		names:            daemon.nameIndex.GetAll(),
 	}, nil
 }
 
@@ -351,7 +345,30 @@
 	newC.Created = container.Created.Unix()
 	newC.Status = container.State.String()
 	newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode)
-	newC.NetworkSettings = &types.SummaryNetworkSettings{container.NetworkSettings.Networks}
+	// copy networks to avoid races
+	networks := make(map[string]*networktypes.EndpointSettings)
+	for name, network := range container.NetworkSettings.Networks {
+		if network == nil {
+			continue
+		}
+		networks[name] = &networktypes.EndpointSettings{
+			EndpointID:          network.EndpointID,
+			Gateway:             network.Gateway,
+			IPAddress:           network.IPAddress,
+			IPPrefixLen:         network.IPPrefixLen,
+			IPv6Gateway:         network.IPv6Gateway,
+			GlobalIPv6Address:   network.GlobalIPv6Address,
+			GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen,
+			MacAddress:          network.MacAddress,
+		}
+		if network.IPAMConfig != nil {
+			networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{
+				IPv4Address: network.IPAMConfig.IPv4Address,
+				IPv6Address: network.IPAMConfig.IPv6Address,
+			}
+		}
+	}
+	newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks}
 
 	newC.Ports = []types.Port{}
 	for port, bindings := range container.NetworkSettings.Ports {
diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go
index 7d052a6..f686194 100644
--- a/daemon/logger/syslog/syslog.go
+++ b/daemon/logger/syslog/syslog.go
@@ -4,9 +4,9 @@
 package syslog
 
 import (
+	"crypto/tls"
 	"errors"
 	"fmt"
-	"log/syslog"
 	"net"
 	"net/url"
 	"os"
@@ -14,13 +14,19 @@
 	"strconv"
 	"strings"
 
+	syslog "github.com/RackSec/srslog"
+
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/daemon/logger"
 	"github.com/docker/docker/daemon/logger/loggerutils"
 	"github.com/docker/docker/pkg/urlutil"
+	"github.com/docker/go-connections/tlsconfig"
 )
 
-const name = "syslog"
+const (
+	name        = "syslog"
+	secureProto = "tcp+tls"
+)
 
 var facilities = map[string]syslog.Priority{
 	"kern":     syslog.LOG_KERN,
@@ -77,12 +83,19 @@
 		return nil, err
 	}
 
-	log, err := syslog.Dial(
-		proto,
-		address,
-		facility,
-		path.Base(os.Args[0])+"/"+tag,
-	)
+	logTag := path.Base(os.Args[0]) + "/" + tag
+
+	var log *syslog.Writer
+	if proto == secureProto {
+		tlsConfig, tlsErr := parseTLSConfig(ctx.Config)
+		if tlsErr != nil {
+			return nil, tlsErr
+		}
+		log, err = syslog.DialWithTLSConfig(proto, address, facility, logTag, tlsConfig)
+	} else {
+		log, err = syslog.Dial(proto, address, facility, logTag)
+	}
+
 	if err != nil {
 		return nil, err
 	}
@@ -147,6 +160,10 @@
 		case "syslog-address":
 		case "syslog-facility":
 		case "syslog-tag":
+		case "syslog-tls-ca-cert":
+		case "syslog-tls-cert":
+		case "syslog-tls-key":
+		case "syslog-tls-skip-verify":
 		case "tag":
 		default:
 			return fmt.Errorf("unknown log opt '%s' for syslog log driver", key)
@@ -177,3 +194,16 @@
 
 	return syslog.Priority(0), errors.New("invalid syslog facility")
 }
+
+func parseTLSConfig(cfg map[string]string) (*tls.Config, error) {
+	_, skipVerify := cfg["syslog-tls-skip-verify"]
+
+	opts := tlsconfig.Options{
+		CAFile:             cfg["syslog-tls-ca-cert"],
+		CertFile:           cfg["syslog-tls-cert"],
+		KeyFile:            cfg["syslog-tls-key"],
+		InsecureSkipVerify: skipVerify,
+	}
+
+	return tlsconfig.Client(opts)
+}
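
The four TLS keys feed tlsconfig.Options above; a sketch of a log-opt map for the secure transport (address and paths hypothetical):

	package main

	import "fmt"

	func main() {
		// keys match the validateLogOpt cases added above; values are hypothetical
		cfg := map[string]string{
			"syslog-address":         "tcp+tls://syslog.example.com:6514",
			"syslog-tls-ca-cert":     "/etc/docker/syslog-ca.pem",
			"syslog-tls-cert":        "/etc/docker/syslog-cert.pem",
			"syslog-tls-key":         "/etc/docker/syslog-key.pem",
			"syslog-tls-skip-verify": "", // presence alone flips InsecureSkipVerify
		}
		fmt.Println(cfg)
	}
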
diff --git a/daemon/mounts.go b/daemon/mounts.go
index 4f3669d..81a8275 100644
--- a/daemon/mounts.go
+++ b/daemon/mounts.go
@@ -10,13 +10,8 @@
 
 func (daemon *Daemon) prepareMountPoints(container *container.Container) error {
 	for _, config := range container.MountPoints {
-		if len(config.Driver) > 0 {
-			v, err := daemon.volumes.GetWithRef(config.Name, config.Driver, container.ID)
-			if err != nil {
-				return err
-			}
-
-			config.Volume = v
+		if err := daemon.lazyInitializeVolume(container.ID, config); err != nil {
+			return err
 		}
 	}
 	return nil
diff --git a/daemon/network.go b/daemon/network.go
index 9b5daa2..f6a2515 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -101,7 +101,7 @@
 }
 
 // CreateNetwork creates a network with the given name, driver and other optional parameters
-func (daemon *Daemon) CreateNetwork(name, driver string, ipam network.IPAM, options map[string]string) (libnetwork.Network, error) {
+func (daemon *Daemon) CreateNetwork(name, driver string, ipam network.IPAM, options map[string]string, internal bool) (libnetwork.Network, error) {
 	c := daemon.netController
 	if driver == "" {
 		driver = c.Config().Daemon.DefaultDriver
@@ -114,8 +114,11 @@
 		return nil, err
 	}
 
-	nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, nil))
+	nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options))
 	nwOptions = append(nwOptions, libnetwork.NetworkOptionDriverOpts(options))
+	if internal {
+		nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
+	}
 	n, err := c.NewNetwork(driver, name, nwOptions...)
 	if err != nil {
 		return nil, err
@@ -160,12 +163,15 @@
 
 // DisconnectContainerFromNetwork disconnects the given container from
 // the given network. If either cannot be found, an error is returned.
-func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network) error {
+func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error {
 	container, err := daemon.GetContainer(containerName)
 	if err != nil {
+		if force {
+			return daemon.ForceEndpointDelete(containerName, network)
+		}
 		return err
 	}
-	return daemon.DisconnectFromNetwork(container, network)
+	return daemon.DisconnectFromNetwork(container, network, force)
 }
 
 // GetNetworkDriverList returns the list of plugins drivers
diff --git a/daemon/rename.go b/daemon/rename.go
index c09888c..eaed577 100644
--- a/daemon/rename.go
+++ b/daemon/rename.go
@@ -40,14 +40,11 @@
 		if err != nil {
 			container.Name = oldName
 			daemon.reserveName(container.ID, oldName)
-			daemon.containerGraphDB.Delete(newName)
+			daemon.releaseName(newName)
 		}
 	}()
 
-	if err = daemon.containerGraphDB.Delete(oldName); err != nil {
-		return derr.ErrorCodeRenameDelete.WithArgs(oldName, err)
-	}
-
+	daemon.releaseName(oldName)
 	if err = container.ToDisk(); err != nil {
 		return err
 	}
@@ -76,7 +73,6 @@
 	if err != nil {
 		return err
 	}
-
 	daemon.LogContainerEvent(container, "rename")
 	return nil
 }
diff --git a/daemon/start.go b/daemon/start.go
index 586fa9f..418dace 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -31,12 +31,22 @@
 		// creating a container, not during start.
 		if hostConfig != nil {
 			logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and will be removed in Docker 1.12")
+			oldNetworkMode := container.HostConfig.NetworkMode
 			if err := daemon.setSecurityOptions(container, hostConfig); err != nil {
 				return err
 			}
 			if err := daemon.setHostConfig(container, hostConfig); err != nil {
 				return err
 			}
+			newNetworkMode := container.HostConfig.NetworkMode
+			if string(oldNetworkMode) != string(newNetworkMode) {
+				// if the user has changed the network mode on start, clean up the
+				// old networks. This is a deprecated feature and will be removed in Docker 1.12
+				container.NetworkSettings.Networks = nil
+				if err := container.ToDisk(); err != nil {
+					return err
+				}
+			}
 			container.InitDNSHostConfig()
 		}
 	} else {
@@ -132,15 +142,9 @@
 	mounts = append(mounts, container.TmpfsMounts()...)
 
 	container.Command.Mounts = mounts
-	container.Unlock()
-
-	// don't lock waitForStart because it has potential risk of blocking
-	// which will lead to dead lock, forever.
 	if err := daemon.waitForStart(container); err != nil {
-		container.Lock()
 		return err
 	}
-	container.Lock()
 	container.HasBeenStartedBefore = true
 	return nil
 }
diff --git a/daemon/volumes.go b/daemon/volumes.go
index a7b648a..c009767 100644
--- a/daemon/volumes.go
+++ b/daemon/volumes.go
@@ -153,3 +153,17 @@
 
 	return nil
 }
+
+// lazyInitializeVolume initializes a mountpoint's volume if needed.
+// This happens after a daemon restart.
+func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
+	if len(m.Driver) > 0 && m.Volume == nil {
+		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
+
+		if err != nil {
+			return err
+		}
+		m.Volume = v
+	}
+	return nil
+}
diff --git a/daemon/volumes_unix.go b/daemon/volumes_unix.go
index ea19953..f257da7 100644
--- a/daemon/volumes_unix.go
+++ b/daemon/volumes_unix.go
@@ -20,6 +20,9 @@
 func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
 	var mounts []execdriver.Mount
 	for _, m := range container.MountPoints {
+		if err := daemon.lazyInitializeVolume(container.ID, m); err != nil {
+			return nil, err
+		}
 		path, err := m.Setup()
 		if err != nil {
 			return nil, err
diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go
index dd4a9c9..05a45c3 100644
--- a/daemon/volumes_windows.go
+++ b/daemon/volumes_windows.go
@@ -18,6 +18,9 @@
 func (daemon *Daemon) setupMounts(container *container.Container) ([]execdriver.Mount, error) {
 	var mnts []execdriver.Mount
 	for _, mount := range container.MountPoints { // type is volume.MountPoint
+		if err := daemon.lazyInitializeVolume(container.ID, mount); err != nil {
+			return nil, err
+		}
 		// If there is no source, take it from the volume path
 		s := mount.Source
 		if s == "" && mount.Volume != nil {
diff --git a/distribution/metadata/blobsum_service.go b/distribution/metadata/blobsum_service.go
deleted file mode 100644
index 88ed7bb..0000000
--- a/distribution/metadata/blobsum_service.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metadata
-
-import (
-	"encoding/json"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-// BlobSumService maps layer IDs to a set of known blobsums for
-// the layer.
-type BlobSumService struct {
-	store Store
-}
-
-// maxBlobSums is the number of blobsums to keep per layer DiffID.
-const maxBlobSums = 5
-
-// NewBlobSumService creates a new blobsum mapping service.
-func NewBlobSumService(store Store) *BlobSumService {
-	return &BlobSumService{
-		store: store,
-	}
-}
-
-func (blobserv *BlobSumService) diffIDNamespace() string {
-	return "blobsum-storage"
-}
-
-func (blobserv *BlobSumService) blobSumNamespace() string {
-	return "blobsum-lookup"
-}
-
-func (blobserv *BlobSumService) diffIDKey(diffID layer.DiffID) string {
-	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
-}
-
-func (blobserv *BlobSumService) blobSumKey(blobsum digest.Digest) string {
-	return string(blobsum.Algorithm()) + "/" + blobsum.Hex()
-}
-
-// GetBlobSums finds the blobsums associated with a layer DiffID.
-func (blobserv *BlobSumService) GetBlobSums(diffID layer.DiffID) ([]digest.Digest, error) {
-	jsonBytes, err := blobserv.store.Get(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID))
-	if err != nil {
-		return nil, err
-	}
-
-	var blobsums []digest.Digest
-	if err := json.Unmarshal(jsonBytes, &blobsums); err != nil {
-		return nil, err
-	}
-
-	return blobsums, nil
-}
-
-// GetDiffID finds a layer DiffID from a blobsum hash.
-func (blobserv *BlobSumService) GetDiffID(blobsum digest.Digest) (layer.DiffID, error) {
-	diffIDBytes, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum))
-	if err != nil {
-		return layer.DiffID(""), err
-	}
-
-	return layer.DiffID(diffIDBytes), nil
-}
-
-// Add associates a blobsum with a layer DiffID. If too many blobsums are
-// present, the oldest one is dropped.
-func (blobserv *BlobSumService) Add(diffID layer.DiffID, blobsum digest.Digest) error {
-	oldBlobSums, err := blobserv.GetBlobSums(diffID)
-	if err != nil {
-		oldBlobSums = nil
-	}
-	newBlobSums := make([]digest.Digest, 0, len(oldBlobSums)+1)
-
-	// Copy all other blobsums to new slice
-	for _, oldSum := range oldBlobSums {
-		if oldSum != blobsum {
-			newBlobSums = append(newBlobSums, oldSum)
-		}
-	}
-
-	newBlobSums = append(newBlobSums, blobsum)
-
-	if len(newBlobSums) > maxBlobSums {
-		newBlobSums = newBlobSums[len(newBlobSums)-maxBlobSums:]
-	}
-
-	jsonBytes, err := json.Marshal(newBlobSums)
-	if err != nil {
-		return err
-	}
-
-	err = blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes)
-	if err != nil {
-		return err
-	}
-
-	return blobserv.store.Set(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum), []byte(diffID))
-}
diff --git a/distribution/metadata/blobsum_service_test.go b/distribution/metadata/blobsum_service_test.go
deleted file mode 100644
index dee64df..0000000
--- a/distribution/metadata/blobsum_service_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package metadata
-
-import (
-	"io/ioutil"
-	"os"
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-func TestBlobSumService(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
-	if err != nil {
-		t.Fatalf("could not create temp dir: %v", err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	metadataStore, err := NewFSMetadataStore(tmpDir)
-	if err != nil {
-		t.Fatalf("could not create metadata store: %v", err)
-	}
-	blobSumService := NewBlobSumService(metadataStore)
-
-	testVectors := []struct {
-		diffID   layer.DiffID
-		blobsums []digest.Digest
-	}{
-		{
-			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
-				digest.Digest("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"),
-				digest.Digest("sha256:8902a7ca89aabbb868835260912159026637634090dd8899eee969523252236e"),
-				digest.Digest("sha256:c84364306344ccc48532c52ff5209236273525231dddaaab53262322352883aa"),
-				digest.Digest("sha256:aa7583bbc87532a8352bbb72520a821b3623523523a8352523a52352aaa888fe"),
-			},
-		},
-	}
-
-	// Set some associations
-	for _, vec := range testVectors {
-		for _, blobsum := range vec.blobsums {
-			err := blobSumService.Add(vec.diffID, blobsum)
-			if err != nil {
-				t.Fatalf("error calling Set: %v", err)
-			}
-		}
-	}
-
-	// Check the correct values are read back
-	for _, vec := range testVectors {
-		blobsums, err := blobSumService.GetBlobSums(vec.diffID)
-		if err != nil {
-			t.Fatalf("error calling Get: %v", err)
-		}
-		expectedBlobsums := len(vec.blobsums)
-		if expectedBlobsums > 5 {
-			expectedBlobsums = 5
-		}
-		if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) {
-			t.Fatal("Get returned incorrect layer ID")
-		}
-	}
-
-	// Test GetBlobSums on a nonexistent entry
-	_, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Test GetDiffID on a nonexistent entry
-	_, err = blobSumService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Overwrite one of the entries and read it back
-	err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0])
-	if err != nil {
-		t.Fatalf("error calling Add: %v", err)
-	}
-	diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0])
-	if err != nil {
-		t.Fatalf("error calling GetDiffID: %v", err)
-	}
-	if diffID != testVectors[1].diffID {
-		t.Fatal("GetDiffID returned incorrect diffID")
-	}
-}
diff --git a/distribution/metadata/metadata.go b/distribution/metadata/metadata.go
index ab9cc5b..9f744d4 100644
--- a/distribution/metadata/metadata.go
+++ b/distribution/metadata/metadata.go
@@ -15,6 +15,8 @@
 	Get(namespace string, key string) ([]byte, error)
 	// Set writes data indexed by namespace and key.
 	Set(namespace, key string, value []byte) error
+	// Delete removes data indexed by namespace and key.
+	Delete(namespace, key string) error
 }
 
 // FSMetadataStore uses the filesystem to associate metadata with layer and
@@ -63,3 +65,13 @@
 	}
 	return os.Rename(tempFilePath, path)
 }
+
+// Delete removes data indexed by namespace and key. The data file named after
+// the key, stored in the namespace's directory, is deleted.
+func (store *FSMetadataStore) Delete(namespace, key string) error {
+	store.Lock()
+	defer store.Unlock()
+
+	path := store.path(namespace, key)
+	return os.Remove(path)
+}
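
Delete completes the Get/Set pair; the store lays files out as <root>/<namespace>/<key>. A minimal round-trip sketch against this package (root path hypothetical):

	package main

	import (
		"fmt"

		"github.com/docker/docker/distribution/metadata"
	)

	func main() {
		store, err := metadata.NewFSMetadataStore("/tmp/metadata-demo") // hypothetical root
		if err != nil {
			panic(err)
		}
		if err := store.Set("demo-ns", "some-key", []byte("value")); err != nil {
			panic(err)
		}
		b, _ := store.Get("demo-ns", "some-key") // reads /tmp/metadata-demo/demo-ns/some-key
		fmt.Println(string(b))
		_ = store.Delete("demo-ns", "some-key") // removes that file
	}
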
diff --git a/distribution/metadata/v2_metadata_service.go b/distribution/metadata/v2_metadata_service.go
new file mode 100644
index 0000000..239cd1f
--- /dev/null
+++ b/distribution/metadata/v2_metadata_service.go
@@ -0,0 +1,137 @@
+package metadata
+
+import (
+	"encoding/json"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+// V2MetadataService maps layer IDs to a set of known metadata for
+// the layer.
+type V2MetadataService struct {
+	store Store
+}
+
+// V2Metadata contains the digest and source repository information for a layer.
+type V2Metadata struct {
+	Digest           digest.Digest
+	SourceRepository string
+}
+
+// maxMetadata is the number of metadata entries to keep per layer DiffID.
+const maxMetadata = 50
+
+// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
+func NewV2MetadataService(store Store) *V2MetadataService {
+	return &V2MetadataService{
+		store: store,
+	}
+}
+
+func (serv *V2MetadataService) diffIDNamespace() string {
+	return "v2metadata-by-diffid"
+}
+
+func (serv *V2MetadataService) digestNamespace() string {
+	return "diffid-by-digest"
+}
+
+func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string {
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+}
+
+func (serv *V2MetadataService) digestKey(dgst digest.Digest) string {
+	return string(dgst.Algorithm()) + "/" + dgst.Hex()
+}
+
+// GetMetadata finds the metadata associated with a layer DiffID.
+func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
+	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	if err != nil {
+		return nil, err
+	}
+
+	var metadata []V2Metadata
+	if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
+		return nil, err
+	}
+
+	return metadata, nil
+}
+
+// GetDiffID finds a layer DiffID from a digest.
+func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
+	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
+	if err != nil {
+		return layer.DiffID(""), err
+	}
+
+	return layer.DiffID(diffIDBytes), nil
+}
+
+// Add associates metadata with a layer DiffID. If too many metadata entries are
+// present, the oldest one is dropped.
+func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	newMetadata = append(newMetadata, metadata)
+
+	if len(newMetadata) > maxMetadata {
+		newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
+}
+
+// Remove dissociates a metadata entry from a layer DiffID.
+func (serv *V2MetadataService) Remove(metadata V2Metadata) error {
+	diffID, err := serv.GetDiffID(metadata.Digest)
+	if err != nil {
+		return err
+	}
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata))
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	if len(newMetadata) == 0 {
+		return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+}
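
V2MetadataService layers two indexes on the store: "v2metadata-by-diffid" (DiffID → JSON-encoded []V2Metadata, capped at maxMetadata entries) and "diffid-by-digest" (digest → DiffID). A sketch of one round trip through Add, GetDiffID, and Remove, assuming an FSMetadataStore backing; the digests and repository name are placeholders:

package main

import (
	"io/ioutil"
	"os"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
)

func main() {
	tmpDir, err := ioutil.TempDir("", "v2-metadata-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmpDir)

	store, err := metadata.NewFSMetadataStore(tmpDir)
	if err != nil {
		panic(err)
	}
	svc := metadata.NewV2MetadataService(store)

	diffID := layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
	meta := metadata.V2Metadata{
		Digest:           digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
		SourceRepository: "docker.io/library/busybox", // placeholder repo name
	}

	// Record that this blobsum, in this repository, produced the layer
	// with the given DiffID. Writes both indexes.
	if err := svc.Add(diffID, meta); err != nil {
		panic(err)
	}

	// Reverse lookup used on pull to skip re-downloading a known blob.
	if got, err := svc.GetDiffID(meta.Digest); err != nil || got != diffID {
		panic("digest -> DiffID round trip failed")
	}

	// Removing the last entry for a DiffID deletes the index entry
	// outright, via the new Store.Delete.
	if err := svc.Remove(meta); err != nil {
		panic(err)
	}
}
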
diff --git a/distribution/metadata/v2_metadata_service_test.go b/distribution/metadata/v2_metadata_service_test.go
new file mode 100644
index 0000000..7b0ecb1
--- /dev/null
+++ b/distribution/metadata/v2_metadata_service_test.go
@@ -0,0 +1,115 @@
+package metadata
+
+import (
+	"encoding/hex"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+func TestV2MetadataService(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
+	if err != nil {
+		t.Fatalf("could not create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	metadataStore, err := NewFSMetadataStore(tmpDir)
+	if err != nil {
+		t.Fatalf("could not create metadata store: %v", err)
+	}
+	V2MetadataService := NewV2MetadataService(metadataStore)
+
+	tooManyBlobSums := make([]V2Metadata, 100)
+	for i := range tooManyBlobSums {
+		randDigest := randomDigest()
+		tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
+	}
+
+	testVectors := []struct {
+		diffID   layer.DiffID
+		metadata []V2Metadata
+	}{
+		{
+			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
+			metadata: []V2Metadata{
+				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
+			},
+		},
+		{
+			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
+			metadata: []V2Metadata{
+				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
+				{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
+			},
+		},
+		{
+			diffID:   layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
+			metadata: tooManyBlobSums,
+		},
+	}
+
+	// Set some associations
+	for _, vec := range testVectors {
+		for _, blobsum := range vec.metadata {
+			err := V2MetadataService.Add(vec.diffID, blobsum)
+			if err != nil {
+				t.Fatalf("error calling Set: %v", err)
+			}
+		}
+	}
+
+	// Check the correct values are read back
+	for _, vec := range testVectors {
+		metadata, err := V2MetadataService.GetMetadata(vec.diffID)
+		if err != nil {
+			t.Fatalf("error calling Get: %v", err)
+		}
+		expectedMetadataEntries := len(vec.metadata)
+		if expectedMetadataEntries > 50 {
+			expectedMetadataEntries = 50
+		}
+		if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
+			t.Fatal("Get returned incorrect layer ID")
+		}
+	}
+
+	// Test GetMetadata on a nonexistent entry
+	_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Test GetDiffID on a nonexistent entry
+	_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Overwrite one of the entries and read it back
+	err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
+	if err != nil {
+		t.Fatalf("error calling Add: %v", err)
+	}
+	diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
+	if err != nil {
+		t.Fatalf("error calling GetDiffID: %v", err)
+	}
+	if diffID != testVectors[1].diffID {
+		t.Fatal("GetDiffID returned incorrect diffID")
+	}
+}
+
+func randomDigest() digest.Digest {
+	b := [32]byte{}
+	for i := 0; i < len(b); i++ {
+		b[i] = byte(rand.Intn(256))
+	}
+	d := hex.EncodeToString(b[:])
+	return digest.Digest("sha256:" + d)
+}
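
The tooManyBlobSums vector pins down the eviction rule in Add: one hundred entries against a single DiffID leave exactly the newest maxMetadata (50), oldest dropped first. A hypothetical standalone restatement of that check, assuming the same package and imports as the test above so randomDigest, maxMetadata, and V2Metadata are in scope:

func checkEviction(t *testing.T, svc *V2MetadataService) {
	diffID := layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb")
	for i := 0; i < 100; i++ {
		if err := svc.Add(diffID, V2Metadata{Digest: randomDigest()}); err != nil {
			t.Fatal(err)
		}
	}
	entries, err := svc.GetMetadata(diffID)
	if err != nil {
		t.Fatal(err)
	}
	// Add trims to newMetadata[len-maxMetadata:], so only the last 50 survive.
	if len(entries) != maxMetadata {
		t.Fatalf("expected %d entries, got %d", maxMetadata, len(entries))
	}
}
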
diff --git a/distribution/pull.go b/distribution/pull.go
index db6e29d..5f38a67 100644
--- a/distribution/pull.go
+++ b/distribution/pull.go
@@ -61,10 +61,10 @@
 	switch endpoint.Version {
 	case registry.APIVersion2:
 		return &v2Puller{
-			blobSumService: metadata.NewBlobSumService(imagePullConfig.MetadataStore),
-			endpoint:       endpoint,
-			config:         imagePullConfig,
-			repoInfo:       repoInfo,
+			V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
+			endpoint:          endpoint,
+			config:            imagePullConfig,
+			repoInfo:          repoInfo,
 		}, nil
 	case registry.APIVersion1:
 		return &v1Puller{
diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go
index 59ed902..7bb1710 100644
--- a/distribution/pull_v2.go
+++ b/distribution/pull_v2.go
@@ -12,8 +12,11 @@
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest/manifestlist"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
@@ -27,12 +30,14 @@
 	"golang.org/x/net/context"
 )
 
+var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
+
 type v2Puller struct {
-	blobSumService *metadata.BlobSumService
-	endpoint       registry.APIEndpoint
-	config         *ImagePullConfig
-	repoInfo       *registry.RepositoryInfo
-	repo           distribution.Repository
+	V2MetadataService *metadata.V2MetadataService
+	endpoint          registry.APIEndpoint
+	config            *ImagePullConfig
+	repoInfo          *registry.RepositoryInfo
+	repo              distribution.Repository
 	// confirmedV2 is set to true if we confirm we're talking to a v2
 	// registry. This is used to limit fallbacks to the v1 protocol.
 	confirmedV2 bool
@@ -61,18 +66,12 @@
 func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
 	var layersDownloaded bool
 	if !reference.IsNameOnly(ref) {
-		var err error
 		layersDownloaded, err = p.pullV2Tag(ctx, ref)
 		if err != nil {
 			return err
 		}
 	} else {
-		manSvc, err := p.repo.Manifests(ctx)
-		if err != nil {
-			return err
-		}
-
-		tags, err := manSvc.Tags()
+		tags, err := p.repo.Tags(ctx).All(ctx)
 		if err != nil {
 			// If this repository doesn't exist on V2, we should
 			// permit a fallback to V1.
@@ -84,8 +83,6 @@
 		// error later on.
 		p.confirmedV2 = true
 
-		// This probably becomes a lot nicer after the manifest
-		// refactor...
 		for _, tag := range tags {
 			tagRef, err := reference.WithTag(ref, tag)
 			if err != nil {
@@ -113,9 +110,10 @@
 }
 
 type v2LayerDescriptor struct {
-	digest         digest.Digest
-	repo           distribution.Repository
-	blobSumService *metadata.BlobSumService
+	digest            digest.Digest
+	repoInfo          *registry.RepositoryInfo
+	repo              distribution.Repository
+	V2MetadataService *metadata.V2MetadataService
 }
 
 func (ld *v2LayerDescriptor) Key() string {
@@ -127,7 +125,7 @@
 }
 
 func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
-	return ld.blobSumService.GetDiffID(ld.digest)
+	return ld.V2MetadataService.GetDiffID(ld.digest)
 }
 
 func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
@@ -199,62 +197,115 @@
 
 func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
 	// Cache mapping from this layer's DiffID to the blobsum
-	ld.blobSumService.Add(diffID, ld.digest)
+	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
 }
 
 func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
-	tagOrDigest := ""
-	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
-		tagOrDigest = tagged.Tag()
-	} else if digested, isCanonical := ref.(reference.Canonical); isCanonical {
-		tagOrDigest = digested.Digest().String()
-	} else {
-		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
-	}
-
-	logrus.Debugf("Pulling ref from V2 registry: %s:%s", ref.FullName(), tagOrDigest)
-
 	manSvc, err := p.repo.Manifests(ctx)
 	if err != nil {
 		return false, err
 	}
 
-	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
-	if err != nil {
-		// If this manifest did not exist, we should allow a possible
-		// fallback to the v1 protocol, because dual-version setups may
-		// not host all manifests with the v2 protocol. We may also get
-		// a "not authorized" error if the manifest doesn't exist.
-		return false, allowV1Fallback(err)
+	var (
+		manifest    distribution.Manifest
+		tagOrDigest string // Used for logging/progress only
+	)
+	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+		// NOTE: not using TagService.Get, since it uses HEAD requests
+		// against the manifests endpoint, which are not supported by
+		// all registry versions.
+		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
+		if err != nil {
+			return false, allowV1Fallback(err)
+		}
+		tagOrDigest = tagged.Tag()
+	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
+		manifest, err = manSvc.Get(ctx, digested.Digest())
+		if err != nil {
+			return false, err
+		}
+		tagOrDigest = digested.Digest().String()
+	} else {
+		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
 	}
-	if unverifiedManifest == nil {
+
+	if manifest == nil {
 		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
 	}
 
-	// If GetByTag succeeded, we can be confident that the registry on
+	// If manSvc.Get succeeded, we can be confident that the registry on
 	// the other side speaks the v2 protocol.
 	p.confirmedV2 = true
 
-	var verifiedManifest *schema1.Manifest
-	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
-	if err != nil {
+	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
+	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())
+
+	var (
+		imageID        image.ID
+		manifestDigest digest.Digest
+	)
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
+		if err != nil {
+			return false, err
+		}
+	case *schema2.DeserializedManifest:
+		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
+		if err != nil {
+			return false, err
+		}
+	case *manifestlist.DeserializedManifestList:
+		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
+		if err != nil {
+			return false, err
+		}
+	default:
+		return false, errors.New("unsupported manifest format")
+	}
+
+	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
+
+	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
+	if err == nil {
+		if oldTagImageID == imageID {
+			return false, nil
+		}
+	} else if err != reference.ErrDoesNotExist {
 		return false, err
 	}
 
+	if canonical, ok := ref.(reference.Canonical); ok {
+		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
+			return false, err
+		}
+	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
+	var verifiedManifest *schema1.Manifest
+	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
+	if err != nil {
+		return "", "", err
+	}
+
 	rootFS := image.NewRootFS()
 
 	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
-		return false, err
+		return "", "", err
 	}
 
 	// remove duplicate layers and check parent chain validity
 	err = fixManifestLayers(verifiedManifest)
 	if err != nil {
-		return false, err
+		return "", "", err
 	}
 
-	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())
-
 	var descriptors []xfer.DownloadDescriptor
 
 	// Image history converted to the new format
@@ -269,12 +320,12 @@
 			ThrowAway bool `json:"throwaway,omitempty"`
 		}
 		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
-			return false, err
+			return "", "", err
 		}
 
 		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
 		if err != nil {
-			return false, err
+			return "", "", err
 		}
 		history = append(history, h)
 
@@ -283,9 +334,10 @@
 		}
 
 		layerDescriptor := &v2LayerDescriptor{
-			digest:         blobSum,
-			repo:           p.repo,
-			blobSumService: p.blobSumService,
+			digest:            blobSum,
+			repoInfo:          p.repoInfo,
+			repo:              p.repo,
+			V2MetadataService: p.V2MetadataService,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)
@@ -293,43 +345,258 @@
 
 	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
 	if err != nil {
-		return false, err
+		return "", "", err
 	}
 	defer release()
 
 	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
 	if err != nil {
-		return false, err
+		return "", "", err
 	}
 
-	imageID, err := p.config.ImageStore.Create(config)
+	imageID, err = p.config.ImageStore.Create(config)
 	if err != nil {
-		return false, err
+		return "", "", err
 	}
 
-	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo)
+	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)
+
+	return imageID, manifestDigest, nil
+}
+
+func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
+	manifestDigest, err = schema2ManifestDigest(ref, mfst)
 	if err != nil {
-		return false, err
+		return "", "", err
 	}
 
-	if manifestDigest != "" {
-		progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
+	target := mfst.Target()
+	imageID = image.ID(target.Digest)
+	if _, err := p.config.ImageStore.Get(imageID); err == nil {
+		// If the image already exists locally, no need to pull
+		// anything.
+		return imageID, manifestDigest, nil
 	}
 
-	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
-	if err == nil && oldTagImageID == imageID {
-		return false, nil
-	}
+	configChan := make(chan []byte, 1)
+	errChan := make(chan error, 1)
+	var cancel func()
+	ctx, cancel = context.WithCancel(ctx)
 
-	if canonical, ok := ref.(reference.Canonical); ok {
-		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
-			return false, err
+	// Pull the image config
+	go func() {
+		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
+		if err != nil {
+			errChan <- err
+			cancel()
+			return
 		}
-	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
-		return false, err
+		configChan <- configJSON
+	}()
+
+	var descriptors []xfer.DownloadDescriptor
+
+	// Note that the order of this loop is in the direction of bottom-most
+	// to top-most, so that the downloads slice gets ordered correctly.
+	for _, d := range mfst.References() {
+		layerDescriptor := &v2LayerDescriptor{
+			digest:            d.Digest,
+			repo:              p.repo,
+			repoInfo:          p.repoInfo,
+			V2MetadataService: p.V2MetadataService,
+		}
+
+		descriptors = append(descriptors, layerDescriptor)
 	}
 
-	return true, nil
+	var (
+		configJSON         []byte       // raw serialized image config
+		unmarshalledConfig image.Image  // deserialized image config
+		downloadRootFS     image.RootFS // rootFS to use for registering layers.
+	)
+	if runtime.GOOS == "windows" {
+		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		if err != nil {
+			return "", "", err
+		}
+		if unmarshalledConfig.RootFS == nil {
+			return "", "", errors.New("image config has no rootfs section")
+		}
+		downloadRootFS = *unmarshalledConfig.RootFS
+		downloadRootFS.DiffIDs = []layer.DiffID{}
+	} else {
+		downloadRootFS = *image.NewRootFS()
+	}
+
+	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
+	if err != nil {
+		if configJSON != nil {
+			// Already received the config
+			return "", "", err
+		}
+		select {
+		case err = <-errChan:
+			return "", "", err
+		default:
+			cancel()
+			select {
+			case <-configChan:
+			case <-errChan:
+			}
+			return "", "", err
+		}
+	}
+	defer release()
+
+	if configJSON == nil {
+		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
+		if err != nil {
+			return "", "", err
+		}
+	}
+
+	// The DiffIDs returned in rootFS MUST match those in the config.
+	// Otherwise the image config could be referencing layers that aren't
+	// included in the manifest.
+	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
+		return "", "", errRootFSMismatch
+	}
+
+	for i := range rootFS.DiffIDs {
+		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
+			return "", "", errRootFSMismatch
+		}
+	}
+
+	imageID, err = p.config.ImageStore.Create(configJSON)
+	if err != nil {
+		return "", "", err
+	}
+
+	return imageID, manifestDigest, nil
+}
+
+func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
+	select {
+	case configJSON := <-configChan:
+		var unmarshalledConfig image.Image
+		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
+			return nil, image.Image{}, err
+		}
+		return configJSON, unmarshalledConfig, nil
+	case err := <-errChan:
+		return nil, image.Image{}, err
+		// Don't need a case for ctx.Done in the select because cancellation
+		// will trigger an error in p.pullSchema2ImageConfig.
+	}
+}
+
+// pullManifestList handles "manifest lists" which point to various
+// platform-specific manifests.
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
+	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
+	if err != nil {
+		return "", "", err
+	}
+
+	var manifestDigest digest.Digest
+	for _, manifestDescriptor := range mfstList.Manifests {
+		// TODO(aaronl): The manifest list spec supports optional
+		// "features" and "variant" fields. These are not yet used.
+		// Once they are, their values should be interpreted here.
+		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
+			manifestDigest = manifestDescriptor.Digest
+			break
+		}
+	}
+
+	if manifestDigest == "" {
+		return "", "", errors.New("no supported platform found in manifest list")
+	}
+
+	manSvc, err := p.repo.Manifests(ctx)
+	if err != nil {
+		return "", "", err
+	}
+
+	manifest, err := manSvc.Get(ctx, manifestDigest)
+	if err != nil {
+		return "", "", err
+	}
+
+	manifestRef, err := reference.WithDigest(ref, manifestDigest)
+	if err != nil {
+		return "", "", err
+	}
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		imageID, _, err = p.pullSchema1(ctx, manifestRef, v)
+		if err != nil {
+			return "", "", err
+		}
+	case *schema2.DeserializedManifest:
+		imageID, _, err = p.pullSchema2(ctx, manifestRef, v)
+		if err != nil {
+			return "", "", err
+		}
+	default:
+		return "", "", errors.New("unsupported manifest format")
+	}
+
+	return imageID, manifestListDigest, err
+}
+
+func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
+	blobs := p.repo.Blobs(ctx)
+	configJSON, err = blobs.Get(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify image config digest
+	verifier, err := digest.NewDigestVerifier(dgst)
+	if err != nil {
+		return nil, err
+	}
+	if _, err := verifier.Write(configJSON); err != nil {
+		return nil, err
+	}
+	if !verifier.Verified() {
+		err := fmt.Errorf("image config verification failed for digest %s", dgst)
+		logrus.Error(err)
+		return nil, err
+	}
+
+	return configJSON, nil
+}
+
+// schema2ManifestDigest computes the manifest digest, and, if pulling by
+// digest, ensures that it matches the requested digest.
+func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
+	_, canonical, err := mfst.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	// If pull by digest, then verify the manifest digest.
+	if digested, isDigested := ref.(reference.Canonical); isDigested {
+		verifier, err := digest.NewDigestVerifier(digested.Digest())
+		if err != nil {
+			return "", err
+		}
+		if _, err := verifier.Write(canonical); err != nil {
+			return "", err
+		}
+		if !verifier.Verified() {
+			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
+			logrus.Error(err)
+			return "", err
+		}
+		return digested.Digest(), nil
+	}
+
+	return digest.FromBytes(canonical), nil
 }
 
 // allowV1Fallback checks if the error is a possible reason to fallback to v1
@@ -353,7 +620,7 @@
 	return err
 }
 
-func verifyManifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
+func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
 	// If pull by digest, then verify the manifest digest. NOTE: It is
 	// important to do this first, before any other content validation. If the
 	// digest cannot be verified, don't even bother with those other things.
@@ -362,13 +629,7 @@
 		if err != nil {
 			return nil, err
 		}
-		payload, err := signedManifest.Payload()
-		if err != nil {
-			// If this failed, the signatures section was corrupted
-			// or missing. Treat the entire manifest as the payload.
-			payload = signedManifest.Raw
-		}
-		if _, err := verifier.Write(payload); err != nil {
+		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
 			return nil, err
 		}
 		if !verifier.Verified() {
@@ -376,15 +637,8 @@
 			logrus.Error(err)
 			return nil, err
 		}
-
-		var verifiedManifest schema1.Manifest
-		if err = json.Unmarshal(payload, &verifiedManifest); err != nil {
-			return nil, err
-		}
-		m = &verifiedManifest
-	} else {
-		m = &signedManifest.Manifest
 	}
+	m = &signedManifest.Manifest
 
 	if m.SchemaVersion != 1 {
 		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
diff --git a/distribution/pull_v2_test.go b/distribution/pull_v2_test.go
index 87356ae..53995bf 100644
--- a/distribution/pull_v2_test.go
+++ b/distribution/pull_v2_test.go
@@ -123,7 +123,7 @@
 		t.Fatal("error unmarshaling manifest:", err)
 	}
 
-	verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest)
+	verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest)
 	if err != nil {
 		t.Fatal("validateManifest failed:", err)
 	}
@@ -145,7 +145,7 @@
 		t.Fatal("error unmarshaling manifest:", err)
 	}
 
-	verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest)
+	verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest)
 	if err != nil {
 		t.Fatal("validateManifest failed:", err)
 	}
@@ -167,7 +167,7 @@
 		t.Fatal("error unmarshaling manifest:", err)
 	}
 
-	verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest)
+	verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest)
 	if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") {
 		t.Fatal("expected validateManifest to fail with digest error")
 	}
diff --git a/distribution/push.go b/distribution/push.go
index b4dec42..445f6bb 100644
--- a/distribution/push.go
+++ b/distribution/push.go
@@ -7,7 +7,6 @@
 	"io"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
@@ -72,12 +71,11 @@
 	switch endpoint.Version {
 	case registry.APIVersion2:
 		return &v2Pusher{
-			blobSumService: metadata.NewBlobSumService(imagePushConfig.MetadataStore),
-			ref:            ref,
-			endpoint:       endpoint,
-			repoInfo:       repoInfo,
-			config:         imagePushConfig,
-			layersPushed:   pushMap{layersPushed: make(map[digest.Digest]bool)},
+			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
+			ref:               ref,
+			endpoint:          endpoint,
+			repoInfo:          repoInfo,
+			config:            imagePushConfig,
 		}, nil
 	case registry.APIVersion1:
 		return &v1Pusher{
diff --git a/distribution/push_v2.go b/distribution/push_v2.go
index bf68c94..68c8f69 100644
--- a/distribution/push_v2.go
+++ b/distribution/push_v2.go
@@ -1,22 +1,21 @@
 package distribution
 
 import (
-	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"sync"
-	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
+	distreference "github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/image/v1"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
@@ -36,82 +35,82 @@
 }
 
 type v2Pusher struct {
-	blobSumService *metadata.BlobSumService
-	ref            reference.Named
-	endpoint       registry.APIEndpoint
-	repoInfo       *registry.RepositoryInfo
-	config         *ImagePushConfig
-	repo           distribution.Repository
+	v2MetadataService *metadata.V2MetadataService
+	ref               reference.Named
+	endpoint          registry.APIEndpoint
+	repoInfo          *registry.RepositoryInfo
+	config            *ImagePushConfig
+	repo              distribution.Repository
 
+	// pushState is state built by the Download functions.
+	pushState pushState
+}
+
+type pushState struct {
+	sync.Mutex
+	// remoteLayers is the set of layers known to exist on the remote side.
+	// This avoids redundant queries when pushing multiple tags that
+	// involve the same layers. It is also used to fill in digest and size
+	// information when building the manifest.
+	remoteLayers map[layer.DiffID]distribution.Descriptor
 	// confirmedV2 is set to true if we confirm we're talking to a v2
 	// registry. This is used to limit fallbacks to the v1 protocol.
 	confirmedV2 bool
-
-	// layersPushed is the set of layers known to exist on the remote side.
-	// This avoids redundant queries when pushing multiple tags that
-	// involve the same layers.
-	layersPushed pushMap
-}
-
-type pushMap struct {
-	sync.Mutex
-	layersPushed map[digest.Digest]bool
 }
 
 func (p *v2Pusher) Push(ctx context.Context) (err error) {
-	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
+	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)
+
+	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
 	if err != nil {
 		logrus.Debugf("Error getting v2 registry: %v", err)
-		return fallbackError{err: err, confirmedV2: p.confirmedV2}
+		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
 	}
 
 	if err = p.pushV2Repository(ctx); err != nil {
 		if registry.ContinueOnError(err) {
-			return fallbackError{err: err, confirmedV2: p.confirmedV2}
+			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
 		}
 	}
 	return err
 }
 
 func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
-	var associations []reference.Association
-	if _, isTagged := p.ref.(reference.NamedTagged); isTagged {
+	if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged {
 		imageID, err := p.config.ReferenceStore.Get(p.ref)
 		if err != nil {
 			return fmt.Errorf("tag does not exist: %s", p.ref.String())
 		}
 
-		associations = []reference.Association{
-			{
-				Ref:     p.ref,
-				ImageID: imageID,
-			},
-		}
-	} else {
-		// Pull all tags
-		associations = p.config.ReferenceStore.ReferencesByName(p.ref)
-	}
-	if err != nil {
-		return fmt.Errorf("error getting tags for %s: %s", p.repoInfo.Name(), err)
-	}
-	if len(associations) == 0 {
-		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
+		return p.pushV2Tag(ctx, namedTagged, imageID)
 	}
 
-	for _, association := range associations {
-		if err := p.pushV2Tag(ctx, association); err != nil {
-			return err
+	if !reference.IsNameOnly(p.ref) {
+		return errors.New("cannot push a digest reference")
+	}
+
+	// Push all tags
+	pushed := 0
+	for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) {
+		if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged {
+			pushed++
+			if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil {
+				return err
+			}
 		}
 	}
 
+	if pushed == 0 {
+		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
+	}
+
 	return nil
 }
 
-func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Association) error {
-	ref := association.Ref
+func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
 	logrus.Debugf("Pushing repository: %s", ref.String())
 
-	img, err := p.config.ImageStore.Get(association.ImageID)
+	img, err := p.config.ImageStore.Get(imageID)
 	if err != nil {
 		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
 	}
@@ -132,20 +131,10 @@
 	var descriptors []xfer.UploadDescriptor
 
 	descriptorTemplate := v2PushDescriptor{
-		blobSumService: p.blobSumService,
-		repo:           p.repo,
-		layersPushed:   &p.layersPushed,
-		confirmedV2:    &p.confirmedV2,
-	}
-
-	// Push empty layer if necessary
-	for _, h := range img.History {
-		if h.EmptyLayer {
-			descriptor := descriptorTemplate
-			descriptor.layer = layer.EmptyLayer
-			descriptors = []xfer.UploadDescriptor{&descriptor}
-			break
-		}
+		v2MetadataService: p.v2MetadataService,
+		repoInfo:          p.repoInfo,
+		repo:              p.repo,
+		pushState:         &p.pushState,
 	}
 
 	// Loop bounds condition is to avoid pushing the base layer on Windows.
@@ -157,52 +146,76 @@
 		l = l.Parent()
 	}
 
-	fsLayers, err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput)
-	if err != nil {
+	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
 		return err
 	}
 
-	var tag string
-	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
-		tag = tagged.Tag()
-	}
-	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
+	// Try schema2 first
+	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON())
+	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
 	if err != nil {
 		return err
 	}
 
-	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
-	signed, err := schema1.Sign(m, p.config.TrustKey)
-	if err != nil {
-		return err
-	}
-
-	manifestDigest, manifestSize, err := digestFromManifest(signed, ref)
-	if err != nil {
-		return err
-	}
-	if manifestDigest != "" {
-		if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
-			progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize)
-			// Signal digest to the trust client so it can sign the
-			// push, if appropriate.
-			progress.Aux(p.config.ProgressOutput, PushResult{Tag: tagged.Tag(), Digest: manifestDigest, Size: manifestSize})
-		}
-	}
-
 	manSvc, err := p.repo.Manifests(ctx)
 	if err != nil {
 		return err
 	}
-	return manSvc.Put(signed)
+
+	putOptions := []distribution.ManifestServiceOption{client.WithTag(ref.Tag())}
+	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
+		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)
+
+		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), ref.Tag(), img.RawJSON())
+		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
+		if err != nil {
+			return err
+		}
+
+		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
+			return err
+		}
+	}
+
+	var canonicalManifest []byte
+
+	switch v := manifest.(type) {
+	case *schema1.SignedManifest:
+		canonicalManifest = v.Canonical
+	case *schema2.DeserializedManifest:
+		_, canonicalManifest, err = v.Payload()
+		if err != nil {
+			return err
+		}
+	}
+
+	manifestDigest := digest.FromBytes(canonicalManifest)
+	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
+	// Signal digest to the trust client so it can sign the
+	// push, if appropriate.
+	progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})
+
+	return nil
+}
+
+func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
+	// descriptors is in reverse order; iterate backwards to get references
+	// appended in the right order.
+	for i := len(descriptors) - 1; i >= 0; i-- {
+		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
+			return nil, err
+		}
+	}
+
+	return builder.Build(ctx)
 }
 
 type v2PushDescriptor struct {
-	layer          layer.Layer
-	blobSumService *metadata.BlobSumService
-	repo           distribution.Repository
-	layersPushed   *pushMap
-	confirmedV2    *bool
+	layer             layer.Layer
+	v2MetadataService *metadata.V2MetadataService
+	repoInfo          reference.Named
+	repo              distribution.Repository
+	pushState         *pushState
 }
 
 func (pd *v2PushDescriptor) Key() string {
@@ -217,39 +230,113 @@
 	return pd.layer.DiffID()
 }
 
-func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
+func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
 	diffID := pd.DiffID()
 
-	logrus.Debugf("Pushing layer: %s", diffID)
+	pd.pushState.Lock()
+	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
+		// it is already known that the push is not needed and
+		// therefore doing a stat is unnecessary
+		pd.pushState.Unlock()
+		progress.Update(progressOutput, pd.ID(), "Layer already exists")
+		return nil
+	}
+	pd.pushState.Unlock()
 
-	// Do we have any blobsums associated with this layer's DiffID?
-	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
+	// Do we have any metadata associated with this layer's DiffID?
+	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
 	if err == nil {
-		dgst, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.layersPushed)
+		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
 		if err != nil {
 			progress.Update(progressOutput, pd.ID(), "Image push failed")
-			return "", retryOnError(err)
+			return retryOnError(err)
 		}
 		if exists {
 			progress.Update(progressOutput, pd.ID(), "Layer already exists")
-			return dgst, nil
+			pd.pushState.Lock()
+			pd.pushState.remoteLayers[diffID] = descriptor
+			pd.pushState.Unlock()
+			return nil
 		}
 	}
 
+	logrus.Debugf("Pushing layer: %s", diffID)
+
 	// if digest was empty or not saved, or if blob does not exist on the remote repository,
 	// then push the blob.
 	bs := pd.repo.Blobs(ctx)
 
+	var mountFrom metadata.V2Metadata
+
+	// Attempt to find another repository in the same registry to mount the layer from, avoiding an unnecessary upload
+	for _, metadata := range v2Metadata {
+		sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
+		if err != nil {
+			continue
+		}
+		if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
+			logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
+			mountFrom = metadata
+			break
+		}
+	}
+
+	var createOpts []distribution.BlobCreateOption
+
+	if mountFrom.SourceRepository != "" {
+		namedRef, err := reference.WithName(mountFrom.SourceRepository)
+		if err != nil {
+			return err
+		}
+
+		// TODO (brianbland): We need to construct a reference where the Name is
+		// only the full remote name, so clean this up when distribution has a
+		// richer reference package
+		remoteRef, err := distreference.WithName(namedRef.RemoteName())
+		if err != nil {
+			return err
+		}
+
+		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
+		if err != nil {
+			return err
+		}
+
+		createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
+	}
+
 	// Send the layer
-	layerUpload, err := bs.Create(ctx)
+	layerUpload, err := bs.Create(ctx, createOpts...)
+	switch err := err.(type) {
+	case distribution.ErrBlobMounted:
+		progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
+
+		pd.pushState.Lock()
+		pd.pushState.confirmedV2 = true
+		pd.pushState.remoteLayers[diffID] = err.Descriptor
+		pd.pushState.Unlock()
+
+		// Cache mapping from this layer's DiffID to the blobsum
+		if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
+			return xfer.DoNotRetry{Err: err}
+		}
+
+		return nil
+	}
+	if mountFrom.SourceRepository != "" {
+		// unable to mount layer from this repository, so this source mapping is no longer valid
+		logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
+		pd.v2MetadataService.Remove(mountFrom)
+	}
+
 	if err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 	defer layerUpload.Close()
 
 	arch, err := pd.layer.TarStream()
 	if err != nil {
-		return "", xfer.DoNotRetry{Err: err}
+		return xfer.DoNotRetry{Err: err}
 	}
 
 	// don't care if this fails; best effort
@@ -265,183 +352,66 @@
 	nn, err := layerUpload.ReadFrom(tee)
 	compressedReader.Close()
 	if err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 
 	pushDigest := digester.Digest()
 	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
-		return "", retryOnError(err)
+		return retryOnError(err)
 	}
 
-	// If Commit succeded, that's an indication that the remote registry
-	// speaks the v2 protocol.
-	*pd.confirmedV2 = true
-
 	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
 	progress.Update(progressOutput, pd.ID(), "Pushed")
 
 	// Cache mapping from this layer's DiffID to the blobsum
-	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
-		return "", xfer.DoNotRetry{Err: err}
+	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
+		return xfer.DoNotRetry{Err: err}
 	}
 
-	pd.layersPushed.Lock()
-	pd.layersPushed.layersPushed[pushDigest] = true
-	pd.layersPushed.Unlock()
+	pd.pushState.Lock()
 
-	return pushDigest, nil
+	// If Commit succeeded, that's an indication that the remote registry
+	// speaks the v2 protocol.
+	pd.pushState.confirmedV2 = true
+
+	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
+		Digest:    pushDigest,
+		MediaType: schema2.MediaTypeLayer,
+		Size:      nn,
+	}
+
+	pd.pushState.Unlock()
+
+	return nil
 }
 
-// blobSumAlreadyExists checks if the registry already know about any of the
-// blobsums passed in the "blobsums" slice. If it finds one that the registry
-// knows about, it returns the known digest and "true".
-func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
-	layersPushed.Lock()
-	for _, dgst := range blobsums {
-		if layersPushed.layersPushed[dgst] {
-			// it is already known that the push is not needed and
-			// therefore doing a stat is unnecessary
-			layersPushed.Unlock()
-			return dgst, true, nil
-		}
-	}
-	layersPushed.Unlock()
+func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
+	// Not necessary to lock pushState because this is always
+	// called after all the mutations in pushState.
+	// By the time this function is called, every layer will have
+	// an entry in remoteLayers.
+	return pd.pushState.remoteLayers[pd.DiffID()]
+}
 
-	for _, dgst := range blobsums {
-		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
+// layerAlreadyExists checks if the registry already knows about any of the
+// metadata passed in the "metadata" slice. If it finds one that the registry
+// knows about, it returns the matching descriptor and "true".
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
+	for _, meta := range metadata {
+		// Only check blobsums that are known to this repository or have an unknown source
+		if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
+			continue
+		}
+		descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
 		switch err {
 		case nil:
-			return dgst, true, nil
+			descriptor.MediaType = schema2.MediaTypeLayer
+			return descriptor, true, nil
 		case distribution.ErrBlobUnknown:
 			// nop
 		default:
-			return "", false, err
+			return distribution.Descriptor{}, false, err
 		}
 	}
-	return "", false, nil
-}
-
-// CreateV2Manifest creates a V2 manifest from an image config and set of
-// FSLayer digests.
-// FIXME: This should be moved to the distribution repo, since it will also
-// be useful for converting new manifests to the old format.
-func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]digest.Digest) (*schema1.Manifest, error) {
-	if len(img.History) == 0 {
-		return nil, errors.New("empty history when trying to create V2 manifest")
-	}
-
-	// Generate IDs for each layer
-	// For non-top-level layers, create fake V1Compatibility strings that
-	// fit the format and don't collide with anything else, but don't
-	// result in runnable images on their own.
-	type v1Compatibility struct {
-		ID              string    `json:"id"`
-		Parent          string    `json:"parent,omitempty"`
-		Comment         string    `json:"comment,omitempty"`
-		Created         time.Time `json:"created"`
-		ContainerConfig struct {
-			Cmd []string
-		} `json:"container_config,omitempty"`
-		ThrowAway bool `json:"throwaway,omitempty"`
-	}
-
-	fsLayerList := make([]schema1.FSLayer, len(img.History))
-	history := make([]schema1.History, len(img.History))
-
-	parent := ""
-	layerCounter := 0
-	for i, h := range img.History {
-		if i == len(img.History)-1 {
-			break
-		}
-
-		var diffID layer.DiffID
-		if h.EmptyLayer {
-			diffID = layer.EmptyLayer.DiffID()
-		} else {
-			if len(img.RootFS.DiffIDs) <= layerCounter {
-				return nil, errors.New("too many non-empty layers in History section")
-			}
-			diffID = img.RootFS.DiffIDs[layerCounter]
-			layerCounter++
-		}
-
-		fsLayer, present := fsLayers[diffID]
-		if !present {
-			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
-		}
-		dgst, err := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent))
-		if err != nil {
-			return nil, err
-		}
-		v1ID := dgst.Hex()
-
-		v1Compatibility := v1Compatibility{
-			ID:      v1ID,
-			Parent:  parent,
-			Comment: h.Comment,
-			Created: h.Created,
-		}
-		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
-		if h.EmptyLayer {
-			v1Compatibility.ThrowAway = true
-		}
-		jsonBytes, err := json.Marshal(&v1Compatibility)
-		if err != nil {
-			return nil, err
-		}
-
-		reversedIndex := len(img.History) - i - 1
-		history[reversedIndex].V1Compatibility = string(jsonBytes)
-		fsLayerList[reversedIndex] = schema1.FSLayer{BlobSum: fsLayer}
-
-		parent = v1ID
-	}
-
-	latestHistory := img.History[len(img.History)-1]
-
-	var diffID layer.DiffID
-	if latestHistory.EmptyLayer {
-		diffID = layer.EmptyLayer.DiffID()
-	} else {
-		if len(img.RootFS.DiffIDs) <= layerCounter {
-			return nil, errors.New("too many non-empty layers in History section")
-		}
-		diffID = img.RootFS.DiffIDs[layerCounter]
-	}
-	fsLayer, present := fsLayers[diffID]
-	if !present {
-		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
-	}
-
-	dgst, err := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent + " " + string(img.RawJSON())))
-	if err != nil {
-		return nil, err
-	}
-	fsLayerList[0] = schema1.FSLayer{BlobSum: fsLayer}
-
-	// Top-level v1compatibility string should be a modified version of the
-	// image config.
-	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
-	if err != nil {
-		return nil, err
-	}
-
-	history[0].V1Compatibility = string(transformedConfig)
-
-	// windows-only baselayer setup
-	if err := setupBaseLayer(history, *img.RootFS); err != nil {
-		return nil, err
-	}
-
-	return &schema1.Manifest{
-		Versioned: manifest.Versioned{
-			SchemaVersion: 1,
-		},
-		Name:         name,
-		Tag:          tag,
-		Architecture: img.Architecture,
-		FSLayers:     fsLayerList,
-		History:      history,
-	}, nil
+	return distribution.Descriptor{}, false, nil
 }
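
The most consequential push-side addition is the cross-repository blob mount: when a layer's recorded SourceRepository sits on the same registry, Create is invoked with client.WithMountFrom, and a distribution.ErrBlobMounted return signals that the registry linked the existing blob without any bytes being uploaded. A condensed sketch of that handshake, assuming a canonical reference already assembled as in Upload above:

import (
	"github.com/docker/distribution"
	distreference "github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
	"golang.org/x/net/context"
)

func createOrMount(ctx context.Context, bs distribution.BlobStore, mountRef distreference.Canonical) error {
	layerUpload, err := bs.Create(ctx, client.WithMountFrom(mountRef))
	switch err := err.(type) {
	case distribution.ErrBlobMounted:
		// Success without transfer; err.Descriptor describes the linked
		// blob and err.From names the source repository.
		_ = err.Descriptor
		return nil
	case nil:
		// Mount not served: fall through to a normal upload
		// (layerUpload.ReadFrom + layerUpload.Commit, as in Upload above).
		defer layerUpload.Close()
		return nil
	default:
		return err
	}
}
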
diff --git a/distribution/push_v2_test.go b/distribution/push_v2_test.go
deleted file mode 100644
index 96a3939..0000000
--- a/distribution/push_v2_test.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package distribution
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-)
-
-func TestCreateV2Manifest(t *testing.T) {
-	imgJSON := `{
-    "architecture": "amd64",
-    "config": {
-        "AttachStderr": false,
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "echo hi"
-        ],
-        "Domainname": "",
-        "Entrypoint": null,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "derived=true",
-            "asdf=true"
-        ],
-        "Hostname": "23304fc829f9",
-        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
-        "Labels": {},
-        "OnBuild": [],
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Tty": false,
-        "User": "",
-        "Volumes": null,
-        "WorkingDir": ""
-    },
-    "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
-    "container_config": {
-        "AttachStderr": false,
-        "AttachStdin": false,
-        "AttachStdout": false,
-        "Cmd": [
-            "/bin/sh",
-            "-c",
-            "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
-        ],
-        "Domainname": "",
-        "Entrypoint": null,
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "derived=true",
-            "asdf=true"
-        ],
-        "Hostname": "23304fc829f9",
-        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
-        "Labels": {},
-        "OnBuild": [],
-        "OpenStdin": false,
-        "StdinOnce": false,
-        "Tty": false,
-        "User": "",
-        "Volumes": null,
-        "WorkingDir": ""
-    },
-    "created": "2015-11-04T23:06:32.365666163Z",
-    "docker_version": "1.9.0-dev",
-    "history": [
-        {
-            "created": "2015-10-31T22:22:54.690851953Z",
-            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-        },
-        {
-            "created": "2015-10-31T22:22:55.613815829Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
-        },
-        {
-            "created": "2015-11-04T23:06:30.934316144Z",
-            "created_by": "/bin/sh -c #(nop) ENV derived=true",
-            "empty_layer": true
-        },
-        {
-            "created": "2015-11-04T23:06:31.192097572Z",
-            "created_by": "/bin/sh -c #(nop) ENV asdf=true",
-            "empty_layer": true
-        },
-        {
-            "created": "2015-11-04T23:06:32.083868454Z",
-            "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
-        },
-        {
-            "created": "2015-11-04T23:06:32.365666163Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
-            "empty_layer": true
-        }
-    ],
-    "os": "linux",
-    "rootfs": {
-        "diff_ids": [
-            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
-            "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
-        ],
-        "type": "layers"
-    }
-}`
-
-	// To fill in rawJSON
-	img, err := image.NewFromJSON([]byte(imgJSON))
-	if err != nil {
-		t.Fatalf("json decoding failed: %v", err)
-	}
-
-	fsLayers := map[layer.DiffID]digest.Digest{
-		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-	}
-
-	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
-	if err != nil {
-		t.Fatalf("CreateV2Manifest returned error: %v", err)
-	}
-
-	if manifest.Versioned.SchemaVersion != 1 {
-		t.Fatal("SchemaVersion != 1")
-	}
-	if manifest.Name != "testrepo" {
-		t.Fatal("incorrect name in manifest")
-	}
-	if manifest.Tag != "testtag" {
-		t.Fatal("incorrect tag in manifest")
-	}
-	if manifest.Architecture != "amd64" {
-		t.Fatal("incorrect arch in manifest")
-	}
-
-	expectedFSLayers := []schema1.FSLayer{
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
-		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
-	}
-
-	if len(manifest.FSLayers) != len(expectedFSLayers) {
-		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
-	}
-	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
-		t.Fatal("wrong FSLayers list")
-	}
-
-	expectedV1Compatibility := []string{
-		`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
-		`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
-		`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
-		`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
-		`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
-		`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
-	}
-
-	if len(manifest.History) != len(expectedV1Compatibility) {
-		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
-	}
-	for i := range expectedV1Compatibility {
-		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
-			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
-		}
-	}
-}
diff --git a/distribution/push_v2_unix.go b/distribution/push_v2_unix.go
deleted file mode 100644
index 3450856..0000000
--- a/distribution/push_v2_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows
-
-package distribution
-
-import (
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-)
-
-func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
-	return nil
-}
diff --git a/distribution/push_v2_windows.go b/distribution/push_v2_windows.go
deleted file mode 100644
index 59ad654..0000000
--- a/distribution/push_v2_windows.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// +build windows
-
-package distribution
-
-import (
-	"encoding/json"
-
-	"github.com/docker/distribution/manifest/schema1"
-	"github.com/docker/docker/image"
-)
-
-func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
-	var v1Config map[string]*json.RawMessage
-	if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil {
-		return err
-	}
-	baseID, err := json.Marshal(rootFS.BaseLayerID())
-	if err != nil {
-		return err
-	}
-	v1Config["parent"] = (*json.RawMessage)(&baseID)
-	configJSON, err := json.Marshal(v1Config)
-	if err != nil {
-		return err
-	}
-	history[len(history)-1].V1Compatibility = string(configJSON)
-	return nil
-}
diff --git a/distribution/registry.go b/distribution/registry.go
index d39a444..1d4a2c4 100644
--- a/distribution/registry.go
+++ b/distribution/registry.go
@@ -8,16 +8,12 @@
 	"strings"
 	"time"
 
-	"github.com/Sirupsen/logrus"
 	"github.com/docker/distribution"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/registry/api/errcode"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/distribution/registry/client/auth"
 	"github.com/docker/distribution/registry/client/transport"
 	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/docker/engine-api/types"
 	"golang.org/x/net/context"
@@ -125,20 +121,6 @@
 	return repo, foundVersion, err
 }
 
-func digestFromManifest(m *schema1.SignedManifest, name reference.Named) (digest.Digest, int, error) {
-	payload, err := m.Payload()
-	if err != nil {
-		// If this failed, the signatures section was corrupted
-		// or missing. Treat the entire manifest as the payload.
-		payload = m.Raw
-	}
-	manifestDigest, err := digest.FromBytes(payload)
-	if err != nil {
-		logrus.Infof("Could not compute manifest digest for %s:%s : %v", name.Name(), m.Tag, err)
-	}
-	return manifestDigest, len(payload), nil
-}
-
 type existingTokenHandler struct {
 	token string
 }
diff --git a/distribution/xfer/download_test.go b/distribution/xfer/download_test.go
index 5d42703..6dc6708 100644
--- a/distribution/xfer/download_test.go
+++ b/distribution/xfer/download_test.go
@@ -65,12 +65,7 @@
 		return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...)
 	}
 	// H = "H(n-1) SHA256(n)"
-	dgst, err := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
-	if err != nil {
-		// Digest calculation is not expected to throw an error,
-		// any error at this point is a program error
-		panic(err)
-	}
+	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
 	return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...)
 }
 
@@ -92,11 +87,7 @@
 	if err != nil {
 		return nil, err
 	}
-	diffID, err := digest.FromBytes(l.layerData.Bytes())
-	if err != nil {
-		return nil, err
-	}
-	l.diffID = layer.DiffID(diffID)
+	l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes()))
 	l.chainID = createChainIDFromParent(parentID, l.diffID)
 
 	ls.layers[l.chainID] = l
@@ -250,15 +241,11 @@
 
 	progressChan := make(chan progress.Progress)
 	progressDone := make(chan struct{})
-	receivedProgress := make(map[string]int64)
+	receivedProgress := make(map[string]progress.Progress)
 
 	go func() {
 		for p := range progressChan {
-			if p.Action == "Downloading" {
-				receivedProgress[p.ID] = p.Current
-			} else if p.Action == "Already exists" {
-				receivedProgress[p.ID] = -1
-			}
+			receivedProgress[p.ID] = p
 		}
 		close(progressDone)
 	}()
@@ -293,11 +280,11 @@
 		descriptor := d.(*mockDownloadDescriptor)
 
 		if descriptor.diffID != "" {
-			if receivedProgress[d.ID()] != -1 {
-				t.Fatalf("did not get 'already exists' message for %v", d.ID())
+			if receivedProgress[d.ID()].Action != "Already exists" {
+				t.Fatalf("did not get 'Already exists' message for %v", d.ID())
 			}
-		} else if receivedProgress[d.ID()] != 10 {
-			t.Fatalf("missing or wrong progress output for %v (got: %d)", d.ID(), receivedProgress[d.ID()])
+		} else if receivedProgress[d.ID()].Action != "Pull complete" {
+			t.Fatalf("did not get 'Pull complete' message for %v", d.ID())
 		}
 
 		if rootFS.DiffIDs[i] != descriptor.expectedDiffID {
diff --git a/distribution/xfer/upload.go b/distribution/xfer/upload.go
index 9a7d2c1..8da6a89 100644
--- a/distribution/xfer/upload.go
+++ b/distribution/xfer/upload.go
@@ -5,7 +5,6 @@
 	"time"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/digest"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/progress"
 	"golang.org/x/net/context"
@@ -30,7 +29,6 @@
 	Transfer
 
 	diffID layer.DiffID
-	digest digest.Digest
 	err    error
 }
 
@@ -43,16 +41,15 @@
 	// DiffID should return the DiffID for this layer.
 	DiffID() layer.DiffID
 	// Upload is called to perform the Upload.
-	Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error)
+	Upload(ctx context.Context, progressOutput progress.Output) error
 }
 
 // Upload is a blocking function which ensures the listed layers are present on
 // the remote registry. It uses the string returned by the Key method to
 // deduplicate uploads.
-func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) (map[layer.DiffID]digest.Digest, error) {
+func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
 	var (
 		uploads          []*uploadTransfer
-		digests          = make(map[layer.DiffID]digest.Digest)
 		dedupDescriptors = make(map[string]struct{})
 	)
 
@@ -74,16 +71,15 @@
 	for _, upload := range uploads {
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return ctx.Err()
 		case <-upload.Transfer.Done():
 			if upload.err != nil {
-				return nil, upload.err
+				return upload.err
 			}
-			digests[upload.diffID] = upload.digest
 		}
 	}
 
-	return digests, nil
+	return nil
 }
 
 func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
@@ -109,9 +105,8 @@
 
 			retries := 0
 			for {
-				digest, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
+				err := descriptor.Upload(u.Transfer.Context(), progressOutput)
 				if err == nil {
-					u.digest = digest
 					break
 				}
 
diff --git a/distribution/xfer/upload_test.go b/distribution/xfer/upload_test.go
index df5b2ba..d87dfca 100644
--- a/distribution/xfer/upload_test.go
+++ b/distribution/xfer/upload_test.go
@@ -36,12 +36,12 @@
 }
 
 // Upload is called to perform the upload.
-func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
+func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
 	if u.currentUploads != nil {
 		defer atomic.AddInt32(u.currentUploads, -1)
 
 		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
-			return "", errors.New("concurrency limit exceeded")
+			return errors.New("concurrency limit exceeded")
 		}
 	}
 
@@ -49,7 +49,7 @@
 	for i := int64(0); i <= 10; i++ {
 		select {
 		case <-ctx.Done():
-			return "", ctx.Err()
+			return ctx.Err()
 		case <-time.After(10 * time.Millisecond):
 			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
 		}
@@ -57,12 +57,10 @@
 
 	if u.simulateRetries != 0 {
 		u.simulateRetries--
-		return "", errors.New("simulating retry")
+		return errors.New("simulating retry")
 	}
 
-	// For the mock implementation, use SHA256(DiffID) as the returned
-	// digest.
-	return digest.FromBytes([]byte(u.diffID.String()))
+	return nil
 }
 
 func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
@@ -101,26 +99,13 @@
 	var currentUploads int32
 	descriptors := uploadDescriptors(&currentUploads)
 
-	digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
+	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
 	if err != nil {
 		t.Fatalf("upload error: %v", err)
 	}
 
 	close(progressChan)
 	<-progressDone
-
-	if len(digests) != len(expectedDigests) {
-		t.Fatal("wrong number of keys in digests map")
-	}
-
-	for key, val := range expectedDigests {
-		if digests[key] != val {
-			t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
-		}
-		if receivedProgress[key.String()] != 10 {
-			t.Fatalf("missing or wrong progress output for %v", key)
-		}
-	}
 }
 
 func TestCancelledUpload(t *testing.T) {
@@ -143,7 +128,7 @@
 	}()
 
 	descriptors := uploadDescriptors(nil)
-	_, err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
+	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
 	if err != context.Canceled {
 		t.Fatal("expected upload to be cancelled")
 	}
diff --git a/docker/common.go b/docker/common.go
index 2509246..893de71 100644
--- a/docker/common.go
+++ b/docker/common.go
@@ -21,7 +21,6 @@
 )
 
 var (
-	daemonFlags *flag.FlagSet
 	commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)}
 
 	dockerCertPath  = os.Getenv("DOCKER_CERT_PATH")
@@ -50,7 +49,7 @@
 	cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file")
 	cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file")
 
-	cmd.Var(opts.NewListOptsRef(&commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to")
+	cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to")
 }
 
 func postParseCommon() {
@@ -67,11 +66,6 @@
 		logrus.SetLevel(logrus.InfoLevel)
 	}
 
-	if commonFlags.Debug {
-		os.Setenv("DEBUG", "1")
-		logrus.SetLevel(logrus.DebugLevel)
-	}
-
 	// Regardless of whether the user sets it to true or false, if they
 	// specify --tlsverify at all then we need to turn on tls
 	// TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well
diff --git a/docker/daemon.go b/docker/daemon.go
index e746090..a842212 100644
--- a/docker/daemon.go
+++ b/docker/daemon.go
@@ -30,23 +30,34 @@
 	"github.com/docker/go-connections/tlsconfig"
 )
 
-const daemonUsage = "       docker daemon [ --help | ... ]\n"
+const (
+	daemonUsage          = "       docker daemon [ --help | ... ]\n"
+	daemonConfigFileFlag = "-config-file"
+)
 
 var (
 	daemonCli cli.Handler = NewDaemonCli()
 )
 
+// DaemonCli represents the daemon CLI.
+type DaemonCli struct {
+	*daemon.Config
+	registryOptions *registry.Options
+	flags           *flag.FlagSet
+}
+
 func presentInHelp(usage string) string { return usage }
 func absentFromHelp(string) string      { return "" }
 
 // NewDaemonCli returns a pre-configured daemon CLI
 func NewDaemonCli() *DaemonCli {
-	daemonFlags = cli.Subcmd("daemon", nil, "Enable daemon mode", true)
+	daemonFlags := cli.Subcmd("daemon", nil, "Enable daemon mode", true)
 
 	// TODO(tiborvass): remove InstallFlags?
 	daemonConfig := new(daemon.Config)
 	daemonConfig.LogConfig.Config = make(map[string]string)
 	daemonConfig.ClusterOpts = make(map[string]string)
+
 	daemonConfig.InstallFlags(daemonFlags, presentInHelp)
 	daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
 	registryOptions := new(registry.Options)
@@ -57,6 +68,7 @@
 	return &DaemonCli{
 		Config:          daemonConfig,
 		registryOptions: registryOptions,
+		flags:           daemonFlags,
 	}
 }
 
@@ -101,12 +113,6 @@
 	return nil
 }
 
-// DaemonCli represents the daemon CLI.
-type DaemonCli struct {
-	*daemon.Config
-	registryOptions *registry.Options
-}
-
 func getGlobalFlag() (globalFlag *flag.Flag) {
 	defer func() {
 		if x := recover(); x != nil {
@@ -136,15 +142,27 @@
 		os.Exit(1)
 	} else {
 		// allow new form `docker daemon -D`
-		flag.Merge(daemonFlags, commonFlags.FlagSet)
+		flag.Merge(cli.flags, commonFlags.FlagSet)
 	}
 
-	daemonFlags.ParseFlags(args, true)
+	configFile := cli.flags.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file")
+
+	cli.flags.ParseFlags(args, true)
 	commonFlags.PostParse()
 
 	if commonFlags.TrustKey == "" {
 		commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
 	}
+	cliConfig, err := loadDaemonCliConfig(cli.Config, cli.flags, commonFlags, *configFile)
+	if err != nil {
+		fmt.Fprint(os.Stderr, err)
+		os.Exit(1)
+	}
+	cli.Config = cliConfig
+
+	if cli.Config.Debug {
+		utils.EnableDebug()
+	}
 
 	if utils.ExperimentalBuild() {
 		logrus.Warn("Running experimental build")
@@ -177,19 +195,25 @@
 	}
 
 	serverConfig := &apiserver.Config{
-		AuthZPluginNames: cli.Config.AuthZPlugins,
-		Logging:          true,
-		Version:          dockerversion.Version,
+		AuthorizationPluginNames: cli.Config.AuthorizationPlugins,
+		Logging:                  true,
+		Version:                  dockerversion.Version,
 	}
 	serverConfig = setPlatformServerConfig(serverConfig, cli.Config)
 
 	defaultHost := opts.DefaultHost
-	if commonFlags.TLSOptions != nil {
-		if !commonFlags.TLSOptions.InsecureSkipVerify {
-			// server requires and verifies client's certificate
-			commonFlags.TLSOptions.ClientAuth = tls.RequireAndVerifyClientCert
+	if cli.Config.TLS {
+		tlsOptions := tlsconfig.Options{
+			CAFile:   cli.Config.TLSOptions.CAFile,
+			CertFile: cli.Config.TLSOptions.CertFile,
+			KeyFile:  cli.Config.TLSOptions.KeyFile,
 		}
-		tlsConfig, err := tlsconfig.Server(*commonFlags.TLSOptions)
+
+		if cli.Config.TLSVerify {
+			// server requires and verifies client's certificate
+			tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
+		}
+		tlsConfig, err := tlsconfig.Server(tlsOptions)
 		if err != nil {
 			logrus.Fatal(err)
 		}
@@ -197,22 +221,23 @@
 		defaultHost = opts.DefaultTLSHost
 	}
 
-	if len(commonFlags.Hosts) == 0 {
-		commonFlags.Hosts = make([]string, 1)
+	if len(cli.Config.Hosts) == 0 {
+		cli.Config.Hosts = make([]string, 1)
 	}
-	for i := 0; i < len(commonFlags.Hosts); i++ {
+	for i := 0; i < len(cli.Config.Hosts); i++ {
 		var err error
-		if commonFlags.Hosts[i], err = opts.ParseHost(defaultHost, commonFlags.Hosts[i]); err != nil {
-			logrus.Fatalf("error parsing -H %s : %v", commonFlags.Hosts[i], err)
+		if cli.Config.Hosts[i], err = opts.ParseHost(defaultHost, cli.Config.Hosts[i]); err != nil {
+			logrus.Fatalf("error parsing -H %s : %v", cli.Config.Hosts[i], err)
 		}
-	}
-	for _, protoAddr := range commonFlags.Hosts {
+
+		protoAddr := cli.Config.Hosts[i]
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		if len(protoAddrParts) != 2 {
 			logrus.Fatalf("bad format %s, expected PROTO://ADDR", protoAddr)
 		}
 		serverConfig.Addrs = append(serverConfig.Addrs, apiserver.Addr{Proto: protoAddrParts[0], Addr: protoAddrParts[1]})
 	}
+
 	api, err := apiserver.New(serverConfig)
 	if err != nil {
 		logrus.Fatal(err)
@@ -245,18 +270,21 @@
 
 	api.InitRouters(d)
 
+	reload := func(config *daemon.Config) {
+		if err := d.Reload(config); err != nil {
+			logrus.Errorf("Error reconfiguring the daemon: %v", err)
+			return
+		}
+		api.Reload(config)
+	}
+
+	setupConfigReloadTrap(*configFile, cli.flags, reload)
+
 	// The serve API routine never exits unless an error occurs
 	// We need to start it as a goroutine and wait on it so
 	// daemon doesn't exit
 	serveAPIWait := make(chan error)
-	go func() {
-		if err := api.ServeAPI(); err != nil {
-			logrus.Errorf("ServeAPI error: %v", err)
-			serveAPIWait <- err
-			return
-		}
-		serveAPIWait <- nil
-	}()
+	go api.Wait(serveAPIWait)
 
 	signal.Trap(func() {
 		api.Close()
@@ -303,3 +331,34 @@
 		logrus.Error("Force shutdown daemon")
 	}
 }
+
+func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commonConfig *cli.CommonFlags, configFile string) (*daemon.Config, error) {
+	config.Debug = commonConfig.Debug
+	config.Hosts = commonConfig.Hosts
+	config.LogLevel = commonConfig.LogLevel
+	config.TLS = commonConfig.TLS
+	config.TLSVerify = commonConfig.TLSVerify
+	config.TLSOptions = daemon.CommonTLSOptions{}
+
+	if commonConfig.TLSOptions != nil {
+		config.TLSOptions.CAFile = commonConfig.TLSOptions.CAFile
+		config.TLSOptions.CertFile = commonConfig.TLSOptions.CertFile
+		config.TLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile
+	}
+
+	if configFile != "" {
+		c, err := daemon.MergeDaemonConfigurations(config, daemonFlags, configFile)
+		if err != nil {
+			if daemonFlags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) {
+				return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", configFile, err)
+			}
+		}
+		// the merged configuration can be nil if the config file didn't exist.
+		// leave the current configuration as it is when that happens.
+		if c != nil {
+			config = c
+		}
+	}
+
+	return config, nil
+}
diff --git a/docker/daemon_test.go b/docker/daemon_test.go
new file mode 100644
index 0000000..bc519e7
--- /dev/null
+++ b/docker/daemon_test.go
@@ -0,0 +1,91 @@
+// +build daemon
+
+package main
+
+import (
+	"io/ioutil"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/mflag"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) {
+	c := &daemon.Config{}
+	common := &cli.CommonFlags{
+		Debug: true,
+	}
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if loadedConfig == nil {
+		t.Fatalf("expected configuration %v, got nil", c)
+	}
+	if !loadedConfig.Debug {
+		t.Fatalf("expected debug to be copied from the common flags, got false")
+	}
+}
+
+func TestLoadDaemonCliConfigWithTLS(t *testing.T) {
+	c := &daemon.Config{}
+	common := &cli.CommonFlags{
+		TLS: true,
+		TLSOptions: &tlsconfig.Options{
+			CAFile: "/tmp/ca.pem",
+		},
+	}
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if loadedConfig == nil {
+		t.Fatalf("expected configuration %v, got nil", c)
+	}
+	if loadedConfig.TLSOptions.CAFile != "/tmp/ca.pem" {
+		t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.TLSOptions.CAFile, loadedConfig)
+	}
+}
+
+func TestLoadDaemonCliConfigWithConflicts(t *testing.T) {
+	c := &daemon.Config{}
+	common := &cli.CommonFlags{}
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"labels": ["l3=foo"]}`))
+	f.Close()
+
+	var labels []string
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	flags.String([]string{daemonConfigFileFlag}, "", "")
+	flags.Var(opts.NewNamedListOptsRef("labels", &labels, opts.ValidateLabel), []string{"-label"}, "")
+
+	flags.Set(daemonConfigFileFlag, configFile)
+	if err := flags.Set("-label", "l1=bar"); err != nil {
+		t.Fatal(err)
+	}
+	if err := flags.Set("-label", "l2=baz"); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = loadDaemonCliConfig(c, flags, common, configFile)
+	if err == nil {
+		t.Fatalf("expected configuration error, got nil")
+	}
+	if !strings.Contains(err.Error(), "labels") {
+		t.Fatalf("expected labels conflict, got %v", err)
+	}
+}
diff --git a/docker/daemon_unix.go b/docker/daemon_unix.go
index 7754130..eba0bee 100644
--- a/docker/daemon_unix.go
+++ b/docker/daemon_unix.go
@@ -5,15 +5,19 @@
 import (
 	"fmt"
 	"os"
+	"os/signal"
 	"syscall"
 
 	apiserver "github.com/docker/docker/api/server"
 	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/system"
 
 	_ "github.com/docker/docker/daemon/execdriver/native"
 )
 
+const defaultDaemonConfigFile = "/etc/docker/daemon.json"
+
 func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
 	serverConfig.SocketGroup = daemonCfg.SocketGroup
 	serverConfig.EnableCors = daemonCfg.EnableCors
@@ -48,3 +52,14 @@
 func getDaemonConfDir() string {
 	return "/etc/docker"
 }
+
+// setupConfigReloadTrap configures the SIGHUP signal to reload the configuration.
+func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGHUP)
+	go func() {
+		for range c {
+			daemon.ReloadConfiguration(configFile, flags, reload)
+		}
+	}()
+}
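
A minimal, Linux-only sketch of how this trap is exercised; this is a hypothetical helper (equivalent to `kill -HUP <daemon-pid>`), not part of the change itself:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"syscall"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: reload <daemon-pid>")
		os.Exit(1)
	}
	pid, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "invalid pid:", os.Args[1])
		os.Exit(1)
	}
	// setupConfigReloadTrap listens for SIGHUP and re-reads --config-file.
	if err := syscall.Kill(pid, syscall.SIGHUP); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```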
diff --git a/docker/daemon_windows.go b/docker/daemon_windows.go
index a930152..307bbcc 100644
--- a/docker/daemon_windows.go
+++ b/docker/daemon_windows.go
@@ -3,12 +3,19 @@
 package main
 
 import (
+	"fmt"
 	"os"
+	"syscall"
 
+	"github.com/Sirupsen/logrus"
 	apiserver "github.com/docker/docker/api/server"
 	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/system"
 )
 
+var defaultDaemonConfigFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker" + string(os.PathSeparator) + "config" + string(os.PathSeparator) + "daemon.json"
+
 func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
 	return serverConfig
 }
@@ -31,3 +38,20 @@
 // notifySystem sends a message to the host when the server is ready to be used
 func notifySystem() {
 }
+
+// setupConfigReloadTrap configures a Win32 event to reload the configuration.
+func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) {
+	go func() {
+		sa := syscall.SecurityAttributes{
+			Length: 0,
+		}
+		ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid())
+		if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 {
+			logrus.Debugf("Config reload - waiting signal at %s", ev)
+			for {
+				syscall.WaitForSingleObject(h, syscall.INFINITE)
+				daemon.ReloadConfiguration(configFile, flags, reload)
+			}
+		}
+	}()
+}
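
The Windows trap is exercised by setting the named event rather than sending a signal. A hypothetical, Windows-only sketch using `kernel32` directly (the event name format matches the code above; the daemon pid is assumed to be the first argument):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: reload <daemon-pid>")
		os.Exit(1)
	}
	kernel32 := syscall.NewLazyDLL("kernel32.dll")
	openEvent := kernel32.NewProc("OpenEventW")
	setEvent := kernel32.NewProc("SetEvent")

	// Same name format as setupConfigReloadTrap above.
	name, _ := syscall.UTF16PtrFromString("Global\\docker-daemon-config-" + os.Args[1])
	const eventModifyState = 0x0002 // EVENT_MODIFY_STATE access right
	h, _, err := openEvent.Call(eventModifyState, 0, uintptr(unsafe.Pointer(name)))
	if h == 0 {
		fmt.Fprintln(os.Stderr, "OpenEventW:", err)
		os.Exit(1)
	}
	defer syscall.CloseHandle(syscall.Handle(h))
	if r, _, err := setEvent.Call(h); r == 0 {
		fmt.Fprintln(os.Stderr, "SetEvent:", err)
		os.Exit(1)
	}
}
```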
diff --git a/docs/articles/ambassador_pattern_linking.md b/docs/articles/ambassador_pattern_linking.md
index ab09f01..7684f37 100644
--- a/docs/articles/ambassador_pattern_linking.md
+++ b/docs/articles/ambassador_pattern_linking.md
@@ -157,4 +157,4 @@
     	apk add socat && \
     	rm -r /var/cache/
 
-    CMD	env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \& wait/' | sh
+    CMD	env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh
diff --git a/docs/extend/authorization.md b/docs/extend/authorization.md
index 80fa6c9..4879083 100644
--- a/docs/extend/authorization.md
+++ b/docs/extend/authorization.md
@@ -12,7 +12,7 @@
 
 # Create an authorization plugin
 
-Docker’s out-of-the-box authorization model is all or nothing. Any user with
+Docker's out-of-the-box authorization model is all or nothing. Any user with
 permission to access the Docker daemon can run any Docker client command. The
 same is true for callers using Docker's remote API to contact the daemon. If you
 require greater access control, you can create authorization plugins and add
@@ -45,6 +45,9 @@
 Each plugin must reside within directories described under the 
 [Plugin discovery](plugin_api.md#plugin-discovery) section.
 
+**Note**: The abbreviations `AuthZ` and `AuthN` mean authorization and authentication
+respectively.
+
 ## Basic architecture
 
 You are responsible for registering your plugin as part of the Docker daemon
@@ -93,14 +96,14 @@
 ### Setting up Docker daemon
 
 Enable the authorization plugin with a dedicated command line flag in the
-`--authz-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID` value.
-This value can be the plugin’s socket or a path to a specification file.
+`--authorization-plugin=PLUGIN_ID` format. The flag supplies a `PLUGIN_ID`
+value. This value can be the plugin's socket or a path to a specification file.
 
 ```bash
-$ docker daemon --authz-plugin=plugin1 --authz-plugin=plugin2,...
+$ docker daemon --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
 ```
 
-Docker's authorization subsystem supports multiple `--authz-plugin` parameters.
+Docker's authorization subsystem supports multiple `--authorization-plugin` parameters.
 
 ### Calling authorized command (allow)
 
diff --git a/docs/installation/binaries.md b/docs/installation/binaries.md
index 2f1d55d..b5f56d0 100644
--- a/docs/installation/binaries.md
+++ b/docs/installation/binaries.md
@@ -186,7 +186,7 @@
 
 > **Warning**: 
 > The *docker* group (or the group specified with `-G`) is root-equivalent;
-> see [*Docker Daemon Attack Surface*](../articles/security.md#docker-daemon-attack-surface) details.
+> see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) details.
 
 ## Upgrades
 
diff --git a/docs/installation/centos.md b/docs/installation/centos.md
index e447e21..84a9b79 100644
--- a/docs/installation/centos.md
+++ b/docs/installation/centos.md
@@ -134,7 +134,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/debian.md b/docs/installation/debian.md
index 154650f..2621507 100644
--- a/docs/installation/debian.md
+++ b/docs/installation/debian.md
@@ -17,8 +17,8 @@
  - [*Debian 8.0 Jessie (64-bit)*](#debian-jessie-80-64-bit)
  - [*Debian 7.7 Wheezy (64-bit)*](#debian-wheezy-stable-7-x-64-bit)
 
- >**Note**: If you previously installed Docker using `apt`, make sure you update
- your `apt` sources to the new `apt` repository.
+ >**Note**: If you previously installed Docker using `APT`, make sure you update
+ your `APT` sources to the new `APT` repository.
 
 ## Prerequisites
 
@@ -37,7 +37,7 @@
 
 ### Update your apt repository
 
-Docker's `apt` repository contains Docker 1.7.1 and higher. To set `apt` to use
+Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use
 from the new repository:
 
  1. If you haven't already done so, log into your machine as a user with `sudo` or `root` privileges.
@@ -49,17 +49,22 @@
          $ apt-get purge lxc-docker*
          $ apt-get purge docker.io*
 
- 4. Add the new `gpg` key.
+ 4. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed.
+
+         $ apt-get update
+         $ apt-get install apt-transport-https ca-certificates
+
+ 5. Add the new `GPG` key.
 
          $ apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
 
- 5. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
+ 6. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
 
      If the file doesn't exist, create it.
 
- 6. Remove any existing entries.
+ 7. Remove any existing entries.
 
- 7. Add an entry for your Debian operating system.
+ 8. Add an entry for your Debian operating system.
 
      The possible entries are:
 
@@ -80,23 +85,23 @@
     > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources)
     > for details.
 
- 8. Save and close the file.
+ 9. Save and close the file.
 
- 9. Update the `apt` package index.
+ 10. Update the `APT` package index.
 
          $ apt-get update
 
- 10. Verify that `apt` is pulling from the right repository.
+ 11. Verify that `APT` is pulling from the right repository.
 
          $ apt-cache policy docker-engine
 
-     From now on when you run `apt-get upgrade`, `apt` pulls from the new apt repository.  
+     From now on when you run `apt-get upgrade`, `APT` pulls from the new `APT` repository.
 
 ## Install Docker
 
-Before installing Docker, make sure you have set your `apt` repository correctly as described in the prerequisites.
+Before installing Docker, make sure you have set your `APT` repository correctly as described in the prerequisites.
 
-1. Update the `apt` package index.
+1. Update the `APT` package index.
 
         $ sudo apt-get update
 
@@ -133,7 +138,7 @@
 
 > **Warning**:
 > The `docker` group (or the group specified with the `-G` flag) is
-> `root`-equivalent; see [*Docker Daemon Attack Surface*](../articles/security.md#docker-daemon-attack-surface) details.
+> `root`-equivalent; see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) details.
 
 **Example:**
 
diff --git a/docs/installation/fedora.md b/docs/installation/fedora.md
index 3e9dd5d..b45a5de 100644
--- a/docs/installation/fedora.md
+++ b/docs/installation/fedora.md
@@ -128,7 +128,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/oracle.md b/docs/installation/oracle.md
index e189558..56c96aa 100644
--- a/docs/installation/oracle.md
+++ b/docs/installation/oracle.md
@@ -99,7 +99,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/rhel.md b/docs/installation/rhel.md
index 6c20f27..b550a37 100644
--- a/docs/installation/rhel.md
+++ b/docs/installation/rhel.md
@@ -126,7 +126,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/ubuntulinux.md b/docs/installation/ubuntulinux.md
index 78731da..b888e33 100644
--- a/docs/installation/ubuntulinux.md
+++ b/docs/installation/ubuntulinux.md
@@ -22,7 +22,7 @@
 of Docker. If you wish to install using Ubuntu-managed packages, consult your
 Ubuntu documentation.
 
->**Note**: Ubuntu Utopic 14.10 and 15.04 exist in Docker's `apt` repository but
+>**Note**: Ubuntu Utopic 14.10 and 15.04 exist in Docker's `APT` repository but
 > are no longer officially supported.
 
 ## Prerequisites
@@ -41,29 +41,34 @@
     $ uname -r
     3.11.0-15-generic
 
->**Note**: If you previously installed Docker using `apt`, make sure you update
-your `apt` sources to the new Docker repository.
+>**Note**: If you previously installed Docker using `APT`, make sure you update
+your `APT` sources to the new Docker repository.
 
 ### Update your apt sources
 
-Docker's `apt` repository contains Docker 1.7.1 and higher. To set `apt` to use
+Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use
 packages from the new repository:
 
 1. If you haven't already done so, log into your Ubuntu instance as a privileged user.
 
 2. Open a terminal window.
 
-3. Add the new `gpg` key.
+3. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed.
+
+         $ apt-get update
+         $ apt-get install apt-transport-https ca-certificates
+
+4. Add the new `GPG` key.
 
         $ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
 
-4. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
+5. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
 
     If the file doesn't exist, create it.
 
-5. Remove any existing entries.
+6. Remove any existing entries.
 
-6. Add an entry for your Ubuntu operating system.
+7. Add an entry for your Ubuntu operating system.
 
     The possible entries are:
 
@@ -84,21 +89,21 @@
     > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources)
     > for details.
 
-7. Save and close the `/etc/apt/sources.list.d/docker.list` file.
+8. Save and close the `/etc/apt/sources.list.d/docker.list` file.
 
-8. Update the `apt` package index.
+9. Update the `APT` package index.
 
         $ apt-get update
 
-9. Purge the old repo if it exists.
+10. Purge the old repo if it exists.
 
         $ apt-get purge lxc-docker
 
-10. Verify that `apt` is pulling from the right repository.
+11. Verify that `APT` is pulling from the right repository.
 
         $ apt-cache policy docker-engine
 
-    From now on when you run `apt-get upgrade`, `apt` pulls from the new repository.  
+    From now on when you run `apt-get upgrade`, `APT` pulls from the new repository.
 
 ### Prerequisites by Ubuntu Version
 
@@ -183,7 +188,7 @@
 
 1. Log into your Ubuntu installation as a user with `sudo` privileges.
 
-2. Update your `apt` package index.
+2. Update your `APT` package index.
 
         $ sudo apt-get update
 
@@ -225,7 +230,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/introduction/understanding-docker.md b/docs/introduction/understanding-docker.md
index be9999d..ec1eed6 100644
--- a/docs/introduction/understanding-docker.md
+++ b/docs/introduction/understanding-docker.md
@@ -278,10 +278,9 @@
 
 ### Container format 
 Docker combines these components into a wrapper we call a container format. The
-default container format is called `libcontainer`. Docker also supports
-traditional Linux containers using [LXC](https://linuxcontainers.org/). In the 
-future, Docker may support other container formats, for example, by integrating with
-BSD Jails or Solaris Zones.
+default container format is called `libcontainer`. In the future, Docker may
+support other container formats, for example, by integrating with BSD Jails
+or Solaris Zones.
 
 ## Next steps
 ### Installing Docker
diff --git a/docs/reference/api/docker_remote_api.md b/docs/reference/api/docker_remote_api.md
index 028c41e..a7d8fdb 100644
--- a/docs/reference/api/docker_remote_api.md
+++ b/docs/reference/api/docker_remote_api.md
@@ -95,6 +95,7 @@
 
 [Docker Remote API v1.22](docker_remote_api_v1.22.md) documentation
 
+* `POST /containers/(name)/update` updates the resources of a container.
 * `GET /containers/json` supports filter `isolation` on Windows.
 * `GET /containers/json` now returns the list of networks of containers.
 * `GET /info` Now returns `Architecture` and `OSType` fields, providing information
@@ -112,6 +113,15 @@
 * `GET /networks` now supports filtering by `name`, `id` and `type`.
 * `POST /containers/create` now allows you to set the static IPv4 and/or IPv6 address for the container.
 * `POST /networks/(id)/connect` now allows you to set the static IPv4 and/or IPv6 address for the container.
+* `GET /info` now includes the number of containers running, stopped, and paused.
+* `POST /networks/create` now supports restricting external access to the network by setting the `internal` field.
+* `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from a network.
+* `GET /containers/(id)/json` now returns the `NetworkID` of containers.
+* `POST /networks/create` now supports an options field in the IPAM config that provides options
+  for custom IPAM plugins.
+* `GET /networks/{network-id}` now returns IPAM config options for custom IPAM plugins if any
+  are available.
+* `GET /networks/<network-id>` now returns subnets info for user-defined networks.
 
 ### v1.21 API changes
 
@@ -120,7 +130,7 @@
 * `GET /volumes` lists volumes from all volume drivers.
 * `POST /volumes/create` to create a volume.
 * `GET /volumes/(name)` get low-level information about a volume.
-* `DELETE /volumes/(name)`remove a volume with the specified name.
+* `DELETE /volumes/(name)` remove a volume with the specified name.
 * `VolumeDriver` was moved from `config` to `HostConfig` to make the configuration portable.
 * `GET /images/(name)/json` now returns information about an image's `RepoTags` and `RepoDigests`.
 * The `config` option now accepts the field `StopSignal`, which specifies the signal to use to kill a container.
diff --git a/docs/reference/api/docker_remote_api_v1.22.md b/docs/reference/api/docker_remote_api_v1.22.md
index 9483dd4..d523b95 100644
--- a/docs/reference/api/docker_remote_api_v1.22.md
+++ b/docs/reference/api/docker_remote_api_v1.22.md
@@ -61,6 +61,7 @@
                  "NetworkSettings": {
                          "Networks": {
                                  "bridge": {
+                                          "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
                                           "EndpointID": "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f",
                                           "Gateway": "172.17.0.1",
                                           "IPAddress": "172.17.0.2",
@@ -88,6 +89,7 @@
                  "NetworkSettings": {
                          "Networks": {
                                  "bridge": {
+                                          "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
                                           "EndpointID": "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a",
                                           "Gateway": "172.17.0.1",
                                           "IPAddress": "172.17.0.8",
@@ -116,6 +118,7 @@
                  "NetworkSettings": {
                          "Networks": {
                                  "bridge": {
+                                          "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
                                           "EndpointID": "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d",
                                           "Gateway": "172.17.0.1",
                                           "IPAddress": "172.17.0.6",
@@ -144,6 +147,7 @@
                  "NetworkSettings": {
                          "Networks": {
                                  "bridge": {
+                                          "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
                                           "EndpointID": "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9",
                                           "Gateway": "172.17.0.1",
                                           "IPAddress": "172.17.0.5",
@@ -220,10 +224,13 @@
            },
            "Mounts": [
              {
+               "Name": "fac362...80535",
                "Source": "/data",
                "Destination": "/data",
+               "Driver": "local",
                "Mode": "ro,Z",
-               "RW": false
+               "RW": false,
+               "Propagation": ""
              }
            ],
            "WorkingDir": "",
@@ -542,14 +549,15 @@
 			"MacAddress": "",
 			"Networks": {
 				"bridge": {
-					"EndpointID": "",
-					"Gateway": "",
-					"IPAddress": "",
-					"IPPrefixLen": 0,
+					"NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
+					"EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d",
+					"Gateway": "172.17.0.1",
+					"IPAddress": "172.17.0.2",
+					"IPPrefixLen": 16,
 					"IPv6Gateway": "",
 					"GlobalIPv6Address": "",
 					"GlobalIPv6PrefixLen": 0,
-					"MacAddress": ""
+					"MacAddress": "02:42:ac:12:00:02"
 				}
 			}
 		},
@@ -572,10 +580,13 @@
 		},
 		"Mounts": [
 			{
+				"Name": "fac362...80535",
 				"Source": "/data",
 				"Destination": "/data",
+				"Driver": "local",
 				"Mode": "ro,Z",
-				"RW": false
+				"RW": false,
+				"Propagation": ""
 			}
 		]
 	}
@@ -1023,7 +1034,7 @@
        Content-Type: application/json
 
        {
-           "HostConfig": {
+           "UpdateConfig": {
                "Resources": {
                    "BlkioWeight": 300,
                    "CpuShares": 512,
@@ -2081,6 +2092,9 @@
     {
         "Architecture": "x86_64",
         "Containers": 11,
+        "ContainersRunning": 7,
+        "ContainersStopped": 3,
+        "ContainersPaused": 1,
         "CpuCfsPeriod": true,
         "CpuCfsQuota": true,
         "Debug": false,
@@ -2678,14 +2692,15 @@
             "MacAddress": "",
             "Networks": {
                 "bridge": {
-                    "EndpointID": "",
-                    "Gateway": "",
-                    "IPAddress": "",
-                    "IPPrefixLen": 0,
+                    "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
+                    "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d",
+                    "Gateway": "172.17.0.1",
+                    "IPAddress": "172.17.0.2",
+                    "IPPrefixLen": 16,
                     "IPv6Gateway": "",
                     "GlobalIPv6Address": "",
                     "GlobalIPv6PrefixLen": 0,
-                    "MacAddress": ""
+                    "MacAddress": "02:42:ac:12:00:02"
                 }
             }
         },
@@ -2922,7 +2937,7 @@
 
 **Example request**:
 
-    GET /networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1
+    GET /networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1
 
 **Example response**:
 
@@ -2931,24 +2946,28 @@
 Content-Type: application/json
 
 {
-  "Name": "bridge",
-  "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+  "Name": "net01",
+  "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99",
   "Scope": "local",
   "Driver": "bridge",
   "IPAM": {
     "Driver": "default",
     "Config": [
       {
-        "Subnet": "172.17.0.0/16"
+        "Subnet": "172.19.0.0/16",
+        "Gateway": "172.19.0.1/16"
       }
-    ]
+    ],
+    "Options": {
+        "foo": "bar"
+    }
   },
   "Containers": {
-    "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
-      "Name": "mad_mclean",
-      "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
-      "MacAddress": "02:42:ac:11:00:02",
-      "IPv4Address": "172.17.0.2/16",
+    "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
+      "Name": "test",
+      "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
+      "MacAddress": "02:42:ac:13:00:02",
+      "IPv4Address": "172.19.0.2/16",
       "IPv6Address": ""
     }
   },
@@ -2982,13 +3001,18 @@
 
 {
   "Name":"isolated_nw",
-  "Driver":"bridge"
+  "Driver":"bridge",
   "IPAM":{
     "Config":[{
       "Subnet":"172.20.0.0/16",
       "IPRange":"172.20.10.0/24",
       "Gateway":"172.20.10.11"
-    }]
+    }],
+    "Options": {
+        "foo": "bar"
+    }
+  },
+  "Internal":true
 }
 ```
 
@@ -3032,7 +3056,7 @@
 
 {
   "Container":"3613f73ba0e4",
-  "endpoint_config": {
+  "EndpointConfig": {
     "test_nw": {
         "IPv4Address":"172.24.56.89",
         "IPv6Address":"2001:db8::5689"
@@ -3068,7 +3092,8 @@
 Content-Type: application/json
 
 {
-  "Container":"3613f73ba0e4"
+  "Container":"3613f73ba0e4",
+  "Force":false
 }
 ```
 
@@ -3085,6 +3110,7 @@
 JSON Parameters:
 
 - **Container** - container-id/name to be disconnected from a network
+- **Force** - Force the container to disconnect from a network
 
 ### Remove a network
 
diff --git a/docs/reference/api/remote_api_client_libraries.md b/docs/reference/api/remote_api_client_libraries.md
index d8246c3..c86e697 100644
--- a/docs/reference/api/remote_api_client_libraries.md
+++ b/docs/reference/api/remote_api_client_libraries.md
@@ -57,6 +57,12 @@
     </tr>
     <tr>
       <td>Go</td>
+      <td>engine-api</td>
+      <td><a class="reference external" href="https://github.com/docker/engine-api">https://github.com/docker/engine-api</a></td>
+      <td>Active</td>
+    </tr>
+    <tr>
+      <td>Go</td>
       <td>go-dockerclient</td>
       <td><a class="reference external" href="https://github.com/fsouza/go-dockerclient">https://github.com/fsouza/go-dockerclient</a></td>
       <td>Active</td>
diff --git a/docs/reference/builder.md b/docs/reference/builder.md
index 5663e83..355c5e3 100644
--- a/docs/reference/builder.md
+++ b/docs/reference/builder.md
@@ -93,7 +93,7 @@
     Step 2 : RUN apk update &&      apk add socat &&        rm -r /var/cache/
      ---> Using cache
      ---> 21ed6e7fbb73
-    Step 3 : CMD env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \& wait/' | sh
+    Step 3 : CMD env | grep _TCP= | (sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat -t 100000000 TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' && echo wait) | sh
      ---> Using cache
      ---> 7ea8aef582cc
     Successfully built 7ea8aef582cc
diff --git a/docs/reference/commandline/cli.md b/docs/reference/commandline/cli.md
index e3773f7..26d2469 100644
--- a/docs/reference/commandline/cli.md
+++ b/docs/reference/commandline/cli.md
@@ -107,8 +107,8 @@
 Once attached to a container, users detach from it and leave it running using
 the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable
 using the `detachKeys` property. Specify a `<sequence>` value for the
-property. The format of the `<sequence>` is either a letter [a-Z], or the `ctrl-`
-combined with any of the following:
+property. The format of the `<sequence>` is a comma-separated list of either
+a letter [a-Z], or `ctrl-` combined with any of the following
+(for example, `ctrl-p,ctrl-q`):
 
 * `a-z` (a single lowercase alpha character)
 * `@` (at sign)
diff --git a/docs/reference/commandline/create.md b/docs/reference/commandline/create.md
index 0a5e247..ad23995 100644
--- a/docs/reference/commandline/create.md
+++ b/docs/reference/commandline/create.md
@@ -46,6 +46,8 @@
       -h, --hostname=""             Container host name
       --help                        Print usage
       -i, --interactive             Keep STDIN open even if not attached
+      --ip=""                       Container IPv4 address (e.g. 172.30.100.104)
+      --ip6=""                      Container IPv6 address (e.g. 2001:db8::33)
       --ipc=""                      IPC namespace to use
       --isolation=""                Container isolation technology
       --kernel-memory=""            Kernel memory limit
@@ -66,6 +68,7 @@
                                     'container:<name|id>': reuse another container's network stack
                                     'host': use the Docker host network stack
                                     '<network-name>|<network-id>': connect to a user-defined network
+      --net-alias=[]                Add network-scoped alias for the container
       --oom-kill-disable            Whether to disable OOM Killer for the container or not
       --oom-score-adj=0             Tune the host's OOM preferences for containers (accepts -1000 to 1000)
       -P, --publish-all             Publish all exposed ports to random ports
diff --git a/docs/reference/commandline/daemon.md b/docs/reference/commandline/daemon.md
index a460626..856d913 100644
--- a/docs/reference/commandline/daemon.md
+++ b/docs/reference/commandline/daemon.md
@@ -17,7 +17,7 @@
 
     Options:
       --api-cors-header=""                   Set CORS headers in the remote API
-      --authz-plugin=[]                      Set authorization plugins to load
+      --authorization-plugin=[]              Set authorization plugins to load
       -b, --bridge=""                        Attach containers to a network bridge
       --bip=""                               Specify network bridge IP
       --cgroup-parent=                       Set parent cgroup for all containers
@@ -27,6 +27,7 @@
       --cluster-store=""                     URL of the distributed storage backend
       --cluster-advertise=""                 Address of the daemon instance on the cluster
       --cluster-store-opt=map[]              Set cluster options
+      --config-file=/etc/docker/daemon.json  Daemon configuration file
       --dns=[]                               DNS server to use
       --dns-opt=[]                           DNS options to use
       --dns-search=[]                        DNS search domains to use
@@ -62,6 +63,7 @@
       --tlscert="~/.docker/cert.pem"         Path to TLS certificate file
       --tlskey="~/.docker/key.pem"           Path to TLS key file
       --tlsverify                            Use TLS and verify the remote
+      --userns-remap="default"               Enable user namespace remapping
       --userland-proxy=true                  Use userland proxy for loopback traffic
 
 Options with [] may be specified multiple times.
@@ -212,11 +214,23 @@
 *  `dm.basesize`
 
     Specifies the size to use when creating the base device, which limits the
-    size of images and containers. The default value is 100G. Note, thin devices
-    are inherently "sparse", so a 100G device which is mostly empty doesn't use
-    100 GB of space on the pool. However, the filesystem will use more space for
+    size of images and containers. The default value is 10G. Note, thin devices
+    are inherently "sparse", so a 10G device which is mostly empty doesn't use
+    10 GB of space on the pool. However, the filesystem will use more space for
     the empty case the larger the device is.
 
+    The base device size can be increased at daemon restart, which allows
+    all future images and containers (based on those new images) to use the
+    new base device size.
+
+    Example use:
+
+        $ docker daemon --storage-opt dm.basesize=50G
+
+    This will increase the base device size to 50G. The Docker daemon will throw
+    an error if the existing base device size is larger than 50G. A user can use
+    this option to expand the base device size; however, shrinking is not permitted.
+
     This value affects the system-wide "base" empty filesystem
     that may already be initialized and inherited by pulled images. Typically,
     a change to this value requires additional steps to take effect:
@@ -612,10 +626,10 @@
 Docker's access authorization can be extended by authorization plugins that your
 organization can purchase or build themselves. You can install one or more
 authorization plugins when you start the Docker `daemon` using the
-`--authz-plugin=PLUGIN_ID` option.
+`--authorization-plugin=PLUGIN_ID` option.
 
 ```bash
-docker daemon --authz-plugin=plugin1 --authz-plugin=plugin2,...
+docker daemon --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
 ```
 
 The `PLUGIN_ID` value is either the plugin's name or a path to its specification
@@ -632,6 +646,133 @@
 plugin](../../extend/authorization.md) section in the Docker extend section of this documentation.
 
 
+## Daemon user namespace options
+
+The Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling
+a process, and therefore a container, to have a unique range of user and
+group IDs which are outside the traditional user and group range utilized by
+the host system. Potentially the most important security improvement is that,
+by default, container processes running as the `root` user will have expected
+administrative privilege (with some restrictions) inside the container but will
+effectively be mapped to an unprivileged `uid` on the host.
+
+When user namespace support is enabled, Docker creates a single daemon-wide mapping
+for all containers running on the same engine instance. The mappings will
+utilize the existing subordinate user and group ID feature available on all modern
+Linux distributions.
+The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and
+[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be
+read for the user, and optional group, specified to the `--userns-remap`
+parameter.  If you do not wish to specify your own user and/or group, you can
+provide `default` as the value to this flag, and a user will be created on your behalf
+and provided subordinate uid and gid ranges. This default user will be named
+`dockremap`, and entries will be created for it in `/etc/passwd` and
+`/etc/group` using your distro's standard user and group creation tools.
+
+> **Note**: The single mapping per-daemon restriction is in place for now
+> because Docker shares image layers from its local cache across all
+> containers running on the engine instance.  Since file ownership must be
+> the same for all containers sharing the same layer content, the decision
+> was made to map the file ownership on `docker pull` to the daemon's user and
+> group mappings so that there is no delay for running containers once the
+> content is downloaded. This design preserves the same performance for `docker
+> pull`, `docker push`, and container startup as users expect with
+> user namespaces disabled.
+
+### Starting the daemon with user namespaces enabled
+
+To enable user namespace support, start the daemon with the
+`--userns-remap` flag, which accepts values in the following formats:
+
+ - uid
+ - uid:gid
+ - username
+ - username:groupname
+
+If numeric IDs are provided, translation back to valid user or group names
+will occur so that the subordinate uid and gid information can be read, given
+these resources are name-based, not id-based.  If the numeric ID information
+provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon
+startup will fail with an error message.
+
+*Example: starting with default Docker user management:*
+
+```
+     $ docker daemon --userns-remap=default
+```
+
+When `default` is provided, Docker will create (or find the existing) user and group
+named `dockremap`. If the user is created, and the Linux distribution has
+appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated
+with a contiguous 65536 length range of subordinate user and group IDs, starting
+at an offset based on prior entries in those files.  For example, Ubuntu will
+create the following range, based on an existing user named `user1` already owning
+the first 65536 range:
+
+```
+     $ cat /etc/subuid
+     user1:100000:65536
+     dockremap:165536:65536
+```
+
+> **Note:** On a fresh Fedora install, we had to `touch` the
+> `/etc/subuid` and `/etc/subgid` files to have ranges assigned when users
+> were created.  Once these files existed, range assignment on user creation
+> worked properly.
+
+If you have a preferred/self-managed user with subordinate ID mappings already
+configured, you can provide that username or uid to the `--userns-remap` flag.
+If you have a group that doesn't match the username, you may provide the `gid`
+or group name as well; otherwise the username will be used as the group name
+when querying the system for the subordinate group ID range.
+
+### Detailed information on `subuid`/`subgid` ranges
+
+Given potential advanced use of the subordinate ID ranges by power users, the 
+following paragraphs define how the Docker daemon currently uses the range entries
+found within the subordinate range files.
+
+The simplest case is that only one contiguous range is defined for the
+provided user or group. In this case, Docker will use that entire contiguous
+range for the mapping of host uids and gids to the container process.  This
+means that the first ID in the range will be the remapped root user, and the
+IDs above that initial ID will map host ID 1 through the end of the range.
+
+From the example `/etc/subuid` content shown above, the remapped root
+user would be uid 165536.
+
+If the system administrator has set up multiple ranges for a single user or
+group, the Docker daemon will read all the available ranges and use the
+following algorithm to create the mapping ranges:
+
+1. The range segments found for the particular user will be sorted by *start ID* ascending.
+2. Map segments will be created from each range in increasing value with a length matching the length of each segment. Therefore the range segment with the lowest numeric starting value will be equal to the remapped root, and continue up through host uid/gid equal to the range segment length. As an example, if the lowest segment starts at ID 1000 and has a length of 100, then a map of 1000 -> 0 (the remapped root) up through 1099 -> 99 will be created from this segment. If the next segment starts at ID 10000, then the next map will start with mapping 10000 -> 100 up to the length of this second segment. This will continue until no more segments are found in the subordinate files for this user.
+3. If more than five range segments exist for a single user, only the first five will be utilized, matching the kernel's limitation of only five entries in `/proc/self/uid_map` and `proc/self/gid_map`.
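+
+As a sketch of the multiple-range case (the `jdoe` entries here are
+hypothetical), given these subordinate uid ranges:
+
+```
+     $ cat /etc/subuid
+     jdoe:1000:100
+     jdoe:10000:200
+```
+
+the mapping created by the daemon, in the `container-ID host-ID length` layout
+used by `/proc/self/uid_map`, would be:
+
+```
+     0     1000    100
+     100   10000   200
+```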
+
+### User namespace known restrictions
+
+The following standard Docker features are currently incompatible when
+running a Docker daemon with user namespaces enabled:
+
+ - sharing PID or NET namespaces with the host (`--pid=host` or `--net=host`)
+ - sharing a network namespace with an existing container (`--net=container:*other*`)
+ - sharing an IPC namespace with an existing container (`--ipc=container:*other*`)
+ - a `--read-only` container filesystem (this is a Linux kernel restriction against remounting with modified flags of a currently mounted filesystem when inside a user namespace)
+ - external (volume or graph) drivers which are unaware of or incapable of using daemon user mappings
+ - using the `--privileged` mode flag on `docker run`
+
+In general, user namespaces are an advanced feature and will require
+coordination with other capabilities. For example, if volumes are mounted from
+the host, file ownership will have to be pre-arranged if the user or
+administrator wishes the containers to have expected access to the volume
+contents.
+
+Finally, while the `root` user inside a user namespaced container process has
+many of the expected admin privileges that go along with being the superuser, the
+Linux kernel has restrictions based on internal knowledge that this is a user namespaced
+process. The most notable restriction that we are aware of at this time is the
+inability to use `mknod`. Permission will be denied for device creation even as
+container `root` inside a user namespace.
+
 ## Miscellaneous options
 
 IP masquerading uses address translation to allow containers without a public
@@ -648,7 +789,7 @@
     /usr/local/bin/docker daemon -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
 
 
-# Default cgroup parent
+## Default cgroup parent
 
 The `--cgroup-parent` option allows you to set the default cgroup parent
 to use for containers. If this option is not set, it defaults to `/docker` for
@@ -666,3 +807,79 @@
 This setting can also be set per container, using the `--cgroup-parent`
 option on `docker create` and `docker run`, and takes precedence over
 the `--cgroup-parent` option on the daemon.
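+
+For example, a sketch of setting the parent per container (the cgroup path
+here is arbitrary):
+
+```
+$ docker run --cgroup-parent=/my-cgroup-parent -d busybox top
+```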
+
+## Daemon configuration file
+
+The `--config-file` option allows you to set any configuration option
+for the daemon in a JSON format. This file uses the same flag names as keys,
+except for flags that allow several entries, where it uses the plural
+of the flag name, e.g., `labels` for the `label` flag. By default,
+docker tries to load a configuration file from `/etc/docker/daemon.json`
+on Linux and `%programdata%\docker\config\daemon.json` on Windows.
+
+The options set in the configuration file must not conflict with options set
+via flags. The docker daemon fails to start if an option is duplicated between
+the file and the flags, regardless of their value. We do this to avoid
+silently ignoring changes introduced in configuration reloads.
+For example, the daemon fails to start if you set daemon labels
+in the configuration file and also set daemon labels via the `--label` flag.
+
+Options that are not present in the file are ignored when the daemon starts.
+This is a full example of the allowed configuration options in the file:
+
+```json
+{
+	"authorization-plugins": [],
+	"dns": [],
+	"dns-opts": [],
+	"dns-search": [],
+	"exec-opts": [],
+	"exec-root": "",
+	"storage-driver": "",
+	"storage-opts": "",
+	"labels": [],
+	"log-config": {
+		"log-driver": "",
+		"log-opts": []
+	},
+	"mtu": 0,
+	"pidfile": "",
+	"graph": "",
+	"cluster-store": "",
+	"cluster-store-opts": [],
+	"cluster-advertise": "",
+	"debug": true,
+	"hosts": [],
+	"log-level": "",
+	"tls": true,
+	"tls-verify": true,
+	"tls-opts": {
+		"tlscacert": "",
+		"tlscert": "",
+		"tlskey": ""
+	},
+	"api-cors-headers": "",
+	"selinux-enabled": false,
+	"userns-remap": "",
+	"group": "",
+	"cgroup-parent": "",
+	"default-ulimits": {}
+}
+```
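+
+For example, to start the daemon with an explicit configuration file path
+(here the documented default location):
+
+```
+$ docker daemon --config-file=/etc/docker/daemon.json
+```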
+
+### Configuration reloading
+
+Some options can be reconfigured while the daemon is running, without
+restarting the process. On Linux, the daemon reloads on the `SIGHUP` signal; on
+Windows, it reloads on a global event with the key
+`Global\docker-daemon-config-$PID`. The options can be modified in the
+configuration file, but the daemon still checks for conflicts with the
+provided flags. The daemon fails to reconfigure itself if there are conflicts,
+but it won't stop execution.
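+
+For example, on Linux, a sketch of triggering a reload after editing the
+configuration file (`pidof` is just one way to find the daemon's PID):
+
+```
+$ kill -SIGHUP $(pidof docker)
+```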
+
+The currently supported options that can be reconfigured are:
+
+- `debug`: changes the daemon to debug mode when set to `true`.
+- `label`: replaces the daemon labels with a new set of labels.
+- `cluster-store`: reloads the discovery store with the new address.
+- `cluster-store-opts`: uses the new options to reload the discovery store.
+- `cluster-advertise`: modifies the address advertised after reloading.
diff --git a/docs/reference/commandline/info.md b/docs/reference/commandline/info.md
index c776ced..51ae269 100644
--- a/docs/reference/commandline/info.md
+++ b/docs/reference/commandline/info.md
@@ -21,6 +21,9 @@
 
     $ docker -D info
     Containers: 14
+     Running: 3
+     Paused: 1
+     Stopped: 10
     Images: 52
     Server Version: 1.9.0
     Storage Driver: aufs
diff --git a/docs/reference/commandline/login.md b/docs/reference/commandline/login.md
index b79c18b..faf3615 100644
--- a/docs/reference/commandline/login.md
+++ b/docs/reference/commandline/login.md
@@ -30,7 +30,7 @@
 `docker login` requires user to use `sudo` or be `root`, except when: 
 
 1.  connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`.
-2.  user is added to the `docker` group.  This will impact the security of your system; the `docker` group is `root` equivalent.  See [Docker Daemon Attack Surface](https://docs.docker.com/articles/security/#docker-daemon-attack-surface) for details. 
+2.  user is added to the `docker` group.  This will impact the security of your system; the `docker` group is `root` equivalent.  See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. 
 
 You can log into any public or private repository for which you have
 credentials.  When you log in, the command stores encoded credentials in
diff --git a/docs/reference/commandline/network_connect.md b/docs/reference/commandline/network_connect.md
index dbecda6..b08dec3 100644
--- a/docs/reference/commandline/network_connect.md
+++ b/docs/reference/commandline/network_connect.md
@@ -14,9 +14,13 @@
 
     Connects a container to a network
 
+      --alias=[]         Add network-scoped alias for the container
       --help             Print usage
+      --ip               IPv4 Address
+      --ip6              IPv6 Address
+      --link=[]          Add a link to another container
 
-Connects a running container to a network. You can connect a container by name
+Connects a container to a network. You can connect a container by name
 or by ID. Once connected, the container can communicate with other containers in
 the same network.
 
@@ -33,15 +37,39 @@
 You can specify the IP address you want to be assigned to the container's interface.
 
 ```bash
-$ docker network connect multi-host-network --ip 10.10.36.122 container2
+$ docker network connect --ip 10.10.36.122 multi-host-network container2
+```
+
+You can use the `--link` option to link another container with a preferred alias.
+
+```bash
+$ docker network connect --link container1:c1 multi-host-network container2
+```
+
+The `--alias` option can be used to resolve the container by another name in the
+network being connected to.
+
+```bash
+$ docker network connect --alias db --alias mysql multi-host-network container2
 ```
 
 You can pause, restart, and stop containers that are connected to a network.
-Paused containers remain connected and a revealed by a `network inspect`. When
-the container is stopped, it does not appear on the network until you restart
-it. The container's IP address is not guaranteed to remain the same when a
-stopped container rejoins the network, unless you specified one when you run
-`docker network connect` command.
+Paused containers remain connected and can be revealed by a `network inspect`.
+When the container is stopped, it does not appear on the network until you restart
+it. If specified, the container's IP address(es) will be reapplied (if still available)
+when a stopped container rejoins the network. One way to guarantee that the container
+is assigned the same IP addresses when it rejoins the network after a stop
+or a disconnect is to specify `--ip-range` when creating the network, and to choose
+the static IP address(es) from outside that range. This ensures that the IP address
+is not given to another dynamic container while this container is off the network.
+
+```bash
+$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network
+```
+
+```bash
+$ docker network connect --ip 172.20.128.2 multi-host-network container2
+```
 
 To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network.
 
@@ -60,3 +88,4 @@
 * [network ls](network_ls.md)
 * [network rm](network_rm.md)
 * [Understand Docker container networks](../../userguide/networking/dockernetworks.md)
+* [Work with networks](../../userguide/networking/work-with-networks.md)
diff --git a/docs/reference/commandline/network_create.md b/docs/reference/commandline/network_create.md
index 0a9ac6f..a1bfdf5 100644
--- a/docs/reference/commandline/network_create.md
+++ b/docs/reference/commandline/network_create.md
@@ -18,9 +18,11 @@
     -d --driver=DRIVER       Driver to manage the Network bridge or overlay. The default is bridge.
     --gateway=[]             ipv4 or ipv6 Gateway for the master subnet
     --help                   Print usage
+    --internal               Restricts external access to the network
     --ip-range=[]            Allocate container ip from a sub-range
     --ipam-driver=default    IP Address Management Driver
     -o --opt=map[]           Set custom network plugin options
+    --ipam-opt=map[]         Set custom IPAM plugin options
     --subnet=[]              Subnet in CIDR format that represents a network segment
 
 Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the
@@ -120,6 +122,11 @@
 ```
 Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error.
 
+### Network internal mode
+
+By default, when you connect a container to an `overlay` network, Docker also connects a bridge network to it to provide external connectivity.
+If you want to create an externally isolated `overlay` network, you can specify the `--internal` option.
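+
+For example, a sketch of creating such a network (the name is arbitrary, and
+an `overlay` network assumes a daemon already configured for multi-host
+networking):
+
+```bash
+$ docker network create --driver overlay --internal my-internal-network
+```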
+
 ## Related information
 
 * [network inspect](network_inspect.md)
diff --git a/docs/reference/commandline/network_disconnect.md b/docs/reference/commandline/network_disconnect.md
index 7fb7a7c..10c4f16 100644
--- a/docs/reference/commandline/network_disconnect.md
+++ b/docs/reference/commandline/network_disconnect.md
@@ -12,8 +12,10 @@
 
     Usage:  docker network disconnect [OPTIONS] NETWORK CONTAINER
 
+
     Disconnects a container from a network
 
+      -f, --force        Force the container to disconnect from a network
       --help             Print usage
 
 Disconnects a container from a network. The container must be running to disconnect it from the network.
diff --git a/docs/reference/commandline/network_inspect.md b/docs/reference/commandline/network_inspect.md
index 00b886d..9e0d87f 100644
--- a/docs/reference/commandline/network_inspect.md
+++ b/docs/reference/commandline/network_inspect.md
@@ -17,7 +17,7 @@
       -f, --format=       Format the output using the given go template.
       --help             Print usage
 
-Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to a network:
+Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network:
 
 ```bash
 $ sudo docker run -itd --name=container1 busybox
@@ -78,6 +78,32 @@
 ]
 ```
 
+Returns information about a user-defined network:
+
+```bash
+$ docker network create simple-network
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
+$ docker network inspect simple-network
+[
+    {
+        "Name": "simple-network",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Config": [
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1/16"
+                }
+            ]
+        },
+        "Containers": {},
+        "Options": {}
+    }
+]
+```
 
 ## Related information
 
diff --git a/docs/reference/commandline/rmi.md b/docs/reference/commandline/rmi.md
index 69e8ef8..022a415 100644
--- a/docs/reference/commandline/rmi.md
+++ b/docs/reference/commandline/rmi.md
@@ -19,8 +19,9 @@
       --no-prune           Do not delete untagged parents
 
 You can remove an image using its short or long ID, its tag, or its digest. If
-an image has one or more tag or digest reference, you must remove all of them
-before the image is removed.
+an image has one or more tags referencing it, you must remove all of them before
+the image is removed. Digest references are removed automatically when an image
+is removed by tag.
 
     $ docker images
     REPOSITORY                TAG                 IMAGE ID            CREATED             SIZE
diff --git a/docs/reference/commandline/run.md b/docs/reference/commandline/run.md
index 78643e5..a36bbfb 100644
--- a/docs/reference/commandline/run.md
+++ b/docs/reference/commandline/run.md
@@ -46,6 +46,8 @@
       -h, --hostname=""             Container host name
       --help                        Print usage
       -i, --interactive             Keep STDIN open even if not attached
+      --ip=""                       Container IPv4 address (e.g. 172.30.100.104)
+      --ip6=""                      Container IPv6 address (e.g. 2001:db8::33)
       --ipc=""                      IPC namespace to use
       --isolation=""                Container isolation technology
       --kernel-memory=""            Kernel memory limit
@@ -56,8 +58,6 @@
       --log-opt=[]                  Log driver specific options
       -m, --memory=""               Memory limit
       --mac-address=""              Container MAC address (e.g. 92:d0:c6:0a:29:33)
-      --ip=""                       Container IPv4 address (e.g. 172.30.100.104)
-      --ip6=""                      Container IPv6 address (e.g. 2001:db8::33)
       --memory-reservation=""       Memory soft limit
       --memory-swap=""              A positive integer equal to memory plus swap. Specify -1 to enable unlimited swap.
       --memory-swappiness=""        Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
@@ -68,6 +68,7 @@
                                     'container:<name|id>': reuse another container's network stack
                                     'host': use the Docker host network stack
                                     '<network-name>|<network-id>': connect to a user-defined network
+      --net-alias=[]                Add network-scoped alias for the container
       --oom-kill-disable            Whether to disable OOM Killer for the container or not
       --oom-score-adj=0             Tune the host's OOM preferences for containers (accepts -1000 to 1000)
       -P, --publish-all             Publish all exposed ports to random ports
@@ -330,6 +331,13 @@
 $ docker run -itd --net=my-multihost-network busybox
 ```
 
+You can also choose the IP addresses for the container with the `--ip` and `--ip6`
+flags when you start the container on a user-defined network.
+
+```bash
+$ docker run -itd --net=my-multihost-network --ip=10.10.9.75 busybox
+```
+
 If you want to add a running container to a network use the `docker network connect` subcommand.
 
 You can connect multiple containers to the same network. Once connected, the
diff --git a/docs/reference/logging/overview.md b/docs/reference/logging/overview.md
index 8d91b0d..4ef937b 100644
--- a/docs/reference/logging/overview.md
+++ b/docs/reference/logging/overview.md
@@ -69,9 +69,13 @@
 
 The following logging options are supported for the `syslog` logging driver:
 
-    --log-opt syslog-address=[tcp|udp]://host:port
+    --log-opt syslog-address=[tcp|udp|tcp+tls]://host:port
     --log-opt syslog-address=unix://path
     --log-opt syslog-facility=daemon
+    --log-opt syslog-tls-ca-cert=/etc/ca-certificates/custom/ca.pem
+    --log-opt syslog-tls-cert=/etc/ca-certificates/custom/cert.pem
+    --log-opt syslog-tls-key=/etc/ca-certificates/custom/key.pem
+    --log-opt syslog-tls-skip-verify=true
     --log-opt tag="mailer"
 
 `syslog-address` specifies the remote syslog server address where the driver connects to.
@@ -107,6 +111,19 @@
 * `local6`
 * `local7`
 
+`syslog-tls-ca-cert` specifies the absolute path to the trust certificates
+signed by the CA. This option is ignored if the address protocol is not `tcp+tls`.
+
+`syslog-tls-cert` specifies the absolute path to the TLS certificate file.
+This option is ignored if the address protocol is not `tcp+tls`.
+
+`syslog-tls-key` specifies the absolute path to the TLS key file.
+This option is ignored if the address protocol is not `tcp+tls`.
+
+`syslog-tls-skip-verify` configures the TLS verification.
+This verification is enabled by default, but it can be overridden by setting
+this option to `true`. This option is ignored if the address protocol is not `tcp+tls`.
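+
+For example, the following sketch runs a container that logs to a remote
+syslog server over TLS (the server address, port, and certificate path are
+placeholders):
+
+    $ docker run --log-driver=syslog \
+        --log-opt syslog-address=tcp+tls://syslog.example.com:6514 \
+        --log-opt syslog-tls-ca-cert=/etc/ca-certificates/custom/ca.pem \
+        busybox echo hello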
+
 By default, Docker uses the first 12 characters of the container ID to tag log messages.
 Refer to the [log tag option documentation](log_tags.md) for customizing
 the log tag format.
diff --git a/docs/reference/run.md b/docs/reference/run.md
index e802f3e..95e0e0a 100644
--- a/docs/reference/run.md
+++ b/docs/reference/run.md
@@ -273,10 +273,11 @@
                         'container:<name|id>': reuse another container's network stack
                         'host': use the Docker host network stack
                         '<network-name>|<network-id>': connect to a user-defined network
+    --net-alias=[]   : Add network-scoped alias for the container
     --add-host=""    : Add a line to /etc/hosts (host:IP)
     --mac-address="" : Sets the container's Ethernet device's MAC address
     --ip=""          : Sets the container's Ethernet device's IPv4 address
-    --ip6=""          : Sets the container's Ethernet device's IPv6 address
+    --ip6=""         : Sets the container's Ethernet device's IPv6 address
 
 By default, all containers have networking enabled and they can make any
 outgoing connections. The operator can completely disable networking
@@ -1301,12 +1302,12 @@
 bound to 42800 on the host. To find the mapping between the host ports
 and the exposed ports, use `docker port`.
 
-If the operator uses `--link` when starting a new client container, then the
-client container can access the exposed port via a private networking interface.
-Linking is a legacy feature that is only supported on the default bridge
-network. You should prefer the Docker networks feature instead. For more
-information on this feature, see the [*Docker network
-overview*""](../userguide/networking/index.md)).
+If the operator uses `--link` when starting a new client container in the
+default bridge network, then the client container can access the exposed
+port via a private networking interface.
+If `--link` is used when starting a container in a user-defined network as
+described in the [*Docker network overview*](../userguide/networking/index.md),
+it will provide a named alias for the container being linked to.
 
 ### ENV (environment variables)
 
diff --git a/docs/security/apparmor.md b/docs/security/apparmor.md
index 07cd62c..c33240d 100644
--- a/docs/security/apparmor.md
+++ b/docs/security/apparmor.md
@@ -1,47 +1,74 @@
 <!-- [metadata]>
 +++
-draft = true
+title = "AppArmor security profiles for Docker"
+description = "Enabling AppArmor in Docker"
+keywords = ["AppArmor, security, docker, documentation"]
+[menu.main]
+parent= "smn_secure_docker"
 +++
 <![end-metadata]-->
 
-AppArmor security profiles for Docker
---------------------------------------
+# AppArmor security profiles for Docker
 
-AppArmor (Application Armor) is a security module that allows a system
-administrator to associate a security profile with each program. Docker
+AppArmor (Application Armor) is a Linux security module that protects an
+operating system and its applications from security threats. To use it, a system
+administrator associates an AppArmor security profile with each program. Docker
 expects to find an AppArmor policy loaded and enforced.
 
-Container profiles are loaded automatically by Docker. A profile
-for the Docker Engine itself also exists and is installed
-with the official *.deb* packages. Advanced users and package
-managers may find the profile for */usr/bin/docker* underneath
-[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
-in the Docker Engine source repository.
+Docker automatically loads container profiles. A profile for the Docker Engine
+itself also exists and is installed with the official *.deb* packages in
+the `/etc/apparmor.d/docker` file.
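+
+If AppArmor is enabled on the host, one way to check which profiles are
+currently loaded (for example, `docker-default` while containers are running)
+is AppArmor's status tool:
+
+```bash
+$ sudo aa-status
+```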
 
 
-Understand the policies
-------------------------
+## Understand the policies
 
-The `docker-default` profile the default for running
-containers. It is moderately protective while
-providing wide application compatibility.
-
-The system's standard `unconfined` profile inherits all
-system-wide policies, applying path-based policies
-intended for the host system inside of containers.
-This was the default for privileged containers
-prior to Docker 1.8.
-
-
-Overriding the profile for a container
----------------------------------------
-
-Users may override the AppArmor profile using the
-`security-opt` option (per-container).
-
-For example, the following explicitly specifies the default policy:
+The `docker-default` profile is the default for running containers. It is
+moderately protective while providing wide application compatibility. The
+profile is the following:
 
 ```
+#include <tunables/global>
+
+
+profile docker-default flags=(attach_disconnected,mediate_deleted) {
+
+  #include <abstractions/base>
+
+
+  network,
+  capability,
+  file,
+  umount,
+
+  deny @{PROC}/{*,**^[0-9*],sys/kernel/shm*} wkx,
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/mem rwklx,
+  deny @{PROC}/kmem rwklx,
+  deny @{PROC}/kcore rwklx,
+
+  deny mount,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/efi/efivars/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+}
+```
+
+When you run a container, it uses the `docker-default` policy unless you
+override it with the `security-opt` option. For example, the following
+explicitly specifies the default policy:
+
+```bash
 $ docker run --rm -it --security-opt apparmor:docker-default hello-world
 ```
 
+## Contributing to AppArmor code in Docker
+
+Advanced users and package managers can find a profile for `/usr/bin/docker`
+underneath
+[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
+in the Docker Engine source repository.
diff --git a/docs/security/index.md b/docs/security/index.md
new file mode 100644
index 0000000..6948b09
--- /dev/null
+++ b/docs/security/index.md
@@ -0,0 +1,20 @@
+<!-- [metadata]>
++++
+title = "Work with Docker security"
+description = "Work with Docker security"
+keywords = ["seccomp, security, docker, documentation"]
+[menu.main]
+identifier="smn_secure_docker"
+parent= "mn_use_docker"
++++
+<![end-metadata]-->
+
+# Work with Docker security
+
+This section discusses the security features you can configure and use within your Docker Engine installation.
+
+* You can configure Docker's trust features so that your users can push and pull trusted images. To learn how to do this, see [Use trusted images](trust/index.md) in this section.
+
+* You can configure secure computing mode (Seccomp) policies to secure system calls in a container. For more information, see [Seccomp security profiles for Docker](seccomp.md).
+
+* An AppArmor profile for Docker is installed with the official *.deb* packages. For information about this profile and overriding it, see [AppArmor security profiles for Docker](apparmor.md).
diff --git a/docs/security/seccomp.md b/docs/security/seccomp.md
index c8b7bde..b683be0 100644
--- a/docs/security/seccomp.md
+++ b/docs/security/seccomp.md
@@ -3,27 +3,26 @@
 title = "Seccomp security profiles for Docker"
 description = "Enabling seccomp in Docker"
 keywords = ["seccomp, security, docker, documentation"]
+[menu.main]
+parent= "smn_secure_docker"
 +++
 <![end-metadata]-->
 
-Seccomp security profiles for Docker
-------------------------------------
+# Seccomp security profiles for Docker
 
-The seccomp() system call operates on the Secure Computing (seccomp)
-state of the calling process.
+Secure computing mode (Seccomp) is a Linux kernel feature that you can use to
+restrict the actions available within a container. The `seccomp()` system
+call operates on the seccomp state of the calling process, and you can use
+this feature to restrict your application's access to system calls.
 
-This operation is available only if the kernel is configured
-with `CONFIG_SECCOMP` enabled.
+This feature is available only if the kernel is configured with `CONFIG_SECCOMP`
+enabled.
 
-This allows for allowing or denying of certain syscalls in a container.
+## Passing a profile for a container
 
-Passing a profile for a container
----------------------------------
-
-Users may pass a seccomp profile using the `security-opt` option
-(per-container).
-
-The profile has layout in the following form:
+The default seccomp profile provides a sane default for running containers with
+seccomp. It is moderately protective while providing wide application
+compatibility. The default Docker profile has the following layout:
 
 ```
 {
@@ -57,30 +56,14 @@
 }
 ```
 
-Then you can run with:
+When you run a container, it uses the default profile unless you override
+it with the `security-opt` option. For example, the following explicitly
+specifies a custom profile:
 
 ```
 $ docker run --rm -it --security-opt seccomp:/path/to/seccomp/profile.json hello-world
 ```
 
-Default Profile
----------------
-
-The default seccomp profile provides a sane default for running
-containers with seccomp. It is moderately protective while
-providing wide application compatibility.
-
-
-### Overriding the default profile for a container
-
-You can pass `unconfined` to run a container without the default seccomp
-profile.
-
-```
-$ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \
-    unshare --map-root-user --user sh -c whoami
-```
-
 ### Syscalls blocked by the default profile
 
 Docker's default seccomp profile is a whitelist which specifies the calls that
@@ -91,55 +74,65 @@
 | Syscall             | Description                                                                                                                           |
 |---------------------|---------------------------------------------------------------------------------------------------------------------------------------|
 | `acct`              | Accounting syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_PACCT`. |
-| `add_key`           | Prevent containers from using the kernel keyring, which is not namespaced.                                                            |
-| `adjtimex`          | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced.                                                           |
-| `bpf`               | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`.                                       |
-| `clock_adjtime`     | Time/date is not namespaced.                                                                                                          |
-| `clock_settime`     | Time/date is not namespaced.                                                                                                          |
-| `clone`             | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`.                                  |
-| `create_module`     | Deny manipulation and functions on kernel modules.                                                                                    |
-| `delete_module`     | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                                                    |
-| `finit_module`      | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                                                    |
-| `get_kernel_syms`   | Deny retrieval of exported kernel and module symbols.                                                                                 |
-| `get_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                                               |
-| `init_module`       | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                                                    |
-| `ioperm`            | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.                                      |
-| `iopl`              | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.                                      |
-| `kcmp`              | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                                                   |
-| `kexec_file_load`   | Sister syscall of `kexec_load` that does the same thing, slightly different arguments.                                                |
-| `kexec_load`        | Deny loading a new kernel for later execution.                                                                                        |
-| `keyctl`            | Prevent containers from using the kernel keyring, which is not namespaced.                                                            |
-| `lookup_dcookie`    | Tracing/profiling syscall, which could leak a lot of information on the host.                                                         |
-| `mbind`             | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                                               |
-| `modify_ldt`        | Old syscall only used in 16-bit code and a potential information leak.                                                                |
-| `mount`             | Deny mounting, already gated by `CAP_SYS_ADMIN`.                                                                                      |
-| `move_pages`        | Syscall that modifies kernel memory and NUMA settings.                                                                                |
-| `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_SYS_NICE`.                                                               |
-| `nfsservctl`        | Deny interaction with the kernel nfs daemon.                                                                                          |
-| `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`.                                                              |
-| `perf_event_open`   | Tracing/profiling syscall, which could leak a lot of information on the host.                                                         |
-| `personality`       | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns.      |
-| `pivot_root`        | Deny `pivot_root`, should be privileged operation.                                                                                    |
-| `process_vm_readv`  | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                                                   |
-| `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                                                   |
-| `ptrace`            | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_PTRACE`.               |
-| `query_module`      | Deny manipulation and functions on kernel modules.                                                                                    |
-| `quotactl`          | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`.      |
-| `reboot`            | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`.                                                                   |
+| `add_key`           | Prevent containers from using the kernel keyring, which is not namespaced.                                   |
+| `adjtimex`          | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced.                                  |
+| `bpf`               | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`.              |
+| `clock_adjtime`     | Time/date is not namespaced.                                                                                 |
+| `clock_settime`     | Time/date is not namespaced.                                                                                 |
+| `clone`             | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`.         |
+| `create_module`     | Deny manipulation and functions on kernel modules.                                                           |
+| `delete_module`     | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                           |
+| `finit_module`      | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                           |
+| `get_kernel_syms`   | Deny retrieval of exported kernel and module symbols.                                                        |
+| `get_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                      |
+| `init_module`       | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                           |
+| `ioperm`            | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.             |
+| `iopl`              | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.             |
+| `kcmp`              | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                          |
+| `kexec_file_load`   | Sister syscall of `kexec_load` that does the same thing, slightly different arguments.                       |
+| `kexec_load`        | Deny loading a new kernel for later execution.                                                               |
+| `keyctl`            | Prevent containers from using the kernel keyring, which is not namespaced.                                   |
+| `lookup_dcookie`    | Tracing/profiling syscall, which could leak a lot of information on the host.                                |
+| `mbind`             | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                      |
+| `modify_ldt`        | Old syscall only used in 16-bit code and a potential information leak.                                       |
+| `mount`             | Deny mounting, already gated by `CAP_SYS_ADMIN`.                                                             |
+| `move_pages`        | Syscall that modifies kernel memory and NUMA settings.                                                       |
+| `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_DAC_READ_SEARCH`.                              |
+| `nfsservctl`        | Deny interaction with the kernel nfs daemon.                                                                 |
+| `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`.                                     |
+| `perf_event_open`   | Tracing/profiling syscall, which could leak a lot of information on the host.                                |
+| `personality`       | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns. |
+| `pivot_root`        | Deny `pivot_root`, should be privileged operation.                                                           |
+| `process_vm_readv`  | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                          |
+| `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                          |
+| `ptrace`            | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_PTRACE`. |
+| `query_module`      | Deny manipulation and functions on kernel modules.                                                            |
+| `quotactl`          | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`. |
+| `reboot`            | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`.                                           |
 | `restart_syscall`   | Don't allow containers to restart a syscall. Possible seccomp bypass see: https://code.google.com/p/chromium/issues/detail?id=408827. |
-| `request_key`       | Prevent containers from using the kernel keyring, which is not namespaced.                                                            |
-| `set_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                                               |
-| `setns`             | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`.                                                            |
-| `settimeofday`      | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                                            |
-| `stime`             | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                                            |
-| `swapon`            | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                                               |
-| `swapoff`           | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                                               |
-| `sysfs`             | Obsolete syscall.                                                                                                                     |
-| `_sysctl`           | Obsolete, replaced by /proc/sys.                                                                                                      |
-| `umount`            | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`.                                                                      |
-| `umount2`           | Should be a privileged operation.                                                                                                     |
-| `unshare`           | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`.                     |
-| `uselib`            | Older syscall related to shared libraries, unused for a long time.                                                                    |
-| `ustat`             | Obsolete syscall.                                                                                                                     |
-| `vm86`              | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                                               |
-| `vm86old`           | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                                               |
+| `request_key`       | Prevent containers from using the kernel keyring, which is not namespaced.                                    |
+| `set_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                       |
+| `setns`             | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`.                                    |
+| `settimeofday`      | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                    |
+| `stime`             | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                    |
+| `swapon`            | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                       |
+| `swapoff`           | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                       |
+| `sysfs`             | Obsolete syscall.                                                                                             |
+| `_sysctl`           | Obsolete, replaced by /proc/sys.                                                                              |
+| `umount`            | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`.                                              |
+| `umount2`           | Should be a privileged operation.                                                                             |
+| `unshare`           | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`. |
+| `uselib`            | Older syscall related to shared libraries, unused for a long time.                                            |
+| `ustat`             | Obsolete syscall.                                                                                             |
+| `vm86`              | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                       |
+| `vm86old`           | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                       |
+
+## Run without the default seccomp profile
+
+You can pass `unconfined` to run a container without the default seccomp
+profile.
+
+```
+$ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \
+    unshare --map-root-user --user sh -c whoami
+```
diff --git a/docs/articles/security.md b/docs/security/security.md
similarity index 97%
rename from docs/articles/security.md
rename to docs/security/security.md
index 92f02dc..d6b11e4 100644
--- a/docs/articles/security.md
+++ b/docs/security/security.md
@@ -1,11 +1,12 @@
 <!--[metadata]>
 +++
+aliases = ["/engine/articles/security/"]
 title = "Docker security"
 description = "Review of the Docker Daemon attack surface"
 keywords = ["Docker, Docker documentation,  security"]
 [menu.main]
-parent = "smn_administrate"
-weight = 2
+parent = "smn_secure_docker"
+weight = -99
 +++
 <![end-metadata]-->
 
@@ -277,8 +278,9 @@
 be implemented in Docker as well. We welcome users to submit issues,
 pull requests, and communicate via the mailing list.
 
-References:
+## Related information
 
-* [Docker Containers: How Secure Are They? (2013)](
-http://blog.docker.com/2013/08/containers-docker-how-secure-are-they/).
-* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e).
+* [Use trusted images](../security/trust/index.md)
+* [Seccomp security profiles for Docker](../security/seccomp.md)
+* [AppArmor security profiles for Docker](../security/apparmor.md)
+* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e)
diff --git a/docs/userguide/dockervolumes.md b/docs/userguide/dockervolumes.md
index 0e7b275..5ddc9c1 100644
--- a/docs/userguide/dockervolumes.md
+++ b/docs/userguide/dockervolumes.md
@@ -73,7 +73,8 @@
             "Destination": "/webapp",
             "Driver": "local",
             "Mode": "",
-            "RW": true
+            "RW": true,
+            "Propagation": ""
         }
     ]
     ...
diff --git a/docs/userguide/labels-custom-metadata.md b/docs/userguide/labels-custom-metadata.md
index 1a57eaf..ae6a3c5 100644
--- a/docs/userguide/labels-custom-metadata.md
+++ b/docs/userguide/labels-custom-metadata.md
@@ -192,6 +192,9 @@
 
     $ docker -D info
     Containers: 12
+     Running: 5
+     Paused: 2
+     Stopped: 5
     Images: 672
     Server Version: 1.9.0
     Storage Driver: aufs
diff --git a/docs/userguide/networking/default_network/build-bridges.md b/docs/userguide/networking/default_network/build-bridges.md
index a17d7fa..73f35e3 100644
--- a/docs/userguide/networking/default_network/build-bridges.md
+++ b/docs/userguide/networking/default_network/build-bridges.md
@@ -10,7 +10,7 @@
 
 # Build your own bridge
 
-This section explains building your own bridge to replaced the Docker default
+This section explains how to build your own bridge to replace the Docker default
 bridge. This is a `bridge` network named `bridge` created automatically when you
 install Docker.
 
@@ -18,9 +18,10 @@
 create user-defined networks in addition to the default bridge network.
 
 You can set up your own bridge before starting Docker and use `-b BRIDGE` or
-`--bridge=BRIDGE` to tell Docker to use your bridge instead.  If you already
-have Docker up and running with its default `docker0` still configured, you will
-probably want to begin by stopping the service and removing the interface:
+`--bridge=BRIDGE` to tell Docker to use your bridge instead. If you already
+have Docker up and running with its default `docker0` still configured,
+you can directly create your bridge and restart Docker with it, or you may want
+to begin by stopping the service and removing the interface:
 
 ```
 # Stopping Docker and removing docker0
@@ -32,7 +33,7 @@
 ```
 
 Then, before starting the Docker service, create your own bridge and give it
-whatever configuration you want.  Here we will create a simple enough bridge
+whatever configuration you want. Here we will create a simple enough bridge
 that we really could just have used the options in the previous section to
 customize `docker0`, but it will be enough to illustrate the technique.
 
@@ -66,7 +67,7 @@
 ```
 
 The result should be that the Docker server starts successfully and is now
-prepared to bind containers to the new bridge.  After pausing to verify the
+prepared to bind containers to the new bridge. After pausing to verify the
 bridge's configuration, try creating a container -- you will see that its IP
 address is in your new IP address range, which Docker will have auto-detected.
 
diff --git a/docs/userguide/networking/default_network/dockerlinks.md b/docs/userguide/networking/default_network/dockerlinks.md
index cfd77a6..0c71d97 100644
--- a/docs/userguide/networking/default_network/dockerlinks.md
+++ b/docs/userguide/networking/default_network/dockerlinks.md
@@ -17,14 +17,11 @@
 Docker link feature to allow containers to discover each other and securely
 transfer information about one container to another container. With the
 introduction of the Docker networks feature, you can still create links but they
-are only supported on the default `bridge` network named `bridge` and appearing
-in your network stack as `docker0`.
+behave differently between the default `bridge` network and
+[user-defined networks](../work-with-networks.md#linking-containers-in-user-defined-networks).
 
 This section briefly discusses connecting via a network port and then goes into
-detail on container linking. While links are still supported on Docker's default
-network (`bridge`), you should avoid them in preference of the Docker
-networks feature. Linking is expected to be deprecated and removed in a future
-release.
+detail on container linking in the default `bridge` network.
 
 ## Connect using network port mapping
 
diff --git a/docs/userguide/networking/dockernetworks.md b/docs/userguide/networking/dockernetworks.md
index 3f8b9a4..6e76884 100644
--- a/docs/userguide/networking/dockernetworks.md
+++ b/docs/userguide/networking/dockernetworks.md
@@ -305,19 +305,22 @@
 
 ```
 $ docker network create --driver bridge isolated_nw
-c5ee82f76de30319c75554a57164c682e7372d2c694fec41e42ac3b77e570f6b
+1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b
 
 $ docker network inspect isolated_nw
 [
     {
         "Name": "isolated_nw",
-        "Id": "c5ee82f76de30319c75554a57164c682e7372d2c694fec41e42ac3b77e570f6b",
+        "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
         "Scope": "local",
         "Driver": "bridge",
         "IPAM": {
             "Driver": "default",
             "Config": [
-                {}
+                {
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
+                }
             ]
         },
         "Containers": {},
@@ -338,13 +341,13 @@
 
 ```
 $ docker run --net=isolated_nw -itd --name=container3 busybox
-885b7b4f792bae534416c95caa35ba272f201fa181e18e59beba0c80d7d77c1d
+8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c
 
 $ docker network inspect isolated_nw
 [
     {
         "Name": "isolated_nw",
-        "Id": "c5ee82f76de30319c75554a57164c682e7372d2c694fec41e42ac3b77e570f6b",
+        "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
         "Scope": "local",
         "Driver": "bridge",
         "IPAM": {
@@ -354,8 +357,8 @@
             ]
         },
         "Containers": {
-            "885b7b4f792bae534416c95caa35ba272f201fa181e18e59beba0c80d7d77c1d": {
-                "EndpointID": "514e1b419074397ea92bcfaa6698d17feb62db49d1320a27393b853ec65319c3",
+            "8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c": {
+                "EndpointID": "93b2db4a9b9a997beb912d28bcfc117f7b0eb924ff91d48cfa251d473e6a9b08",
                 "MacAddress": "02:42:ac:15:00:02",
                 "IPv4Address": "172.21.0.2/16",
                 "IPv6Address": ""
diff --git a/docs/userguide/networking/work-with-networks.md b/docs/userguide/networking/work-with-networks.md
index 3655e47..d5fac70 100644
--- a/docs/userguide/networking/work-with-networks.md
+++ b/docs/userguide/networking/work-with-networks.md
@@ -36,18 +36,21 @@
 
 ```bash
 $ docker network create simple-network
-de792b8258895cf5dc3b43835e9d61a9803500b991654dacb1f4f0546b1c88f8
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
 $ docker network inspect simple-network
 [
     {
         "Name": "simple-network",
-        "Id": "de792b8258895cf5dc3b43835e9d61a9803500b991654dacb1f4f0546b1c88f8",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
         "Scope": "local",
         "Driver": "bridge",
         "IPAM": {
             "Driver": "default",
             "Config": [
-                {}
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1/16"
+                }
             ]
         },
         "Containers": {},
@@ -134,7 +137,8 @@
             "Driver": "default",
             "Config": [
                 {
-                    "Subnet": "172.25.0.0/16"
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
                 }
             ]
         },
@@ -174,7 +178,8 @@
 
 ```bash
 $ docker inspect --format='{{json .NetworkSettings.Networks}}'  container3
-{"isolated_nw":{"IPAMConfig":{"IPv4Address":"172.25.3.3"},"EndpointID":"dffc7ec2915af58cc827d995e6ebdc897342be0420123277103c40ae35579103","Gateway":"172.25.0.1","IPAddress":"172.25.3.3","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:19:03:03"}}
+{"isolated_nw":{"IPAMConfig":{"IPv4Address":"172.25.3.3"},"NetworkID":"1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
+"EndpointID":"dffc7ec2915af58cc827d995e6ebdc897342be0420123277103c40ae35579103","Gateway":"172.25.0.1","IPAddress":"172.25.3.3","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:19:03:03"}}
 ```
 Repeat this command for `container2`. If you have Python installed, you can pretty print the output.
 
@@ -182,6 +187,7 @@
 $ docker inspect --format='{{json .NetworkSettings.Networks}}'  container2 | python -m json.tool
 {
     "bridge": {
+        "NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
         "EndpointID": "0099f9efb5a3727f6a554f176b1e96fca34cae773da68b3b6a26d046c12cb365",
         "Gateway": "172.17.0.1",
         "GlobalIPv6Address": "",
@@ -193,6 +199,7 @@
         "MacAddress": "02:42:ac:11:00:03"
     },
     "isolated_nw": {
+        "NetworkID":"1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
         "EndpointID": "11cedac1810e864d6b1589d92da12af66203879ab89f4ccd8c8fdaa9b1c48b1d",
         "Gateway": "172.25.0.1",
         "GlobalIPv6Address": "",
@@ -268,7 +275,7 @@
 round-trip min/avg/max = 0.070/0.081/0.097 ms
 ```
 
-This isn't the case for the default bridge network. Both `container2` and  `container1` are connected to the default bridge network. Docker does not support automatic service discovery on this network. For this reason, pinging  `container1` by name fails as you would expect based on the `/etc/hosts` file:
+This isn't the case for the default `bridge` network. Both `container2` and  `container1` are connected to the default bridge network. Docker does not support automatic service discovery on this network. For this reason, pinging  `container1` by name fails as you would expect based on the `/etc/hosts` file:
 
 ```bash
 / # ping -w 4 container1
@@ -311,9 +318,318 @@
 
 ```
 
-To connect a container to a network, the container must be running. If you stop
-a container and inspect a network it belongs to, you won't see that container.
-The `docker network inspect` command only shows running containers.
+You can connect both running and non-running containers to a network. However,
+`docker network inspect` only displays information on running containers.
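+
+For example, one way to see this is to stop a container, inspect the network,
+and then restart the container:
+
+```bash
+$ docker stop container2
+$ docker network inspect --format='{{json .Containers}}' isolated_nw
+$ docker start container2
+```
+
+While `container2` is stopped, it is absent from the `Containers` map in the
+inspect output; it reappears after a restart.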
+
+### Linking containers in user-defined networks
+
+In the above example, container2 was able to resolve container3's name automatically
+in the user-defined network `isolated_nw`, but the name resolution did not succeed
+automatically in the default `bridge` network. This is expected in order to maintain
+backward compatibility with [legacy links](default_network/dockerlinks.md).
+
+The `legacy link` provided four major functionalities to the default `bridge` network.
+
+* name resolution
+* name alias for the linked container using `--link=CONTAINER-NAME:ALIAS`
+* secured container connectivity (in isolation via `--icc=false`)
+* environment variable injection
+
+Comparing the above four functionalities with non-default user-defined networks such as
+`isolated_nw` in this example, without any additional config, `docker network` provides
+
+* automatic name resolution using DNS
+* an automatically secured, isolated environment for the containers in a network
+* the ability to dynamically attach to and detach from multiple networks
+* support for the `--link` option to provide a name alias for the linked container
+
+Continuing with the above example, create another container `container4` in `isolated_nw`
+with `--link` to provide additional name resolution using aliases for other containers in
+the same network.
+
+```bash
+$ docker run --net=isolated_nw -itd --name=container4 --link container5:c5 busybox
+01b5df970834b77a9eadbaff39051f237957bd35c4c56f11193e0594cfd5117c
+```
+
+Thanks to `--link`, `container4` will be able to reach `container5` by the alias `c5`
+as well as by its container name.
+
+Note that when creating `container4`, we linked to a container named `container5` which
+does not exist yet. This is one of the behavioral differences between legacy links in the
+default `bridge` network and the new link functionality in user-defined networks. A legacy
+link is static: it hard-binds a container to an alias and does not tolerate restarts of
+the linked container. The new link functionality in user-defined networks is dynamic: it
+supports linked container restarts, including IP address changes on the linked container.
+
+Now launch another container named `container5`, linking `container4` with the alias `c4`.
+
+```bash
+$ docker run --net=isolated_nw -itd --name=container5 --link container4:c4 busybox
+72eccf2208336f31e9e33ba327734125af00d1e1d2657878e2ee8154fbb23c7a
+```
+
+As expected, `container4` can reach `container5` by both its container name and its
+alias `c5`, and `container5` can reach `container4` by its container name and its
+alias `c4`.
+
+```bash
+$ docker attach container4
+/ # ping -w 4 c5
+PING c5 (172.25.0.5): 56 data bytes
+64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.5: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.5: seq=3 ttl=64 time=0.097 ms
+
+--- c5 ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+
+/ # ping -w 4 container5
+PING container5 (172.25.0.5): 56 data bytes
+64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.5: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.5: seq=3 ttl=64 time=0.097 ms
+
+--- container5 ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+```
+
+```bash
+$ docker attach container5
+/ # ping -w 4 c4
+PING c4 (172.25.0.4): 56 data bytes
+64 bytes from 172.25.0.4: seq=0 ttl=64 time=0.065 ms
+64 bytes from 172.25.0.4: seq=1 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.4: seq=2 ttl=64 time=0.067 ms
+64 bytes from 172.25.0.4: seq=3 ttl=64 time=0.082 ms
+
+--- c4 ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.065/0.070/0.082 ms
+
+/ # ping -w 4 container4
+PING container4 (172.25.0.4): 56 data bytes
+64 bytes from 172.25.0.4: seq=0 ttl=64 time=0.065 ms
+64 bytes from 172.25.0.4: seq=1 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.4: seq=2 ttl=64 time=0.067 ms
+64 bytes from 172.25.0.4: seq=3 ttl=64 time=0.082 ms
+
+--- container4 ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.065/0.070/0.082 ms
+```
+
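+The new link functionality also survives restarts of the linked container. For example,
+restarting `container5` and pinging its alias again from `container4` should still
+succeed, even if `container5` comes back with a different IP address (a sketch; the
+elided output will vary):
+
+```bash
+$ docker restart container5
+$ docker attach container4
+/ # ping -w 4 c5
+PING c5 (172.25.0.5): 56 data bytes
+...
+```
+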
+As with the legacy link functionality, the new link alias is localized to a container:
+the aliased name has no meaning outside of the container using `--link`.
+
+Also note that if a container belongs to multiple networks, the link alias is scoped
+within each network. A container can therefore be linked under different aliases in
+different networks.
+
+Extending the example, create another network named `local_alias`:
+
+```bash
+$ docker network create -d bridge --subnet 172.26.0.0/24 local_alias
+76b7dc932e037589e6553f59f76008e5b76fa069638cd39776b890607f567aaa
+```
+
+Then connect `container4` and `container5` to the new network `local_alias`:
+
+```bash
+$ docker network connect --link container5:foo local_alias container4
+$ docker network connect --link container4:bar local_alias container5
+```
+
+```bash
+$ docker attach container4
+
+/ # ping -w 4 foo
+PING foo (172.26.0.3): 56 data bytes
+64 bytes from 172.26.0.3: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.26.0.3: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.26.0.3: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.26.0.3: seq=3 ttl=64 time=0.097 ms
+
+--- foo ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+
+/ # ping -w 4 c5
+PING c5 (172.25.0.5): 56 data bytes
+64 bytes from 172.25.0.5: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.5: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.5: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.5: seq=3 ttl=64 time=0.097 ms
+
+--- c5 ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+```
+
+Note that the ping succeeds for both aliases, but on different networks. To conclude
+this section, disconnect `container5` from `isolated_nw` and observe the results:
+
+```bash
+$ docker network disconnect isolated_nw container5
+
+$ docker attach container4
+
+/ # ping -w 4 c5
+ping: bad address 'c5'
+
+/ # ping -w 4 foo
+PING foo (172.26.0.3): 56 data bytes
+64 bytes from 172.26.0.3: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.26.0.3: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.26.0.3: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.26.0.3: seq=3 ttl=64 time=0.097 ms
+
+--- foo ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+
+```
+
+In conclusion, the new link functionality in user-defined networks provides all the
+benefits of legacy links while avoiding most of their well-known issues.
+
+One notable capability missing compared to legacy links is the injection of environment
+variables. Though very useful, environment variable injection is static in nature: the
+variables must be injected when the container is started, and one cannot inject them into
+a running container without significant effort. It is therefore incompatible with
+`docker network`, which provides a dynamic way to connect containers to and
+disconnect them from a network.
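+
+For illustration, on the default `bridge` network a legacy link injects variables such
+as `<ALIAS>_NAME` into the consuming container, while a link in a user-defined network
+injects none. A sketch, reusing containers from this example:
+
+```bash
+# legacy link on the default bridge network: expect variables such as C1_NAME
+$ docker run --rm --link container1:c1 busybox env | grep C1_
+# link in a user-defined network: expect no injected variables
+$ docker run --rm --net=isolated_nw --link container3:c3 busybox env | grep C3_
+```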
+
+### Network-scoped alias
+
+While links provide private name resolution that is localized within a container,
+a network-scoped alias provides a way for a container to be discovered, under an
+alternate name, by any other container within the scope of a particular network.
+Unlike a link alias, which is defined by the consumer of a service, the
+network-scoped alias is defined by the container that offers the service
+to the network.
+
+Continuing with the above example, create another container in `isolated_nw` with a
+network alias.
+
+```bash
+$ docker run --net=isolated_nw -itd --name=container6 --net-alias app busybox
+8ebe6767c1e0361f27433090060b33200aac054a68476c3be87ef4005eb1df17
+```
+
+```bash
+$ docker attach container4
+/ # ping -w 4 app
+PING app (172.25.0.6): 56 data bytes
+64 bytes from 172.25.0.6: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.6: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.6: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.6: seq=3 ttl=64 time=0.097 ms
+
+--- app ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+
+/ # ping -w 4 container6
+PING container6 (172.25.0.6): 56 data bytes
+64 bytes from 172.25.0.6: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.6: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.6: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.6: seq=3 ttl=64 time=0.097 ms
+
+--- container6 ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+```
+
+Now connect `container6` to the `local_alias` network with a different network-scoped
+alias:
+
+```bash
+$ docker network connect --alias scoped-app local_alias container6
+```
+
+`container6` in this example is now aliased as `app` in network `isolated_nw` and as
+`scoped-app` in network `local_alias`.
+
+Let's try to reach these aliases from `container4` (which is connected to both these networks)
+and `container5` (which is connected only to `isolated_nw`).
+
+```bash
+$ docker attach container4
+
+/ # ping -w 4 scoped-app
+PING scoped-app (172.26.0.5): 56 data bytes
+64 bytes from 172.26.0.5: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.26.0.5: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.26.0.5: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.26.0.5: seq=3 ttl=64 time=0.097 ms
+
+--- scoped-app ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+
+$ docker attach container5
+
+/ # ping -w 4 scoped-app
+ping: bad address 'scoped-app'
+
+```
+
+As you can see, an alias is scoped to the network it is defined on, so only
+containers that are connected to that network can resolve it.
+
+In addition, multiple containers can share the same network-scoped alias within the
+same network. For example, launch `container7` in `isolated_nw` with the same alias
+as `container6`:
+
+```bash
+$ docker run --net=isolated_nw -itd --name=container7 --net-alias app busybox
+3138c678c123b8799f4c7cc6a0cecc595acbdfa8bf81f621834103cd4f504554
+```
+
+When multiple containers share the same alias, the alias resolves to one of the
+containers (typically the first container given that alias). When the container
+currently backing the alias goes down or is disconnected from the network, the alias
+resolves to the next container that backs it.
+
+Ping the alias `app` from `container4`, then bring down `container6` to verify that
+`container7` now backs the `app` alias:
+
+```bash
+$ docker attach container4
+/ # ping -w 4 app
+PING app (172.25.0.6): 56 data bytes
+64 bytes from 172.25.0.6: seq=0 ttl=64 time=0.070 ms
+64 bytes from 172.25.0.6: seq=1 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.6: seq=2 ttl=64 time=0.080 ms
+64 bytes from 172.25.0.6: seq=3 ttl=64 time=0.097 ms
+
+--- app ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.070/0.081/0.097 ms
+
+$ docker stop container6
+
+$ docker attach container4
+/ # ping -w 4 app
+PING app (172.25.0.7): 56 data bytes
+64 bytes from 172.25.0.7: seq=0 ttl=64 time=0.095 ms
+64 bytes from 172.25.0.7: seq=1 ttl=64 time=0.075 ms
+64 bytes from 172.25.0.7: seq=2 ttl=64 time=0.072 ms
+64 bytes from 172.25.0.7: seq=3 ttl=64 time=0.101 ms
+
+--- app ping statistics ---
+4 packets transmitted, 4 packets received, 0% packet loss
+round-trip min/avg/max = 0.072/0.085/0.101 ms
+
+```
 
 ## Disconnecting containers
 
@@ -326,6 +642,7 @@
 docker inspect --format='{{json .NetworkSettings.Networks}}'  container2 | python -m json.tool
 {
     "bridge": {
+        "NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
         "EndpointID": "9e4575f7f61c0f9d69317b7a4b92eefc133347836dd83ef65deffa16b9985dc0",
         "Gateway": "172.17.0.1",
         "GlobalIPv6Address": "",
@@ -349,7 +666,8 @@
             "Driver": "default",
             "Config": [
                 {
-                    "Subnet": "172.25.0.0/16"
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
                 }
             ]
         },
@@ -433,7 +751,8 @@
             "Driver": "default",
             "Config": [
                 {
-                    "Subnet": "172.25.0.0/16"
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
                 }
             ]
         },
diff --git a/docs/userguide/networkingcontainers.md b/docs/userguide/networkingcontainers.md
index c0b9dc8..bf0b71e 100644
--- a/docs/userguide/networkingcontainers.md
+++ b/docs/userguide/networkingcontainers.md
@@ -189,7 +189,8 @@
 You can also inspect your container to see where it is connected:
 
     $ docker inspect --format='{{json .NetworkSettings.Networks}}'  db
-    {"bridge":{"EndpointID":"508b170d56b2ac9e4ef86694b0a76a22dd3df1983404f7321da5649645bf7043","Gateway":"172.18.0.1","IPAddress":"172.18.0.2","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:11:00:02"}}
+    {"my-bridge-network":{"NetworkID":"7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99",
+    "EndpointID":"508b170d56b2ac9e4ef86694b0a76a22dd3df1983404f7321da5649645bf7043","Gateway":"172.18.0.1","IPAddress":"172.18.0.2","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:11:00:02"}}
 
 Now, go ahead and start your by now familiar web application. This time leave off the `-P` flag and also don't specify a network.
 
@@ -198,7 +199,8 @@
 Which network is your `web` application running under? Inspect the application and you'll find it is running in the default `bridge` network.
 
     $ docker inspect --format='{{json .NetworkSettings.Networks}}'  web
-    {"bridge":{"EndpointID":"508b170d56b2ac9e4ef86694b0a76a22dd3df1983404f7321da5649645bf7043","Gateway":"172.17.0.1","IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:11:00:02"}}
+    {"bridge":{"NetworkID":"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
+    "EndpointID":"508b170d56b2ac9e4ef86694b0a76a22dd3df1983404f7321da5649645bf7043","Gateway":"172.17.0.1","IPAddress":"172.17.0.2","IPPrefixLen":16,"IPv6Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"MacAddress":"02:42:ac:11:00:02"}}
 
 Then, get the IP address of your `web`
 
diff --git a/docs/userguide/storagedriver/device-mapper-driver.md b/docs/userguide/storagedriver/device-mapper-driver.md
index 20af7e0..4d81d71 100644
--- a/docs/userguide/storagedriver/device-mapper-driver.md
+++ b/docs/userguide/storagedriver/device-mapper-driver.md
@@ -249,11 +249,11 @@
     NAME                       MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
     xvda                       202:0    0    8G  0 disk
     └─xvda1                    202:1    0    8G  0 part /
-    xvdf                       202:80   0  100G  0 disk
+    xvdf                       202:80   0   10G  0 disk
     ├─vg--docker-data          253:0    0   90G  0 lvm
-    │ └─docker-202:1-1032-pool 253:2    0  100G  0 dm
+    │ └─docker-202:1-1032-pool 253:2    0   10G  0 dm
     └─vg--docker-metadata      253:1    0    4G  0 lvm
-      └─docker-202:1-1032-pool 253:2    0  100G  0 dm
+      └─docker-202:1-1032-pool 253:2    0   10G  0 dm
 
 The diagram below shows the image from prior examples updated with the detail from the `lsblk` command above.
 
diff --git a/errors/daemon.go b/errors/daemon.go
index 1cb0da5..278e712 100644
--- a/errors/daemon.go
+++ b/errors/daemon.go
@@ -46,6 +46,15 @@
 		HTTPStatusCode: http.StatusInternalServerError,
 	})
 
+	// ErrorCodeRemovalContainer is generated when we attempt to connect or disconnect a
+	// container but it's marked for removal.
+	ErrorCodeRemovalContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:          "REMOVALCONTAINER",
+		Message:        "Container %s is marked for removal and cannot be connected to or disconnected from the network",
+		Description:    "The specified container is marked for removal and cannot be connected to or disconnected from the network",
+		HTTPStatusCode: http.StatusInternalServerError,
+	})
+
 	// ErrorCodePausedContainer is generated when we attempt to attach a
 	// container but its paused.
 	ErrorCodePausedContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
diff --git a/experimental/README.md b/experimental/README.md
index d2eff37..659780e 100644
--- a/experimental/README.md
+++ b/experimental/README.md
@@ -72,7 +72,7 @@
 ## Current experimental features
 
  * [External graphdriver plugins](plugins_graphdriver.md)
- * [User namespaces](userns.md)
+ * The user namespaces feature has graduated from experimental.
 
 ## How to comment on an experimental feature
 
diff --git a/experimental/userns.md b/experimental/userns.md
deleted file mode 100644
index cb713f7..0000000
--- a/experimental/userns.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# Experimental: User namespace support
-
-Linux kernel [user namespace support](http://man7.org/linux/man-pages/man7/user_namespaces.7.html) provides additional security by enabling
-a process--and therefore a container--to have a unique range of user and
-group IDs which are outside the traditional user and group range utilized by
-the host system. Potentially the most important security improvement is that,
-by default, container processes running as the `root` user will have expected
-administrative privilege (with some restrictions) inside the container but will
-effectively be mapped to an unprivileged `uid` on the host.
-
-In this experimental phase, the Docker daemon creates a single daemon-wide mapping
-for all containers running on the same engine instance. The mappings will
-utilize the existing subordinate user and group ID feature available on all modern
-Linux distributions.
-The [`/etc/subuid`](http://man7.org/linux/man-pages/man5/subuid.5.html) and
-[`/etc/subgid`](http://man7.org/linux/man-pages/man5/subgid.5.html) files will be
-read for the user, and optional group, specified to the `--userns-remap`
-parameter.  If you do not wish to specify your own user and/or group, you can
-provide `default` as the value to this flag, and a user will be created on your behalf
-and provided subordinate uid and gid ranges. This default user will be named
-`dockremap`, and entries will be created for it in `/etc/passwd` and
-`/etc/group` using your distro's standard user and group creation tools.
-
-> **Note**: The single mapping per-daemon restriction exists for this experimental
-> phase because Docker shares image layers from its local cache across all
-> containers running on the engine instance.  Since file ownership must be
-> the same for all containers sharing the same layer content, the decision
-> was made to map the file ownership on `docker pull` to the daemon's user and
-> group mappings so that there is no delay for running containers once the
-> content is downloaded--exactly the same performance characteristics as with
-> user namespaces disabled.
-
-## Starting the daemon with user namespaces enabled
-To enable this experimental user namespace support for a Docker daemon instance,
-start the daemon with the aforementioned `--userns-remap` flag, which accepts
-values in the following formats:
-
- - uid
- - uid:gid
- - username
- - username:groupname
-
-If numeric IDs are provided, translation back to valid user or group names
-will occur so that the subordinate uid and gid information can be read, given
-these resources are name-based, not id-based.  If the numeric ID information
-provided does not exist as entries in `/etc/passwd` or `/etc/group`, daemon
-startup will fail with an error message.
-
-*An example: starting with default Docker user management:*
-
-```
-     $ docker daemon --userns-remap=default
-```    
-In this case, Docker will create--or find the existing--user and group
-named `dockremap`. If the user is created, and the Linux distribution has
-appropriate support, the `/etc/subuid` and `/etc/subgid` files will be populated
-with a contiguous 65536 length range of subordinate user and group IDs, starting
-at an offset based on prior entries in those files.  For example, Ubuntu will
-create the following range, based on an existing user already having the first
-65536 range:
-
-```
-     $ cat /etc/subuid
-     user1:100000:65536
-     dockremap:165536:65536
-```
-
-> **Note:** On a fresh Fedora install, we found that we had to `touch` the
-> `/etc/subuid` and `/etc/subgid` files to have ranges assigned when users
-> were created.  Once these files existed, range assignment on user creation
-> worked properly.
-
-If you have a preferred/self-managed user with subordinate ID mappings already
-configured, you can provide that username or uid to the `--userns-remap` flag.
-If you have a group that doesn't match the username, you may provide the `gid`
-or group name as well; otherwise the username will be used as the group name
-when querying the system for the subordinate group ID range.
-
-## Detailed information on `subuid`/`subgid` ranges
-
-Given there may be advanced use of the subordinate ID ranges by power users, we will
-describe how the Docker daemon uses the range entries within these files under the
-current experimental user namespace support.
-
-The simplest case exists where only one contiguous range is defined for the
-provided user or group. In this case, Docker will use that entire contiguous
-range for the mapping of host uids and gids to the container process.  This
-means that the first ID in the range will be the remapped root user, and the
-IDs above that initial ID will map host ID 1 through the end of the range.
-
-From the example `/etc/subid` content shown above, that means the remapped root
-user would be uid 165536.
-
-If the system administrator has set up multiple ranges for a single user or
-group, the Docker daemon will read all the available ranges and use the
-following algorithm to create the mapping ranges:
-
-1. The ranges will be sorted by *start ID* ascending
-2. Maps will be created from each range with where the host ID will increment starting at 0 for the first range, 0+*range1* length for the second, and so on.  This means that the lowest range start ID will be the remapped root, and all further ranges will map IDs from 1 through the uid or gid that equals the sum of all range lengths.
-3. Ranges segments above five will be ignored as the kernel ignores any ID maps after five (in `/proc/self/{u,g}id_map`)
-
-## User namespace known restrictions
-
-The following standard Docker features are currently incompatible when
-running a Docker daemon with experimental user namespaces enabled:
-
- - sharing namespaces with the host (--pid=host, --net=host, etc.)
- - sharing namespaces with other containers (--net=container:*other*)
- - A `--readonly` container filesystem (a Linux kernel restriction on remount with new flags of a currently mounted filesystem when inside a user namespace)
- - external (volume/graph) drivers which are unaware/incapable of using daemon user mappings
- - Using `--privileged` mode containers
- - volume use without pre-arranging proper file ownership in mounted volumes
-
-Additionally, while the `root` user inside a user namespaced container
-process has many of the privileges of the administrative root user, the
-following operations will fail:
-
- - Use of `mknod` - permission is denied for device creation by the container root
- - others will be listed here when fully tested
diff --git a/hack/make.sh b/hack/make.sh
index 5aa044d..78f25cd 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -99,7 +99,7 @@
 	exit 1
 fi
 
-if [ "$DOCKER_EXPERIMENTAL" ] || [ "$DOCKER_REMAP_ROOT" ]; then
+if [ "$DOCKER_EXPERIMENTAL" ]; then
 	echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features'
 	echo >&2
 	DOCKER_BUILDTAGS+=" experimental pkcs11"
diff --git a/hack/make/.build-rpm/docker-engine-selinux.spec b/hack/make/.build-rpm/docker-engine-selinux.spec
index c642180..69e5faa 100644
--- a/hack/make/.build-rpm/docker-engine-selinux.spec
+++ b/hack/make/.build-rpm/docker-engine-selinux.spec
@@ -101,3 +101,6 @@
 %attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if
 
 %changelog
+* Tue Dec 1 2015 Jessica Frazelle <acidburn@docker.com> 1.9.1-1
+- add license to rpm
+- add selinux-policy and docker-engine-selinux rpm
diff --git a/hack/make/.detect-daemon-osarch b/hack/make/.detect-daemon-osarch
new file mode 100644
index 0000000..f95afc4
--- /dev/null
+++ b/hack/make/.detect-daemon-osarch
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -e
+
+# Retrieve OS/ARCH of docker daemon, e.g. linux/amd64
+export DOCKER_ENGINE_OSARCH="$(docker version | awk '
+	$1 == "Client:" { server = 0; next }
+	$1 == "Server:" { server = 1; next }
+	server && $1 == "OS/Arch:" { print $2 }
+')"
+export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}"
+export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}"
+
+# and the client, just in case
+export DOCKER_CLIENT_OSARCH="$(docker version | awk '
+	$1 == "Client:" { client = 1; next }
+	$1 == "Server:" { client = 0; next }
+	client && $1 == "OS/Arch:" { print $2 }
+')"
diff --git a/hack/make/.ensure-frozen-images b/hack/make/.ensure-frozen-images
index 6c4ae7f..6e0036c 100644
--- a/hack/make/.ensure-frozen-images
+++ b/hack/make/.ensure-frozen-images
@@ -1,38 +1,32 @@
 #!/bin/bash
 set -e
 
-# image lists for different archs that should match what's in the Dockerfile (minus the explicit images IDs)
+# image list should match what's in the Dockerfile (minus the explicit images IDs)
+images=(
+	busybox:latest
+	debian:jessie
+	hello-world:latest
+)
+
+imagePrefix=
 case "$DOCKER_ENGINE_OSARCH" in
 	linux/arm)
-		images=(
-			hypriot/armhf-busybox:latest
-			hypriot/armhf-hello-world:latest
-			hypriot/armhf-unshare:latest
-		)
+		imagePrefix='armhf'
 		;;
 	linux/ppc64le)
-		images=(
-			ppc64le/busybox:latest
-			ppc64le/hello-world:frozen
-			ppc64le/unshare:latest
-		)
+		imagePrefix='ppc64le'
 		;;
 	linux/s390x)
-		images=(
-			s390x/busybox:latest
-			s390x/hello-world:frozen
-			s390x/unshare:latest
-		)
-		;;
-	*)
-		images=(
-			busybox:latest
-			debian:jessie
-			hello-world:latest
-		)
+		imagePrefix='s390x'
 		;;
 esac
 
+if [ "$imagePrefix" ]; then
+	for (( i = 0; i < ${#images[@]}; i++ )); do
+		images[$i]="$imagePrefix/${images[$i]}"
+	done
+fi
+
 if ! docker inspect "${images[@]}" &> /dev/null; then
 	hardCodedDir='/docker-frozen-images'
 	if [ -d "$hardCodedDir" ]; then
@@ -60,7 +54,7 @@
 					inCont = 0;
 				}
 			}
-		' ${DOCKER_FILE:="Dockerfile"} | sh -x
+		' "${DOCKERFILE:=Dockerfile}" | sh -x
 		# Do not use a subshell for the following command. Windows CI
 		# runs bash 3.x so will not trap an error in a subshell.
 		# http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells
@@ -68,35 +62,18 @@
 	fi
 fi
 
-# tag images to ensure that all integrations work with the defined image names
-# then remove original tags as these make problems with later tests (e.g., TestInspectApiImageResponse)
-case "$DOCKER_ENGINE_OSARCH" in
-	linux/arm)
-		docker tag hypriot/armhf-busybox:latest busybox:latest
-		docker tag hypriot/armhf-hello-world:latest hello-world:frozen
-		docker tag hypriot/armhf-unshare:latest jess/unshare:latest
-		docker rmi hypriot/armhf-busybox:latest
-		docker rmi hypriot/armhf-hello-world:latest
-		docker rmi hypriot/armhf-unshare:latest
-		;;
-	linux/ppc64le)
-		docker tag ppc64le/busybox:latest busybox:latest
-		docker tag ppc64le/hello-world:frozen hello-world:frozen
-		docker tag ppc64le/unshare:latest jess/unshare:latest
-		docker rmi ppc64le/busybox:latest
-		docker rmi ppc64le/hello-world:frozen
-		docker rmi ppc64le/unshare:latest
-		;;
-	linux/s390x)
-		docker tag s390x/busybox:latest busybox:latest
-		docker tag s390x/hello-world:frozen hello-world:frozen
-		docker tag s390x/unshare:latest jess/unshare:latest
-		docker rmi s390x/busybox:latest
-		docker rmi s390x/hello-world:frozen
-		docker rmi s390x/unshare:latest
-		;;
-	*)
-		docker tag hello-world:latest hello-world:frozen
-		docker rmi hello-world:latest
-		;;
-esac
+if [ "$imagePrefix" ]; then
+	for image in "${images[@]}"; do
+		target="${image#$imagePrefix/}"
+		if [ "$target" != "$image" ]; then
+			# tag images to ensure that all integrations work with the defined image names
+			docker tag "$image" "$target"
+			# then remove original tags as these make problems with later tests (e.g., TestInspectApiImageResponse)
+			docker rmi "$image"
+		fi
+	done
+fi
+
+# explicitly rename "hello-world:latest" to ":frozen" for the test that uses it
+docker tag hello-world:latest hello-world:frozen
+docker rmi hello-world:latest
diff --git a/hack/make/.integration-daemon-setup b/hack/make/.integration-daemon-setup
index 4703685..9732486 100644
--- a/hack/make/.integration-daemon-setup
+++ b/hack/make/.integration-daemon-setup
@@ -1,12 +1,7 @@
 #!/bin/bash
+set -e
 
-# Retrieve OS/ARCH of docker daemon, eg. linux/amd64
-export DOCKER_ENGINE_OSARCH=$(docker version | grep 'OS/Arch' | tail -1 | cut -d':' -f2 | tr -d '[[:space:]]')
-# Retrieve OS of docker daemon, eg. linux
-export DOCKER_ENGINE_GOOS=$(echo $DOCKER_ENGINE_OSARCH | cut -d'/' -f1)
-# Retrieve ARCH of docker daemon, eg. amd64
-export DOCKER_ENGINE_GOARCH=$(echo $DOCKER_ENGINE_OSARCH | cut -d'/' -f2)
-
+bundle .detect-daemon-osarch
 bundle .ensure-emptyfs
 bundle .ensure-frozen-images
 bundle .ensure-httpserver
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
index b8e8e8d..36abc6f 100644
--- a/hack/make/build-rpm
+++ b/hack/make/build-rpm
@@ -98,7 +98,6 @@
 		if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then
 			cat >> "$DEST/$version/Dockerfile.build" <<-EOF
 				RUN tar -cz -C /usr/src/${rpmName}/contrib -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux
-				RUN { echo '* $rpmDate $rpmPackager $rpmVersion-$rpmRelease'; echo '* Version: $VERSION'; } >> ${rpmName}-selinux.spec && tail >&2 ${rpmName}-selinux.spec
 				RUN rpmbuild -ba \
 						--define '_gitcommit $DOCKER_GITCOMMIT' \
 						--define '_release $rpmRelease' \
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 70e96a6..091c977 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -22,10 +22,12 @@
 clone git golang.org/x/net 47990a1ba55743e6ef1affd3a14e5bac8553615d https://github.com/golang/net.git
 clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
 clone git github.com/docker/go-connections v0.1.2
-clone git github.com/docker/engine-api v0.1.3
+clone git github.com/docker/engine-api v0.2.2
+clone git github.com/RackSec/srslog 6eb773f331e46fbba8eecb8e794e635e75fc04de
+clone git github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork c8ec4bd24e1e76feb4f79e3924c68cd2ce89938a
+clone git github.com/docker/libnetwork v0.5.6
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
 clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
@@ -44,11 +46,11 @@
 clone git github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
 
 # get graph and distribution packages
-clone git github.com/docker/distribution 568bf038af6d65b376165d02886b1c7fcaef1f61
+clone git github.com/docker/distribution cb08de17d74bef86ce6c5abe8b240e282f5750be
 clone git github.com/vbatts/tar-split v0.9.11
 
 # get desired notary commit, might also need to be updated in Dockerfile
-clone git github.com/docker/notary docker-v1.10-2
+clone git github.com/docker/notary docker-v1.10-3
 
 clone git google.golang.org/grpc 174192fc93efcb188fc8f46ca447f0da606b6885 https://github.com/grpc/grpc-go.git
 clone git github.com/miekg/pkcs11 80f102b5cac759de406949c47f0928b99bd64cdf
@@ -56,7 +58,6 @@
 clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
 
 clone git github.com/opencontainers/runc d97d5e8b007e4657316eed76ea30bc0f690230cf # libcontainer
-clone git github.com/opencontainers/specs 46d949ea81080c5f60dfb72ee91468b1e9fb2998 # specs
 clone git github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
 # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
 clone git github.com/coreos/go-systemd v4
diff --git a/image/fs.go b/image/fs.go
index 7c1c4c2..72c9ab4 100644
--- a/image/fs.go
+++ b/image/fs.go
@@ -101,11 +101,7 @@
 	}
 
 	// todo: maybe optional
-	validated, err := digest.FromBytes(content)
-	if err != nil {
-		return nil, err
-	}
-	if ID(validated) != id {
+	if ID(digest.FromBytes(content)) != id {
 		return nil, fmt.Errorf("failed to verify image: %v", id)
 	}
 
@@ -121,11 +117,7 @@
 		return "", fmt.Errorf("Invalid empty data")
 	}
 
-	dgst, err := digest.FromBytes(data)
-	if err != nil {
-		return "", err
-	}
-	id := ID(dgst)
+	id := ID(digest.FromBytes(data))
 	filePath := s.contentFile(id)
 	tempFilePath := s.contentFile(id) + ".tmp"
 	if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
diff --git a/image/fs_test.go b/image/fs_test.go
index 35ea3c2..1a6f849 100644
--- a/image/fs_test.go
+++ b/image/fs_test.go
@@ -67,10 +67,7 @@
 		t.Fatal(err)
 	}
 
-	id, err := digest.FromBytes([]byte("foobar"))
-	if err != nil {
-		t.Fatal(err)
-	}
+	id := digest.FromBytes([]byte("foobar"))
 	err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
 	if err != nil {
 		t.Fatal(err)
@@ -160,11 +157,7 @@
 		t.Fatal("Expected error for getting metadata for unknown key")
 	}
 
-	id3, err := digest.FromBytes([]byte("baz"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
+	id3 := digest.FromBytes([]byte("baz"))
 	err = store.SetMetadata(ID(id3), "tkey", []byte("tval"))
 	if err == nil {
 		t.Fatal("Expected error for setting metadata for unknown ID.")
diff --git a/image/rootfs_windows.go b/image/rootfs_windows.go
index 45db04b..10b8549 100644
--- a/image/rootfs_windows.go
+++ b/image/rootfs_windows.go
@@ -27,7 +27,7 @@
 
 // ChainID returns the ChainID for the top layer in RootFS.
 func (r *RootFS) ChainID() layer.ChainID {
-	baseDiffID, _ := digest.FromBytes([]byte(r.BaseLayerID())) // can never error
+	baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
 	return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
 }
 
diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go
index 4a67c01..cdea0e7 100644
--- a/image/v1/imagev1.go
+++ b/image/v1/imagev1.go
@@ -63,7 +63,7 @@
 	}
 	logrus.Debugf("CreateV1ID %s", configJSON)
 
-	return digest.FromBytes(configJSON)
+	return digest.FromBytes(configJSON), nil
 }
 
 // MakeConfigFromV1Config creates an image config from the legacy V1 config format.
diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go
index 030b07f..cac8176 100644
--- a/integration-cli/check_test.go
+++ b/integration-cli/check_test.go
@@ -28,6 +28,7 @@
 }
 
 func (s *DockerSuite) TearDownTest(c *check.C) {
+	unpauseAllContainers()
 	deleteAllContainers()
 	deleteAllImages()
 	deleteAllVolumes()
@@ -48,7 +49,7 @@
 
 func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
-	s.reg = setupRegistry(c)
+	s.reg = setupRegistry(c, false)
 	s.d = NewDaemon(c)
 }
 
@@ -56,10 +57,38 @@
 	if s.reg != nil {
 		s.reg.Close()
 	}
-	if s.ds != nil {
-		s.ds.TearDownTest(c)
+	if s.d != nil {
+		s.d.Stop()
 	}
-	s.d.Stop()
+	s.ds.TearDownTest(c)
+}
+
+func init() {
+	check.Suite(&DockerSchema1RegistrySuite{
+		ds: &DockerSuite{},
+	})
+}
+
+type DockerSchema1RegistrySuite struct {
+	ds  *DockerSuite
+	reg *testRegistryV2
+	d   *Daemon
+}
+
+func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	s.reg = setupRegistry(c, true)
+	s.d = NewDaemon(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) {
+	if s.reg != nil {
+		s.reg.Close()
+	}
+	if s.d != nil {
+		s.d.Stop()
+	}
+	s.ds.TearDownTest(c)
 }
 
 func init() {
@@ -80,7 +109,9 @@
 
 func (s *DockerDaemonSuite) TearDownTest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
-	s.d.Stop()
+	if s.d != nil {
+		s.d.Stop()
+	}
 	s.ds.TearDownTest(c)
 }
 
@@ -97,12 +128,16 @@
 }
 
 func (s *DockerTrustSuite) SetUpTest(c *check.C) {
-	s.reg = setupRegistry(c)
+	s.reg = setupRegistry(c, false)
 	s.not = setupNotary(c)
 }
 
 func (s *DockerTrustSuite) TearDownTest(c *check.C) {
-	s.reg.Close()
-	s.not.Close()
+	if s.reg != nil {
+		s.reg.Close()
+	}
+	if s.not != nil {
+		s.not.Close()
+	}
 	s.ds.TearDownTest(c)
 }
diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go
index aa1326c..ca5de07 100644
--- a/integration-cli/docker_api_containers_test.go
+++ b/integration-cli/docker_api_containers_test.go
@@ -652,10 +652,14 @@
 	c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname"))
 }
 
-func (s *DockerSuite) TestContainerApiCreateNetworkMode(c *check.C) {
+func (s *DockerSuite) TestContainerApiCreateBridgeNetworkMode(c *check.C) {
 	testRequires(c, DaemonIsLinux)
-	UtilCreateNetworkMode(c, "host")
 	UtilCreateNetworkMode(c, "bridge")
+}
+
+func (s *DockerSuite) TestContainerApiCreateOtherNetworkModes(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	UtilCreateNetworkMode(c, "host")
 	UtilCreateNetworkMode(c, "container:web1")
 }
 
@@ -1083,9 +1087,9 @@
 	c.Assert(err, checker.IsNil)
 	c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers"))
 
-	status, _, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil)
-	c.Assert(err, checker.IsNil)
-	c.Assert(status, checker.Equals, http.StatusNoContent)
+	status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil)
+	c.Assert(err, check.IsNil)
+	c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b)))
 
 	linksPostRm, err := inspectFieldJSON(id2, "HostConfig.Links")
 	c.Assert(err, checker.IsNil)
diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go
index 479bf5f..9e6af66 100644
--- a/integration-cli/docker_api_info_test.go
+++ b/integration-cli/docker_api_info_test.go
@@ -18,6 +18,9 @@
 	stringsToCheck := []string{
 		"ID",
 		"Containers",
+		"ContainersRunning",
+		"ContainersPaused",
+		"ContainersStopped",
 		"Images",
 		"ExecutionDriver",
 		"LoggingDriver",
diff --git a/integration-cli/docker_cli_authz_unix_test.go b/integration-cli/docker_cli_authz_unix_test.go
index 92b4a19..71a64f3 100644
--- a/integration-cli/docker_cli_authz_unix_test.go
+++ b/integration-cli/docker_cli_authz_unix_test.go
@@ -168,7 +168,7 @@
 	c.Assert(s.d.StartWithBusybox(), check.IsNil)
 	// restart the daemon and enable the plugin, otherwise busybox loading
 	// is blocked by the plugin itself
-	c.Assert(s.d.Restart("--authz-plugin="+testAuthZPlugin), check.IsNil)
+	c.Assert(s.d.Restart("--authorization-plugin="+testAuthZPlugin), check.IsNil)
 
 	s.ctrl.reqRes.Allow = true
 	s.ctrl.resRes.Allow = true
@@ -189,7 +189,7 @@
 }
 
 func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) {
-	err := s.d.Start("--authz-plugin=" + testAuthZPlugin)
+	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
 	c.Assert(err, check.IsNil)
 	s.ctrl.reqRes.Allow = false
 	s.ctrl.reqRes.Msg = unauthorizedMessage
@@ -205,7 +205,7 @@
 }
 
 func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) {
-	err := s.d.Start("--authz-plugin=" + testAuthZPlugin)
+	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
 	c.Assert(err, check.IsNil)
 	s.ctrl.reqRes.Allow = true
 	s.ctrl.resRes.Allow = false
@@ -222,7 +222,7 @@
 }
 
 func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) {
-	err := s.d.Start("--authz-plugin=" + testAuthZPlugin)
+	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
 	c.Assert(err, check.IsNil)
 	s.ctrl.reqRes.Allow = true
 	s.ctrl.resRes.Err = errorMessage
@@ -235,7 +235,7 @@
 }
 
 func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) {
-	err := s.d.Start("--authz-plugin=" + testAuthZPlugin)
+	err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
 	c.Assert(err, check.IsNil)
 	s.ctrl.reqRes.Err = errorMessage
 
@@ -247,7 +247,7 @@
 }
 
 func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) {
-	c.Assert(s.d.Start("--authz-plugin="+testAuthZPlugin, "--authz-plugin="+testAuthZPlugin), check.IsNil)
+	c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil)
 
 	s.ctrl.reqRes.Allow = true
 	s.ctrl.resRes.Allow = true
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index ba0a6bd..b60b24d 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -6170,8 +6170,8 @@
 	if err != nil {
 		c.Fatal(err)
 	}
-	if res != wdVal {
-		c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", wdVal, res)
+	if res != filepath.ToSlash(filepath.Clean(wdVal)) {
+		c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res)
 	}
 
 	err = inspectFieldAndMarshall(imgName, "Config.Env", &resArr)
diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go
index 446a28e..9cc0073 100644
--- a/integration-cli/docker_cli_by_digest_test.go
+++ b/integration-cli/docker_cli_by_digest_test.go
@@ -10,6 +10,7 @@
 
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest/schema1"
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/docker/pkg/integration/checker"
 	"github.com/docker/docker/pkg/stringutils"
 	"github.com/docker/engine-api/types"
@@ -56,7 +57,7 @@
 	return digest.Digest(pushDigest), nil
 }
 
-func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
+func testPullByTagDisplaysDigest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	pushDigest, err := setupImage(c)
 	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
@@ -73,7 +74,15 @@
 	c.Assert(pushDigest.String(), checker.Equals, pullDigest)
 }
 
-func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
+func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
+	testPullByTagDisplaysDigest(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) {
+	testPullByTagDisplaysDigest(c)
+}
+
+func testPullByDigest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	pushDigest, err := setupImage(c)
 	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
@@ -91,7 +100,15 @@
 	c.Assert(pushDigest.String(), checker.Equals, pullDigest)
 }
 
-func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) {
+	testPullByDigest(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) {
+	testPullByDigest(c)
+}
+
+func testPullByDigestNoFallback(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	// pull from the registry using the <name>@<digest> reference
 	imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName)
@@ -100,6 +117,14 @@
 	c.Assert(out, checker.Contains, "manifest unknown", check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image"))
 }
 
+func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+	testPullByDigestNoFallback(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) {
+	testPullByDigestNoFallback(c)
+}
+
 func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) {
 	pushDigest, err := setupImage(c)
 	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
@@ -370,8 +395,38 @@
 	dockerCmd(c, "rmi", imageID)
 }
 
+func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) {
+	pushDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
+
+	// pull from the registry using the <name>@<digest> reference
+	imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest)
+	dockerCmd(c, "pull", imageReference)
+
+	imageID, err := inspectField(imageReference, "Id")
+	c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id"))
+
+	repoTag := repoName + ":sometag"
+	repoTag2 := repoName + ":othertag"
+	dockerCmd(c, "tag", imageReference, repoTag)
+	dockerCmd(c, "tag", imageReference, repoTag2)
+
+	dockerCmd(c, "rmi", repoTag2)
+
+	// rmi should have deleted only repoTag2, because there's another tag
+	_, err = inspectField(repoTag, "Id")
+	c.Assert(err, checker.IsNil, check.Commentf("repoTag should not have been removed"))
+
+	dockerCmd(c, "rmi", repoTag)
+
+	// rmi should have deleted the tag, the digest reference, and the image itself
+	_, err = inspectField(imageID, "Id")
+	c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted"))
+}
+
 // TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
 // we have modified a manifest blob and its digest cannot be verified.
+// This is the schema2 version of the test.
 func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	manifestDigest, err := setupImage(c)
@@ -380,6 +435,46 @@
 	// Load the target manifest blob.
 	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
 
+	var imgManifest schema2.Manifest
+	err = json.Unmarshal(manifestBlob, &imgManifest)
+	c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob"))
+
+	// Change a layer in the manifest.
+	imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
+
+	// Move the existing data file aside, so that we can replace it with a
+	// malicious blob of data. NOTE: we defer the returned undo func.
+	undo := s.reg.tempMoveBlobData(c, manifestDigest)
+	defer undo()
+
+	alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", "   ")
+	c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON"))
+
+	s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob)
+
+	// Now try pulling that image by digest. We should get an error about
+	// digest verification for the manifest digest.
+
+	// Pull from the registry using the <name>@<digest> reference.
+	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
+	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
+	c.Assert(exitStatus, checker.Not(check.Equals), 0)
+
+	expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest)
+	c.Assert(out, checker.Contains, expectedErrorMsg)
+}
+
+// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when
+// we have modified a manifest blob and its digest cannot be verified.
+// This is the schema1 version of the test.
+func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	manifestDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
+
+	// Load the target manifest blob.
+	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
+
 	var imgManifest schema1.Manifest
 	err = json.Unmarshal(manifestBlob, &imgManifest)
 	c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob"))
@@ -413,6 +508,7 @@
 
 // TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
 // we have modified a layer blob and its digest cannot be verified.
+// This is the schema2 version of the test.
 func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	manifestDigest, err := setupImage(c)
@@ -421,6 +517,49 @@
 	// Load the target manifest blob.
 	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
 
+	var imgManifest schema2.Manifest
+	err = json.Unmarshal(manifestBlob, &imgManifest)
+	c.Assert(err, checker.IsNil)
+
+	// Next, get the digest of one of the layers from the manifest.
+	targetLayerDigest := imgManifest.Layers[0].Digest
+
+	// Move the existing data file aside, so that we can replace it with a
+	// malicious blob of data. NOTE: we defer the returned undo func.
+	undo := s.reg.tempMoveBlobData(c, targetLayerDigest)
+	defer undo()
+
+	// Now make a fake data blob in this directory.
+	s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for."))
+
+	// Now try pulling that image by digest. We should get an error about
+	// digest verification for the target layer digest.
+
+	// Remove distribution cache to force a re-pull of the blobs
+	if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil {
+		c.Fatalf("error clearing distribution cache: %v", err)
+	}
+
+	// Pull from the registry using the <name>@<digest> reference.
+	imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest)
+	out, exitStatus, _ := dockerCmdWithError("pull", imageReference)
+	c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status"))
+
+	expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest)
+	c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out))
+}
+
+// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when
+// we have modified a layer blob and its digest cannot be verified.
+// This is the schema1 version of the test.
+func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	manifestDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil)
+
+	// Load the target manifest blob.
+	manifestBlob := s.reg.readBlobContents(c, manifestDigest)
+
 	var imgManifest schema1.Manifest
 	err = json.Unmarshal(manifestBlob, &imgManifest)
 	c.Assert(err, checker.IsNil)
diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go
index 4cb1ed1..096074c 100644
--- a/integration-cli/docker_cli_create_test.go
+++ b/integration-cli/docker_cli_create_test.go
@@ -312,7 +312,7 @@
 	s.trustedCmd(createCmd)
 	out, _, err := runCommandWithOutput(createCmd)
 	c.Assert(err, check.Not(check.IsNil))
-	c.Assert(string(out), checker.Contains, "does not have trust data for", check.Commentf("Missing expected output on trusted create:\n%s", out))
+	c.Assert(string(out), checker.Contains, "trust data unavailable.  Has a notary repository been initialized?", check.Commentf("Missing expected output on trusted create:\n%s", out))
 
 }
 
@@ -402,7 +402,7 @@
 	s.trustedCmd(createCmd)
 	out, _, err = runCommandWithOutput(createCmd)
 	c.Assert(err, check.Not(check.IsNil))
-	c.Assert(string(out), checker.Contains, "failed to validate data with current trusted certificates", check.Commentf("Missing expected output on trusted push:\n%s", out))
+	c.Assert(string(out), checker.Contains, "valid signatures did not meet threshold", check.Commentf("Missing expected output on trusted push:\n%s", out))
 
 }
 
@@ -415,3 +415,11 @@
 	c.Assert(res, checker.Contains, "9")
 
 }
+
+func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	name := "foo"
+	dir := "/home/foo/bar"
+	dockerCmd(c, "create", "--name", name, "-w", dir, "busybox")
+	dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), "/tmp")
+}
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index 5dee35f..2fa39da 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -1439,6 +1439,29 @@
 	}
 }
 
+// TestHttpsRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints.
+// https://github.com/docker/docker/issues/19280
+func (s *DockerDaemonSuite) TestHttpsRun(c *check.C) {
+	const (
+		testDaemonHTTPSAddr = "tcp://localhost:4271"
+	)
+
+	if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem",
+		"--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil {
+		c.Fatalf("Could not start daemon with busybox: %v", err)
+	}
+
+	daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"}
+	out, err := s.d.CmdWithArgs(daemonArgs, "run", "busybox", "echo", "TLS response")
+	if err != nil {
+		c.Fatalf("Error Occurred: %s and output: %s", err, out)
+	}
+
+	if !strings.Contains(out, "TLS response") {
+		c.Fatalf("expected output to include `TLS response`, got %v", out)
+	}
+}
+
 // TestTlsVerify verifies that --tlsverify=false turns on tls
 func (s *DockerDaemonSuite) TestTlsVerify(c *check.C) {
 	out, err := exec.Command(dockerBinary, "daemon", "--tlsverify=false").CombinedOutput()
@@ -1960,3 +1983,104 @@
 	}
 	c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) doesn't found in cgroups file: %s", expectedCgroup, cgroupPaths))
 }
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support links
+	err := s.d.StartWithBusybox()
+	c.Assert(err, check.IsNil)
+
+	out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	c.Assert(s.d.Restart(), check.IsNil)
+
+	// should fail since test is not running yet
+	out, err = s.d.Cmd("start", "test2")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("start", "test")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	out, err = s.d.Cmd("start", "-a", "test2")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out))
+}
+
+func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support links
+	err := s.d.StartWithBusybox()
+	c.Assert(err, check.IsNil)
+
+	out, err := s.d.Cmd("create", "--name=test", "busybox")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	test2ID := strings.TrimSpace(out)
+
+	out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	test3ID := strings.TrimSpace(out)
+
+	c.Assert(s.d.Restart(), check.IsNil)
+
+	out, err = s.d.Cmd("create", "--name=test", "busybox")
+	c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name"))
+	// this one is no longer needed, removing simplifies the remainder of the test
+	out, err = s.d.Cmd("rm", "-f", "test")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("ps", "-a", "--no-trunc")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	lines := strings.Split(strings.TrimSpace(out), "\n")[1:]
+
+	test2validated := false
+	test3validated := false
+	for _, line := range lines {
+		fields := strings.Fields(line)
+		names := fields[len(fields)-1]
+		switch fields[0] {
+		case test2ID:
+			c.Assert(names, check.Equals, "test2,test3/abc")
+			test2validated = true
+		case test3ID:
+			c.Assert(names, check.Equals, "test3")
+			test3validated = true
+		}
+	}
+
+	c.Assert(test2validated, check.Equals, true)
+	c.Assert(test3validated, check.Equals, true)
+}
+
+// TestRunLinksChanged checks that creating a new container with the same name does not update links;
+// this ensures that the old, pre-gh#16032 behavior is preserved
+func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) {
+	testRequires(c, DaemonIsLinux) // Windows does not support links
+	err := s.d.StartWithBusybox()
+	c.Assert(err, check.IsNil)
+
+	out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received")
+
+	out, err = s.d.Cmd("rm", "-f", "test")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top")
+	c.Assert(err, check.IsNil, check.Commentf(out))
+	out, err = s.d.Cmd("start", "-a", "test2")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received")
+
+	err = s.d.Restart()
+	c.Assert(err, check.IsNil)
+	out, err = s.d.Cmd("start", "-a", "test2")
+	c.Assert(err, check.NotNil, check.Commentf(out))
+	c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received")
+}
diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go
index 7d9a902..c8ebfd3 100644
--- a/integration-cli/docker_cli_help_test.go
+++ b/integration-cli/docker_cli_help_test.go
@@ -133,7 +133,7 @@
 			// Check each line for lots of stuff
 			lines := strings.Split(out, "\n")
 			for _, line := range lines {
-				c.Assert(len(line), checker.LessOrEqualThan, 103, check.Commentf("Help for %q is too long:\n%s", cmd, line))
+				c.Assert(len(line), checker.LessOrEqualThan, 107, check.Commentf("Help for %q is too long:\n%s", cmd, line))
 
 				if scanForHome && strings.Contains(line, `"`+home) {
 					c.Fatalf("Help for %q should use ~ instead of %q on:\n%s",
diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go
index c44b9e1..dbceddf 100644
--- a/integration-cli/docker_cli_images_test.go
+++ b/integration-cli/docker_cli_images_test.go
@@ -176,6 +176,15 @@
 	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true")
 	// Expect one dangling image
 	c.Assert(strings.Count(out, imageID), checker.Equals, 1)
+
+	out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false")
+	// dangling=false should not include dangling images
+	c.Assert(out, checker.Not(checker.Contains), imageID)
+
+	out, _ = dockerCmd(c, "images")
+	// docker images (no filter) still includes dangling images
+	c.Assert(out, checker.Contains, imageID)
+
 }
 
 func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) {
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
index 6d83d33..7d2b40d 100644
--- a/integration-cli/docker_cli_info_test.go
+++ b/integration-cli/docker_cli_info_test.go
@@ -3,6 +3,7 @@
 import (
 	"fmt"
 	"net"
+	"strings"
 
 	"github.com/docker/docker/pkg/integration/checker"
 	"github.com/docker/docker/utils"
@@ -17,6 +18,9 @@
 	stringsToCheck := []string{
 		"ID:",
 		"Containers:",
+		" Running:",
+		" Paused:",
+		" Stopped:",
 		"Images:",
 		"Execution Driver:",
 		"OSType:",
@@ -101,3 +105,44 @@
 	c.Assert(out, checker.Contains, fmt.Sprintf("Cluster store: %s\n", discoveryBackend))
 	c.Assert(out, checker.Contains, fmt.Sprintf("Cluster advertise: %s:2375\n", ip.String()))
 }
+
+func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	dockerCmd(c, "run", "-d", "busybox", "top")
+	out, _ := dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+}
+
+func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	dockerCmd(c, "pause", cleanedContainerID)
+
+	out, _ = dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+}
+
+func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
+	cleanedContainerID := strings.TrimSpace(out)
+
+	dockerCmd(c, "stop", cleanedContainerID)
+
+	out, _ = dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1))
+}
diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go
index 4945323..c39b2a0 100644
--- a/integration-cli/docker_cli_inspect_test.go
+++ b/integration-cli/docker_cli_inspect_test.go
@@ -388,3 +388,30 @@
 	c.Assert(err, check.IsNil)
 	c.Assert(out, checker.Contains, "test comment")
 }
+
+func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	contName := "test1"
+	dockerCmd(c, "run", "--name", contName, "-d", "busybox", "top")
+	netOut, _ := dockerCmd(c, "network", "inspect", "--format='{{.ID}}'", "bridge")
+	out, err := inspectField(contName, "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "bridge")
+	out, err = inspectField(contName, "NetworkSettings.Networks.bridge.NetworkID")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut))
+}
+
+func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	netOut, _ := dockerCmd(c, "network", "create", "net1")
+	dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top")
+	out, err := inspectField("container1", "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "net1")
+	out, err = inspectField("container1", "NetworkSettings.Networks.net1.NetworkID")
+	c.Assert(err, checker.IsNil)
+	c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut))
+}
diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go
index 1b1e374..d09c2f2 100644
--- a/integration-cli/docker_cli_network_unix_test.go
+++ b/integration-cli/docker_cli_network_unix_test.go
@@ -282,11 +282,11 @@
 	defer func() {
 		dockerCmd(c, "network", "rm", "dev")
 	}()
-	containerID := strings.TrimSpace(out)
+	networkID := strings.TrimSpace(out)
 
 	// filter with partial ID and partial name
 	// only show 'bridge' and 'dev' network
-	out, _ = dockerCmd(c, "network", "ls", "-f", "id="+containerID[0:5], "-f", "name=dge")
+	out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5], "-f", "name=dge")
 	assertNwList(c, out, []string{"dev", "bridge"})
 
 	// only show built-in network (bridge, none, host)
@@ -324,10 +324,11 @@
 	dockerCmd(c, "network", "create", "testDelMulti2")
 	assertNwIsAvailable(c, "testDelMulti2")
 	out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top")
-	waitRun(strings.TrimSpace(out))
+	containerID := strings.TrimSpace(out)
+	waitRun(containerID)
 
 	// delete three networks at the same time, since testDelMulti2
-	// contains active container, it's deletion should fail.
+	// contains active container, its deletion should fail.
 	out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2")
 	// err should not be nil due to deleting testDelMulti2 failed.
 	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
@@ -335,7 +336,7 @@
 	c.Assert(out, checker.Contains, "has active endpoints")
 	assertNwNotAvailable(c, "testDelMulti0")
 	assertNwNotAvailable(c, "testDelMulti1")
-	// testDelMulti2 can't be deleted, so it should exists
+	// testDelMulti2 can't be deleted, so it should exist
 	assertNwIsAvailable(c, "testDelMulti2")
 }
 
@@ -448,10 +449,21 @@
 	c.Assert(nr.Name, checker.Equals, "test")
 	c.Assert(len(nr.Containers), checker.Equals, 0)
 
-	// check if network connect fails for inactive containers
-	dockerCmd(c, "stop", containerID)
-	_, _, err = dockerCmdWithError("network", "connect", "test", containerID)
-	c.Assert(err, check.NotNil)
+	// run another container
+	out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top")
+	c.Assert(waitRun("test2"), check.IsNil)
+	containerID = strings.TrimSpace(out)
+
+	nr = getNwResource(c, "test")
+	c.Assert(nr.Name, checker.Equals, "test")
+	c.Assert(len(nr.Containers), checker.Equals, 1)
+
+	// force disconnect the container from the test network
+	dockerCmd(c, "network", "disconnect", "-f", "test", containerID)
+
+	nr = getNwResource(c, "test")
+	c.Assert(nr.Name, checker.Equals, "test")
+	c.Assert(len(nr.Containers), checker.Equals, 0)
 
 	dockerCmd(c, "network", "rm", "test")
 	assertNwNotAvailable(c, "test")
@@ -512,8 +524,58 @@
 	assertNwNotAvailable(c, "br0")
 }
 
-func (s *DockerNetworkSuite) TestDockerNetworkInspect(c *check.C) {
-	// if unspecified, network gateway will be selected from inside preferred pool
+func (s *DockerNetworkSuite) TestDockerNetworkIpamOptions(c *check.C) {
+	// Create a bridge network using custom ipam driver and options
+	dockerCmd(c, "network", "create", "--ipam-driver", dummyIpamDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0")
+	assertNwIsAvailable(c, "br0")
+
+	// Verify expected network ipam options
+	nr := getNetworkResource(c, "br0")
+	opts := nr.IPAM.Options
+	c.Assert(opts["opt1"], checker.Equals, "drv1")
+	c.Assert(opts["opt2"], checker.Equals, "drv2")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) {
+	nr := getNetworkResource(c, "none")
+	c.Assert(nr.Driver, checker.Equals, "null")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 0)
+
+	nr = getNetworkResource(c, "host")
+	c.Assert(nr.Driver, checker.Equals, "host")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 0)
+
+	nr = getNetworkResource(c, "bridge")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil)
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil)
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) {
+	// if unspecified, network subnet will be selected from inside preferred pool
+	dockerCmd(c, "network", "create", "test01")
+	assertNwIsAvailable(c, "test01")
+
+	nr := getNetworkResource(c, "test01")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil)
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil)
+
+	dockerCmd(c, "network", "rm", "test01")
+	assertNwNotAvailable(c, "test01")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) {
 	dockerCmd(c, "network", "create", "--driver=bridge", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0")
 	assertNwIsAvailable(c, "br0")
 
@@ -526,6 +588,7 @@
 	c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24")
 	c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254")
 	dockerCmd(c, "network", "rm", "br0")
+	assertNwNotAvailable(c, "test01")
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C) {
@@ -549,6 +612,7 @@
 	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1")
 	c.Assert(err, check.NotNil)
 	dockerCmd(c, "network", "rm", "test0")
+	assertNwNotAvailable(c, "test0")
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) {
@@ -561,6 +625,7 @@
 	c.Assert(opts["opt1"], checker.Equals, "drv1")
 	c.Assert(opts["opt2"], checker.Equals, "drv2")
 	dockerCmd(c, "network", "rm", "testopt")
+	assertNwNotAvailable(c, "testopt")
 
 }
 
@@ -795,7 +860,7 @@
 	out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
-	// Attach the container to other three networks
+	// Attach the container to other networks
 	for _, nw := range nws {
 		out, err = d.Cmd("network", "create", nw)
 		c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -805,7 +870,7 @@
 }
 
 func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) {
-	// Verify container is connected to all three networks
+	// Verify container is connected to all the networks
 	for _, nw := range nws {
 		out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName)
 		c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -855,7 +920,8 @@
 
 func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) {
 	out, _ := dockerCmd(c, "network", "create", "one")
-	dockerCmd(c, "run", "-d", "--net", strings.TrimSpace(out), "busybox", "top")
+	containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(containerOut))
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) {
@@ -929,7 +995,7 @@
 	c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network"))
 }
 
-func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMulipleNetworks(c *check.C) {
+func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) {
 	dockerCmd(c, "network", "create", "test")
 	dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top")
 	c.Assert(waitRun("foo"), checker.IsNil)
@@ -938,7 +1004,44 @@
 	networks, err := inspectField("foo", "NetworkSettings.Networks")
 	c.Assert(err, checker.IsNil)
 	c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network"))
-	c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' netwokr"))
+	c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network"))
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) {
+	dockerCmd(c, "network", "create", "test")
+	dockerCmd(c, "create", "--name=foo", "busybox", "top")
+	dockerCmd(c, "network", "connect", "test", "foo")
+	networks, err := inspectField("foo", "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network"))
+
+	// Restart docker daemon to test the config has persisted to disk
+	s.d.Restart()
+	networks, err = inspectField("foo", "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network"))
+
+	// start the container and test if we can ping it from another container in the same network
+	dockerCmd(c, "start", "foo")
+	c.Assert(waitRun("foo"), checker.IsNil)
+	ip, err := inspectField("foo", "NetworkSettings.Networks.test.IPAddress")
+	ip = strings.TrimSpace(ip)
+	dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip))
+
+	dockerCmd(c, "stop", "foo")
+
+	// Test disconnect
+	dockerCmd(c, "network", "disconnect", "test", "foo")
+	networks, err = inspectField("foo", "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network"))
+
+	// Restart docker daemon to test the config has persisted to disk
+	s.d.Restart()
+	networks, err = inspectField("foo", "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network"))
+
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) {
@@ -1008,3 +1111,124 @@
 	out, _ = dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.GlobalIPv6Address }}'", nwname), cName)
 	c.Assert(strings.TrimSpace(out), check.Equals, ipv6)
 }
+
+func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "foo1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "foo2")
+
+	dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// run a container in user-defined network foo1 with a link for an existing container
+	// and a link for a container that doesn't exist
+	dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1",
+		"--link=third:bar", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias FirstInFoo1 must succeed
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1")
+	c.Assert(err, check.IsNil)
+
+	// connect first container to foo2 network
+	dockerCmd(c, "network", "connect", "foo2", "first")
+	// connect second container to foo2 network with a different alias for first container
+	dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second")
+
+	// ping the new alias in network foo2
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2")
+	c.Assert(err, check.IsNil)
+
+	// disconnect first container from foo1 network
+	dockerCmd(c, "network", "disconnect", "foo1", "first")
+
+	// link in foo1 network must fail
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1")
+	c.Assert(err, check.NotNil)
+
+	// link in foo2 network must succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2")
+	c.Assert(err, check.IsNil)
+}
+
+// #19100 This is a deprecated feature test; it should be removed in Docker 1.12
+func (s *DockerNetworkSuite) TestDockerNetworkStartAPIWithHostconfig(c *check.C) {
+	netName := "test"
+	conName := "foo"
+	dockerCmd(c, "network", "create", netName)
+	dockerCmd(c, "create", "--name", conName, "busybox", "top")
+
+	config := map[string]interface{}{
+		"HostConfig": map[string]interface{}{
+			"NetworkMode": netName,
+		},
+	}
+	_, _, err := sockRequest("POST", "/containers/"+conName+"/start", config)
+	c.Assert(err, checker.IsNil)
+	c.Assert(waitRun(conName), checker.IsNil)
+	networks, err := inspectField(conName, "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(networks, checker.Contains, netName, check.Commentf("Should contain '%s' network", netName))
+	c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network"))
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) {
+	netWorkName1 := "test1"
+	netWorkName2 := "test2"
+	containerName := "foo"
+
+	dockerCmd(c, "network", "create", netWorkName1)
+	dockerCmd(c, "network", "create", netWorkName2)
+	dockerCmd(c, "create", "--name", containerName, "busybox", "top")
+	dockerCmd(c, "network", "connect", netWorkName1, containerName)
+	dockerCmd(c, "network", "connect", netWorkName2, containerName)
+	dockerCmd(c, "network", "disconnect", "bridge", containerName)
+
+	dockerCmd(c, "start", containerName)
+	c.Assert(waitRun(containerName), checker.IsNil)
+	networks, err := inspectField(containerName, "NetworkSettings.Networks")
+	c.Assert(err, checker.IsNil)
+	c.Assert(networks, checker.Contains, netWorkName1, check.Commentf("Should contain '%s' network", netWorkName1))
+	c.Assert(networks, checker.Contains, netWorkName2, check.Commentf("Should contain '%s' network", netWorkName2))
+	c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network"))
+}
+
+func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "net1")
+	dockerCmd(c, "network", "create", "-d", "bridge", "net2")
+
+	dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping first container and its alias
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// connect first container to net2 network
+	dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first")
+	// connect second container to net2 network
+	dockerCmd(c, "network", "connect", "net2", "second")
+
+	// ping the new alias in network net2
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
+	c.Assert(err, check.IsNil)
+
+	// disconnect first container from net1 network
+	dockerCmd(c, "network", "disconnect", "net1", "first")
+
+	// ping to net1 scoped alias "foo" must fail
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.NotNil)
+
+	// ping to net2 scoped alias "bar" must still succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
+	c.Assert(err, check.IsNil)
+}
diff --git a/integration-cli/docker_cli_pull_local_test.go b/integration-cli/docker_cli_pull_local_test.go
index 8e7da2f..1037951 100644
--- a/integration-cli/docker_cli_pull_local_test.go
+++ b/integration-cli/docker_cli_pull_local_test.go
@@ -1,19 +1,29 @@
 package main
 
 import (
+	"encoding/json"
 	"fmt"
+	"io/ioutil"
+	"os"
 	"os/exec"
+	"path/filepath"
+	"runtime"
 	"strings"
 
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/distribution/manifest/manifestlist"
+	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/docker/pkg/integration/checker"
 	"github.com/go-check/check"
 )
 
-// TestPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other
+// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other
 // tags for the same image) are not also pulled down.
 //
 // Ref: docker/docker#8141
-func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
+func testPullImageWithAliases(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 
 	repos := []string{}
@@ -40,8 +50,16 @@
 	}
 }
 
-// TestConcurrentPullWholeRepo pulls the same repo concurrently.
-func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
+func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
+	testPullImageWithAliases(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) {
+	testPullImageWithAliases(c)
+}
+
+// testConcurrentPullWholeRepo pulls the same repo concurrently.
+func testConcurrentPullWholeRepo(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 
 	repos := []string{}
@@ -89,8 +107,16 @@
 	}
 }
 
-// TestConcurrentFailingPull tries a concurrent pull that doesn't succeed.
-func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *check.C) {
+func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
+	testConcurrentPullWholeRepo(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestConcurrentPullWholeRepo(c *check.C) {
+	testConcurrentPullWholeRepo(c)
+}
+
+// testConcurrentFailingPull tries a concurrent pull that doesn't succeed.
+func testConcurrentFailingPull(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 
 	// Run multiple pulls concurrently
@@ -112,9 +138,17 @@
 	}
 }
 
-// TestConcurrentPullMultipleTags pulls multiple tags from the same repo
+func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *check.C) {
+	testConcurrentFailingPull(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestConcurrentFailingPull(c *check.C) {
+	testConcurrentFailingPull(c)
+}
+
+// testConcurrentPullMultipleTags pulls multiple tags from the same repo
 // concurrently.
-func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
+func testConcurrentPullMultipleTags(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 
 	repos := []string{}
@@ -161,9 +195,17 @@
 	}
 }
 
-// TestPullIDStability verifies that pushing an image and pulling it back
+func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
+	testConcurrentPullMultipleTags(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
+	testConcurrentPullMultipleTags(c)
+}
+
+// testPullIDStability verifies that pushing an image and pulling it back
 // preserves the image ID.
-func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
+func testPullIDStability(c *check.C) {
 	derivedImage := privateRegistryURL + "/dockercli/id-stability"
 	baseImage := "busybox"
 
@@ -229,6 +271,14 @@
 	}
 }
 
+func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
+	testPullIDStability(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) {
+	testPullIDStability(c)
+}
+
 // TestPullFallbackOn404 tries to pull a nonexistent manifest and confirms that
 // the pull falls back to the v1 protocol.
 //
@@ -240,3 +290,85 @@
 
 	c.Assert(out, checker.Contains, "v1 ping attempt")
 }
+
+func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) {
+	pushDigest, err := setupImage(c)
+	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
+
+	// Inject a manifest list into the registry
+	manifestList := &manifestlist.ManifestList{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 2,
+			MediaType:     manifestlist.MediaTypeManifestList,
+		},
+		Manifests: []manifestlist.ManifestDescriptor{
+			{
+				Descriptor: distribution.Descriptor{
+					Digest:    "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
+					Size:      3253,
+					MediaType: schema2.MediaTypeManifest,
+				},
+				Platform: manifestlist.PlatformSpec{
+					Architecture: "bogus_arch",
+					OS:           "bogus_os",
+				},
+			},
+			{
+				Descriptor: distribution.Descriptor{
+					Digest:    pushDigest,
+					Size:      3253,
+					MediaType: schema2.MediaTypeManifest,
+				},
+				Platform: manifestlist.PlatformSpec{
+					Architecture: runtime.GOARCH,
+					OS:           runtime.GOOS,
+				},
+			},
+		},
+	}
+
+	manifestListJSON, err := json.MarshalIndent(manifestList, "", "   ")
+	c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list"))
+
+	manifestListDigest := digest.FromBytes(manifestListJSON)
+	hexDigest := manifestListDigest.Hex()
+
+	registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2")
+
+	// Write manifest list to blob store
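+	// (the registry stores blobs content-addressably: the first two hex
+	// characters of the digest shard the directory under blobs/sha256)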
+	blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest)
+	err = os.MkdirAll(blobDir, 0755)
+	c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir"))
+	blobPath := filepath.Join(blobDir, "data")
+	err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644)
+	c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list"))
+
+	// Add to revision store
+	revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest)
+	err = os.Mkdir(revisionDir, 0755)
+	c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir"))
+	revisionPath := filepath.Join(revisionDir, "link")
+	err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644)
+	c.Assert(err, checker.IsNil, check.Commentf("error writing revision link"))
+
+	// Update tag
+	tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link")
+	err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644)
+	c.Assert(err, checker.IsNil, check.Commentf("error writing tag link"))
+
+	// Verify that the image can be pulled through the manifest list.
+	out, _ := dockerCmd(c, "pull", repoName)
+
+	// The pull output includes "Digest: <digest>", so find that
+	matches := digestRegex.FindStringSubmatch(out)
+	c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out))
+	pullDigest := matches[1]
+
+	// Make sure the pushed and pulled digests match
+	c.Assert(manifestListDigest.String(), checker.Equals, pullDigest)
+
+	// Was the image actually created?
+	dockerCmd(c, "inspect", repoName)
+
+	dockerCmd(c, "rmi", repoName)
+}
diff --git a/integration-cli/docker_cli_pull_trusted_test.go b/integration-cli/docker_cli_pull_trusted_test.go
index 0241081..fbd50b5 100644
--- a/integration-cli/docker_cli_pull_trusted_test.go
+++ b/integration-cli/docker_cli_pull_trusted_test.go
@@ -4,6 +4,7 @@
 	"fmt"
 	"io/ioutil"
 	"os/exec"
+	"strings"
 	"time"
 
 	"github.com/docker/docker/pkg/integration/checker"
@@ -58,7 +59,7 @@
 	out, _, err := runCommandWithOutput(pullCmd)
 
 	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(string(out), checker.Contains, "Error: remote trust data repository not initialized", check.Commentf(out))
+	c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out))
 }
 
 func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) {
@@ -140,7 +141,7 @@
 	out, _, err = runCommandWithOutput(pullCmd)
 
 	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(string(out), checker.Contains, "failed to validate data with current trusted certificates", check.Commentf(out))
+	c.Assert(string(out), checker.Contains, "valid signatures did not meet threshold", check.Commentf(out))
 }
 
 func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) {
@@ -200,3 +201,55 @@
 	c.Assert(err, check.IsNil, check.Commentf(out))
 	c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out))
 }
+
+func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) {
+	repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete")
+	// tag the image and upload it to the private registry
+	_, err := buildImage(repoName, `
+                    FROM busybox
+                    CMD echo trustedpulldelete
+                `, true)
+	c.Assert(err, check.IsNil, check.Commentf("error building image"))
+
+	pushCmd := exec.Command(dockerBinary, "push", repoName)
+	s.trustedCmd(pushCmd)
+	out, _, err := runCommandWithOutput(pushCmd)
+	if err != nil {
+		c.Fatalf("Error running trusted push: %s\n%s", err, out)
+	}
+	if !strings.Contains(string(out), "Signing and pushing trust metadata") {
+		c.Fatalf("Missing expected output on trusted push:\n%s", out)
+	}
+
+	if out, status := dockerCmd(c, "rmi", repoName); status != 0 {
+		c.Fatalf("Error removing image %q\n%s", repoName, out)
+	}
+
+	// Try pull
+	pullCmd := exec.Command(dockerBinary, "pull", repoName)
+	s.trustedCmd(pullCmd)
+	out, _, err = runCommandWithOutput(pullCmd)
+
+	c.Assert(err, check.IsNil, check.Commentf(out))
+
+	matches := digestRegex.FindStringSubmatch(out)
+	c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out))
+	pullDigest := matches[1]
+
+	imageID, err := inspectField(repoName, "Id")
+	c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id"))
+
+	imageByDigest := repoName + "@" + pullDigest
+	byDigestID, err := inspectField(imageByDigest, "Id")
+	c.Assert(err, checker.IsNil, check.Commentf("error inspecting image id"))
+
+	c.Assert(byDigestID, checker.Equals, imageID)
+
+	// rmi of tag should also remove the digest reference
+	dockerCmd(c, "rmi", repoName)
+
+	_, err = inspectField(imageByDigest, "Id")
+	c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed"))
+
+	_, err = inspectField(imageID, "Id")
+	c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted"))
+}
diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
index 333ea03..c100772 100644
--- a/integration-cli/docker_cli_push_test.go
+++ b/integration-cli/docker_cli_push_test.go
@@ -16,7 +16,7 @@
 )
 
 // Pushing an image to a private registry.
-func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
+func testPushBusyboxImage(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	// tag the image to upload it to the private registry
 	dockerCmd(c, "tag", "busybox", repoName)
@@ -24,13 +24,21 @@
 	dockerCmd(c, "push", repoName)
 }
 
+func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) {
+	testPushBusyboxImage(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) {
+	testPushBusyboxImage(c)
+}
+
 // pushing an image without a prefix should throw an error
 func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) {
 	out, _, err := dockerCmdWithError("push", "busybox")
 	c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out))
 }
 
-func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
+func testPushUntagged(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	expected := "Repository does not exist"
 
@@ -39,7 +47,15 @@
 	c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed"))
 }
 
-func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
+func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) {
+	testPushUntagged(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) {
+	testPushUntagged(c)
+}
+
+func testPushBadTag(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL)
 	expected := "does not exist"
 
@@ -48,7 +64,15 @@
 	c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed"))
 }
 
-func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
+func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) {
+	testPushBadTag(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) {
+	testPushBadTag(c)
+}
+
+func testPushMultipleTags(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
 	repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL)
 	repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL)
@@ -85,7 +109,15 @@
 	}
 }
 
-func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
+func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) {
+	testPushMultipleTags(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) {
+	testPushMultipleTags(c)
+}
+
+func testPushEmptyLayer(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL)
 	emptyTarball, err := ioutil.TempFile("", "empty_tarball")
 	c.Assert(err, check.IsNil, check.Commentf("Unable to create test file"))
@@ -107,6 +139,66 @@
 	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out))
 }
 
+func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
+	testPushEmptyLayer(c)
+}
+
+func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) {
+	testPushEmptyLayer(c)
+}
+
+func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
+	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", sourceRepoName)
+	// push the image to the registry
+	out1, _, err := dockerCmdWithError("push", sourceRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1))
+	// ensure that none of the layers were mounted from another repository during push
+	c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false)
+
+	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
+	// retag the image to upload the same layers to another repo in the same registry
+	dockerCmd(c, "tag", "busybox", destRepoName)
+	// push the image to the registry
+	out2, _, err := dockerCmdWithError("push", destRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2))
+	// ensure that layers were mounted from the first repo during push
+	c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true)
+
+	// ensure that we can pull and run the cross-repo-pushed repository
+	dockerCmd(c, "rmi", destRepoName)
+	dockerCmd(c, "pull", destRepoName)
+	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
+	c.Assert(out3, check.Equals, "hello world")
+}
+
+func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) {
+	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", sourceRepoName)
+	// push the image to the registry
+	out1, _, err := dockerCmdWithError("push", sourceRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1))
+	// ensure that none of the layers were mounted from another repository during push
+	c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false)
+
+	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
+	// retag the image to upload the same layers to another repo in the same registry
+	dockerCmd(c, "tag", "busybox", destRepoName)
+	// push the image to the registry
+	out2, _, err := dockerCmdWithError("push", destRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2))
+	// schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen
+	c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, false)
+
+	// ensure that we can pull and run the second pushed repository
+	dockerCmd(c, "rmi", destRepoName)
+	dockerCmd(c, "pull", destRepoName)
+	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
+	c.Assert(out3, check.Equals, "hello world")
+}
+
 func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
 	// tag the image and upload it to the private registry
diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go
index dc57a94..2bc28fd 100644
--- a/integration-cli/docker_cli_restart_test.go
+++ b/integration-cli/docker_cli_restart_test.go
@@ -153,3 +153,46 @@
 	err = waitInspect(id, "{{.State.Status}}", "running", 5*time.Second)
 	c.Assert(err, check.IsNil)
 }
+
+func (s *DockerSuite) TestUserDefinedNetworkWithRestartPolicy(c *check.C) {
+	testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "udNet")
+
+	dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second",
+		"--link=first:foo", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias foo must succeed
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// Now kill the second container and let the restart policy kick in
+	pidStr, err := inspectField("second", "State.Pid")
+	c.Assert(err, check.IsNil)
+
+	pid, err := strconv.Atoi(pidStr)
+	c.Assert(err, check.IsNil)
+
+	p, err := os.FindProcess(pid)
+	c.Assert(err, check.IsNil)
+	c.Assert(p, check.NotNil)
+
+	err = p.Kill()
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second)
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second)
+
+	// ping to first and its alias foo must still succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+}
diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go
index 2f456e8..923c9da 100644
--- a/integration-cli/docker_cli_rmi_test.go
+++ b/integration-cli/docker_cli_rmi_test.go
@@ -337,3 +337,20 @@
 
 	dockerCmd(c, "rmi", imageID)
 }
+
+// #18873
+func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerCmd(c, "create", "busybox")
+
+	imgID, err := inspectField("busybox:latest", "Id")
+	c.Assert(err, checker.IsNil)
+
+	_, _, err = dockerCmdWithError("rmi", imgID[:12])
+	c.Assert(err, checker.NotNil)
+
+	// check that tag was not removed
+	imgID2, err := inspectField("busybox:latest", "Id")
+	c.Assert(err, checker.IsNil)
+	c.Assert(imgID, checker.Equals, imgID2)
+}
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index bf056b9..f34c235 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -199,6 +199,111 @@
 	}
 }
 
+func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
+
+	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// run a container in user-defined network udlinkNet with a link for an existing container
+	// and a link for a container that doesn't exist
+	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo",
+		"--link=third:bar", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias foo must succeed
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// ping to third and its alias must fail
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third")
+	c.Assert(err, check.NotNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
+	c.Assert(err, check.NotNil)
+
+	// start third container now
+	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top")
+	c.Assert(waitRun("third"), check.IsNil)
+
+	// ping to third and its alias must succeed now
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar")
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet")
+
+	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo",
+		"busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias foo must succeed
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// Restart first container
+	dockerCmd(c, "restart", "first")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// ping to first and its alias foo must still succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// Restart second container
+	dockerCmd(c, "restart", "second")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias foo must still succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+}
+
+func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) {
+	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "net1")
+
+	dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its network-scoped aliases
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2")
+	c.Assert(err, check.IsNil)
+
+	// Restart first container
+	dockerCmd(c, "restart", "first")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	// ping to first and its network-scoped aliases must succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2")
+	c.Assert(err, check.IsNil)
+}
+
 // Issue 9677.
 func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) {
 	out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true")
@@ -3198,7 +3303,7 @@
 		c.Fatalf("Expected to fail on this run due to different remote data: %s\n%s", err, out)
 	}
 
-	if !strings.Contains(string(out), "failed to validate data with current trusted certificates") {
+	if !strings.Contains(string(out), "valid signatures did not meet threshold") {
 		c.Fatalf("Missing expected output on trusted push:\n%s", out)
 	}
 }
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index 1f98cf3..15f0d52 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -887,3 +887,18 @@
 		c.Fatalf("expected hello, got: %s, %v", out, err)
 	}
 }
+
+func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) {
+	testRequires(c, SameHostDaemon, Apparmor)
+
+	// running with seccomp unconfined tests the apparmor profile
+	runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp:unconfined", "debian:jessie", "chmod", "777", "/proc/1/cgroup")
+	if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) {
+		c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err)
+	}
+
+	runCmd = exec.Command(dockerBinary, "run", "--security-opt", "seccomp:unconfined", "debian:jessie", "chmod", "777", "/proc/1/attr/current")
+	if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) {
+		c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err)
+	}
+}
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
index 55bd129..4734c89 100644
--- a/integration-cli/docker_utils.go
+++ b/integration-cli/docker_utils.go
@@ -1554,9 +1554,9 @@
 	return dt
 }
 
-func setupRegistry(c *check.C) *testRegistryV2 {
+func setupRegistry(c *check.C, schema1 bool) *testRegistryV2 {
 	testRequires(c, RegistryHosting)
-	reg, err := newTestRegistryV2(c)
+	reg, err := newTestRegistryV2(c, schema1)
 	c.Assert(err, check.IsNil)
 
 	// Wait for registry to be ready to serve requests.
diff --git a/integration-cli/registry.go b/integration-cli/registry.go
index 35e1b4e..26aebdf 100644
--- a/integration-cli/registry.go
+++ b/integration-cli/registry.go
@@ -12,14 +12,17 @@
 	"github.com/go-check/check"
 )
 
-const v2binary = "registry-v2"
+const (
+	v2binary        = "registry-v2"
+	v2binarySchema1 = "registry-v2-schema1"
+)
 
 type testRegistryV2 struct {
 	cmd *exec.Cmd
 	dir string
 }
 
-func newTestRegistryV2(c *check.C) (*testRegistryV2, error) {
+func newTestRegistryV2(c *check.C, schema1 bool) (*testRegistryV2, error) {
 	template := `version: 0.1
 loglevel: debug
 storage:
@@ -41,7 +44,11 @@
 		return nil, err
 	}
 
-	cmd := exec.Command(v2binary, confPath)
+	binary := v2binary
+	if schema1 {
+		binary = v2binarySchema1
+	}
+	cmd := exec.Command(binary, confPath)
 	if err := cmd.Start(); err != nil {
 		os.RemoveAll(tmp)
 		if os.IsNotExist(err) {
diff --git a/integration-cli/requirements_unix.go b/integration-cli/requirements_unix.go
index 5110b9b..e71ffd1 100644
--- a/integration-cli/requirements_unix.go
+++ b/integration-cli/requirements_unix.go
@@ -77,7 +77,7 @@
 	}
 	seccompEnabled = testRequirement{
 		func() bool {
-			return supportsSeccomp
+			return supportsSeccomp && SysInfo.Seccomp
 		},
 		"Test requires that seccomp support be enabled in the daemon.",
 	}
diff --git a/layer/filestore_test.go b/layer/filestore_test.go
index 4dae3f8..7b55e4d 100644
--- a/layer/filestore_test.go
+++ b/layer/filestore_test.go
@@ -15,12 +15,8 @@
 
 func randomLayerID(seed int64) ChainID {
 	r := rand.New(rand.NewSource(seed))
-	dgst, err := digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))
-	if err != nil {
-		panic(err)
-	}
 
-	return ChainID(dgst)
+	return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63()))))
 }
 
 func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) {
diff --git a/layer/layer.go b/layer/layer.go
index 0c6d60c..ef2ac7a 100644
--- a/layer/layer.go
+++ b/layer/layer.go
@@ -233,12 +233,7 @@
 		return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...)
 	}
 	// H = "H(n-1) SHA256(n)"
-	dgst, err := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
-	if err != nil {
-		// Digest calculation is not expected to throw an error,
-		// any error at this point is a program error
-		panic(err)
-	}
+	dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0])))
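+	// i.e. ChainID(n) = SHA256(ChainID(n-1) + " " + DiffID(n))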
 	return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
 }
 
diff --git a/layer/layer_test.go b/layer/layer_test.go
index 5a96b7c..a0ecb53 100644
--- a/layer/layer_test.go
+++ b/layer/layer_test.go
@@ -548,10 +548,7 @@
 }
 
 func assertLayerDiff(t *testing.T, expected []byte, layer Layer) {
-	expectedDigest, err := digest.FromBytes(expected)
-	if err != nil {
-		t.Fatal(err)
-	}
+	expectedDigest := digest.FromBytes(expected)
 
 	if digest.Digest(layer.DiffID()) != expectedDigest {
 		t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected)
@@ -573,10 +570,7 @@
 		t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected))
 	}
 
-	actualDigest, err := digest.FromBytes(actual)
-	if err != nil {
-		t.Fatal(err)
-	}
+	actualDigest := digest.FromBytes(actual)
 
 	if actualDigest != expectedDigest {
 		logByteDiff(t, actual, expected)
diff --git a/layer/layer_windows.go b/layer/layer_windows.go
index e6396fa..e20311a 100644
--- a/layer/layer_windows.go
+++ b/layer/layer_windows.go
@@ -37,10 +37,7 @@
 
 func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) {
 	var err error // this is used for cleanup in existingLayer case
-	diffID, err := digest.FromBytes([]byte(graphID))
-	if err != nil {
-		return nil, err
-	}
+	diffID := digest.FromBytes([]byte(graphID))
 
 	// Create new roLayer
 	layer := &roLayer{
diff --git a/man/docker-create.1.md b/man/docker-create.1.md
index f0499e6..08074ac 100644
--- a/man/docker-create.1.md
+++ b/man/docker-create.1.md
@@ -35,6 +35,8 @@
 [**-h**|**--hostname**[=*HOSTNAME*]]
 [**--help**]
 [**-i**|**--interactive**]
+[**--ip**[=*IPv4-ADDRESS*]]
+[**--ip6**[=*IPv6-ADDRESS*]]
 [**--ipc**[=*IPC*]]
 [**--isolation**[=*default*]]
 [**--kernel-memory**[=*KERNEL-MEMORY*]]
@@ -50,6 +52,7 @@
 [**--memory-swappiness**[=*MEMORY-SWAPPINESS*]]
 [**--name**[=*NAME*]]
 [**--net**[=*"bridge"*]]
+[**--net-alias**[=*[]*]]
 [**--oom-kill-disable**]
 [**--oom-score-adj**[=*0*]]
 [**-P**|**--publish-all**]
@@ -174,6 +177,16 @@
 **-i**, **--interactive**=*true*|*false*
    Keep STDIN open even if not attached. The default is *false*.
 
+**--ip**=""
+   Sets the container's interface IPv4 address (e.g. 172.23.0.9)
+
+   It can only be used in conjunction with **--net** for user-defined networks
+
+**--ip6**=""
+   Sets the container's interface IPv6 address (e.g. 2001:db8::1b99)
+
+   It can only be used in conjunction with **--net** for user-defined networks
+
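+   For example, assuming a user-defined network named *mynet* already exists,
+   a fixed IPv4 address can be assigned when the container is created:
+
+   `docker create --net mynet --ip 172.23.0.9 busybox`
+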
 **--ipc**=""
    Default is to create a private IPC namespace (POSIX SysV IPC) for the container
                                'container:<name|id>': reuses another container shared memory, semaphores and message queues
@@ -253,6 +266,9 @@
                                'host': use the Docker host network stack.  Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
                                '<network-name>|<network-id>': connect to a user-defined network
 
+**--net-alias**=[]
+   Add a network-scoped alias for the container
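+
+   For example, assuming a user-defined network named *mynet* exists, the
+   following container is also reachable on *mynet* by the alias `web`:
+
+   `docker create --net mynet --net-alias web busybox top`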
+
 **--oom-kill-disable**=*true*|*false*
 	Whether to disable OOM Killer for the container or not.
 
diff --git a/man/docker-daemon.8.md b/man/docker-daemon.8.md
index 8001c72..02adaed 100644
--- a/man/docker-daemon.8.md
+++ b/man/docker-daemon.8.md
@@ -7,13 +7,14 @@
 # SYNOPSIS
 **docker daemon**
 [**--api-cors-header**=[=*API-CORS-HEADER*]]
-[**--authz-plugin**[=*[]*]]
+[**--authorization-plugin**[=*[]*]]
 [**-b**|**--bridge**[=*BRIDGE*]]
 [**--bip**[=*BIP*]]
 [**--cgroup-parent**[=*[]*]]
 [**--cluster-store**[=*[]*]]
 [**--cluster-advertise**[=*[]*]]
 [**--cluster-store-opt**[=*map[]*]]
+[**--config-file**[=*/etc/docker/daemon.json*]]
 [**-D**|**--debug**]
 [**--default-gateway**[=*DEFAULT-GATEWAY*]]
 [**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]]
@@ -53,6 +54,7 @@
 [**--tlskey**[=*~/.docker/key.pem*]]
 [**--tlsverify**]
 [**--userland-proxy**[=*true*]]
+[**--userns-remap**[=*default*]]
 
 # DESCRIPTION
 **docker** has two distinct functions. It is used for starting the Docker
@@ -72,7 +74,7 @@
 **--api-cors-header**=""
   Set CORS headers in the remote API. Default is cors disabled. Give urls like "http://foo, http://bar, ...". Give "*" to allow all.
 
-**--authz-plugin**=""
+**--authorization-plugin**=""
   Set authorization plugins to load
 
 **-b**, **--bridge**=""
@@ -95,6 +97,9 @@
 **--cluster-store-opt**=""
   Specifies options for the Key/Value store.
 
+**--config-file**="/etc/docker/daemon.json"
+  Specifies the JSON file path to load the configuration from.
+
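+  A minimal sketch of such a file (the keys shown are illustrative, not
+  exhaustive):
+
+```json
+{
+    "debug": true,
+    "storage-driver": "devicemapper"
+}
+```
+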
 **-D**, **--debug**=*true*|*false*
   Enable debug mode. Default is false.
 
@@ -223,6 +228,9 @@
 **--userland-proxy**=*true*|*false*
     Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true.
 
+**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid*
+    Enable user namespaces for containers on the daemon. Specifying "default" will cause a new user and group to be created to handle UID and GID range remapping for the user namespace mappings used for contained processes. Specifying a user (or uid) and optionally a group (or gid) will cause the daemon to look up the user and group's subordinate ID ranges for use as the user namespace mappings for contained processes.
+
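+    For example, the following starts the daemon with remapping handled by an
+    automatically created "default" user and group:
+
+    `docker daemon --userns-remap=default`
+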
 # STORAGE DRIVER OPTIONS
 
 Docker uses storage backends (known as "graphdrivers" in the Docker
@@ -267,12 +275,22 @@
 #### dm.basesize
 
 Specifies the size to use when creating the base device, which limits
-the size of images and containers. The default value is 100G. Note,
-thin devices are inherently "sparse", so a 100G device which is mostly
-empty doesn't use 100 GB of space on the pool. However, the filesystem
+the size of images and containers. The default value is 10G. Note that
+thin devices are inherently "sparse", so a 10G device which is mostly
+empty doesn't use 10 GB of space on the pool. However, the filesystem
 will use more space for base images the larger the device
 is.
 
+The base device size can be increased at daemon restart, which allows
+all future images and containers (based on those new images) to use the
+new base device size.
+
+Example use: `docker daemon --storage-opt dm.basesize=50G`
+
+This will increase the base device size to 50G. The Docker daemon will throw an
+error if the existing base device size is larger than 50G. A user can use
+this option to expand the base device size; however, shrinking is not permitted.
+
 This value affects the system-wide "base" empty filesystem that may already
 be initialized and inherited by pulled images. Typically, a change to this
 value requires additional steps to take effect:
@@ -469,10 +487,10 @@
 Docker's access authorization can be extended by authorization plugins that your
 organization can purchase or build themselves. You can install one or more
 authorization plugins when you start the Docker `daemon` using the
-`--authz-plugin=PLUGIN_ID` option.
+`--authorization-plugin=PLUGIN_ID` option.
 
 ```bash
-docker daemon --authz-plugin=plugin1 --authz-plugin=plugin2,...
+docker daemon --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
 ```
 
 The `PLUGIN_ID` value is either the plugin's name or a path to its specification
diff --git a/man/docker-info.1.md b/man/docker-info.1.md
index ae04e49..0dc46c1 100644
--- a/man/docker-info.1.md
+++ b/man/docker-info.1.md
@@ -32,6 +32,9 @@
 
     # docker info
     Containers: 14
+     Running: 3
+     Paused: 1
+     Stopped: 10
     Images: 52
     Server Version: 1.9.0
     Storage Driver: aufs
diff --git a/man/docker-inspect.1.md b/man/docker-inspect.1.md
index 9babd56..05c6078 100644
--- a/man/docker-inspect.1.md
+++ b/man/docker-inspect.1.md
@@ -72,22 +72,36 @@
     "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4",
     "NetworkSettings": {
         "Bridge": "",
-        "EndpointID": "",
-        "Gateway": "",
-        "GlobalIPv6Address": "",
-        "GlobalIPv6PrefixLen": 0,
+        "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f",
         "HairpinMode": false,
-        "IPAddress": "",
-        "IPPrefixLen": 0,
-        "IPv6Gateway": "",
         "LinkLocalIPv6Address": "",
         "LinkLocalIPv6PrefixLen": 0,
-        "MacAddress": "",
-        "NetworkID": "",
-        "Ports": null,
-        "SandboxKey": "",
+        "Ports": {},
+        "SandboxKey": "/var/run/docker/netns/6b4851d1903e",
         "SecondaryIPAddresses": null,
-        "SecondaryIPv6Addresses": null
+        "SecondaryIPv6Addresses": null,
+        "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d",
+        "Gateway": "172.17.0.1",
+        "GlobalIPv6Address": "",
+        "GlobalIPv6PrefixLen": 0,
+        "IPAddress": "172.17.0.2",
+        "IPPrefixLen": 16,
+        "IPv6Gateway": "",
+        "MacAddress": "02:42:ac:12:00:02",
+        "Networks": {
+            "bridge": {
+                "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812",
+                "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d",
+                "Gateway": "172.17.0.1",
+                "IPAddress": "172.17.0.2",
+                "IPPrefixLen": 16,
+                "IPv6Gateway": "",
+                "GlobalIPv6Address": "",
+                "GlobalIPv6PrefixLen": 0,
+                "MacAddress": "02:42:ac:12:00:02"
+            }
+        }
+
     },
     "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf",
     "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname",
diff --git a/man/docker-network-connect.1.md b/man/docker-network-connect.1.md
index 6a77bfe..0fc4d4c 100644
--- a/man/docker-network-connect.1.md
+++ b/man/docker-network-connect.1.md
@@ -11,7 +11,7 @@
 
 # DESCRIPTION
 
-Connects a running container to a network. You can connect a container by name
+Connects a container to a network. You can connect a container by name
 or by ID. Once connected, the container can communicate with other containers in
 the same network.
 
@@ -22,14 +22,26 @@
 You can also use the `docker run --net=<network-name>` option to start a container and immediately connect it to a network.
 
 ```bash
-$ docker run -itd --net=multi-host-network busybox
+$ docker run -itd --net=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox
 ```
 
 You can pause, restart, and stop containers that are connected to a network.
-Paused containers remain connected and a revealed by a `network inspect`. When
-the container is stopped, it does not appear on the network until you restart
-it. The container's IP address is not guaranteed to remain the same when a
-stopped container rejoins the network.
+Paused containers remain connected and are shown by a `network inspect`.
+When the container is stopped, it does not appear on the network until you restart
+it. If specified, the container's IP address(es) will be reapplied (if still available)
+when a stopped container rejoins the network. One way to guarantee that the container
+is assigned the same IP addresses when it rejoins the network after a stop
+or a disconnect is to specify `--ip-range` when creating the network, and to choose
+the static IP address(es) from outside that range. This ensures that the IP address
+is not given dynamically to other containers while this container is not on the network.
+
+```bash
+$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network
+```
+
+```bash
+$ docker network connect --ip 172.20.128.2 multi-host-network container2
+```
 
 To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network.
 
diff --git a/man/docker-network-create.1.md b/man/docker-network-create.1.md
index 4d0782b..c560f7a 100644
--- a/man/docker-network-create.1.md
+++ b/man/docker-network-create.1.md
@@ -10,8 +10,10 @@
 [**-d**|**--driver**=*DRIVER*]
 [**--gateway**=*[]*]
 [**--help**]
+[**--internal**]
 [**--ip-range**=*[]*]
 [**--ipam-driver**=*default*]
+[**--ipam-opt**=*map[]*]
 [**-o**|**--opt**=*map[]*]
 [**--subnet**=*[]*]
 NETWORK-NAME
@@ -120,6 +122,11 @@
 ```
 Be sure that your subnetworks do not overlap. If they do, the network create fails and Engine returns an error.
 
+### Network internal mode
+
+By default, when you connect a container to an `overlay` network, Docker also connects a bridge network to it to provide external connectivity.
+If you want to create an externally isolated `overlay` network, you can specify the `--internal` option.
+
 # OPTIONS
 **--aux-address**=map[]
   Auxiliary ipv4 or ipv6 addresses used by network driver
@@ -133,12 +140,18 @@
 **--help**
   Print usage
 
+**--internal**
+  Restricts external access to the network
+
 **--ip-range**=[]
   Allocate container ip from a sub-range
 
 **--ipam-driver**=*default*
   IP Address Management Driver
 
+**--ipam-opt**=map[]
+  Set custom IPAM plugin options
+
 **-o**, **--opt**=map[]
   Set custom network plugin options
 
diff --git a/man/docker-network-disconnect.1.md b/man/docker-network-disconnect.1.md
index 81b0387..09bcac5 100644
--- a/man/docker-network-disconnect.1.md
+++ b/man/docker-network-disconnect.1.md
@@ -7,11 +7,12 @@
 # SYNOPSIS
 **docker network disconnect**
 [**--help**]
+[**--force**]
 NETWORK CONTAINER
 
 # DESCRIPTION
 
-Disconnects a container from a network. The container must be running to disconnect it from the network.
+Disconnects a container from a network.
 
 ```bash
   $ docker network disconnect multi-host-network container1
@@ -25,6 +26,9 @@
 **CONTAINER**
     Specify container name
 
+**--force**
+  Force the container to disconnect from a network
+
 **--help**
   Print usage statement
 
diff --git a/man/docker-network-inspect.1.md b/man/docker-network-inspect.1.md
index 889967a..ceba368 100644
--- a/man/docker-network-inspect.1.md
+++ b/man/docker-network-inspect.1.md
@@ -12,7 +12,7 @@
 
 # DESCRIPTION
 
-Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to a network:
+Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network:
 
 ```bash
 $ sudo docker run -itd --name=container1 busybox
@@ -73,6 +73,33 @@
 ]
 ```
 
+Returns information about a user-defined network:
+
+```bash
+$ docker network create simple-network
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
+$ docker network inspect simple-network
+[
+    {
+        "Name": "simple-network",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Config": [
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1/16"
+                }
+            ]
+        },
+        "Containers": {},
+        "Options": {}
+    }
+]
+```
+
 # OPTIONS
 **-f**, **--format**=""
   Format the output using the given go template.
diff --git a/man/docker-run.1.md b/man/docker-run.1.md
index 71790d0..ea9b0b8 100644
--- a/man/docker-run.1.md
+++ b/man/docker-run.1.md
@@ -37,6 +37,8 @@
 [**-h**|**--hostname**[=*HOSTNAME*]]
 [**--help**]
 [**-i**|**--interactive**]
+[**--ip**[=*IPv4-ADDRESS*]]
+[**--ip6**[=*IPv6-ADDRESS*]]
 [**--ipc**[=*IPC*]]
 [**--isolation**[=*default*]]
 [**--kernel-memory**[=*KERNEL-MEMORY*]]
@@ -52,6 +54,7 @@
 [**--memory-swappiness**[=*MEMORY-SWAPPINESS*]]
 [**--name**[=*NAME*]]
 [**--net**[=*"bridge"*]]
+[**--net-alias**[=*[]*]]
 [**--oom-kill-disable**]
 [**--oom-score-adj**[=*0*]]
 [**-P**|**--publish-all**]
@@ -274,6 +277,16 @@
 
    When set to true, keep stdin open even if not attached. The default is false.
 
+**--ip**=""
+   Sets the container's interface IPv4 address (e.g. 172.23.0.9)
+
+   It can only be used in conjunction with **--net** for user-defined networks
+
+**--ip6**=""
+   Sets the container's interface IPv6 address (e.g. 2001:db8::1b99)
+
+   It can only be used in conjunction with **--net** for user-defined networks
+
 **--ipc**=""
    Default is to create a private IPC namespace (POSIX SysV IPC) for the container
                                'container:<name|id>': reuses another container shared memory, semaphores and message queues
@@ -371,6 +384,9 @@
                                'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
                                '<network-name>|<network-id>': connect to a user-defined network
 
+**--net-alias**=[]
+   Add network-scoped alias for the container
+
 **--oom-kill-disable**=*true*|*false*
    Whether to disable OOM Killer for the container or not.
 
diff --git a/migrate/v1/migratev1.go b/migrate/v1/migratev1.go
index 77507c3..9243c5a 100644
--- a/migrate/v1/migratev1.go
+++ b/migrate/v1/migratev1.go
@@ -476,8 +476,8 @@
 	if err == nil { // best effort
 		dgst, err := digest.ParseDigest(string(checksum))
 		if err == nil {
-			blobSumService := metadata.NewBlobSumService(ms)
-			blobSumService.Add(layer.DiffID(), dgst)
+			V2MetadataService := metadata.NewV2MetadataService(ms)
+			V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst})
 		}
 	}
 	_, err = ls.Release(layer)
diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go
index 5fe2663..6e8af7f 100644
--- a/migrate/v1/migratev1_test.go
+++ b/migrate/v1/migratev1_test.go
@@ -210,19 +210,19 @@
 		t.Fatalf("invalid register count: expected %q, got %q", expected, actual)
 	}
 
-	blobSumService := metadata.NewBlobSumService(ms)
-	blobsums, err := blobSumService.GetBlobSums(layer.EmptyLayer.DiffID())
+	v2MetadataService := metadata.NewV2MetadataService(ms)
+	receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	expectedBlobsums := []digest.Digest{
-		"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57",
-		"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+	expectedMetadata := []metadata.V2Metadata{
+		{Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")},
+		{Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
 	}
 
-	if !reflect.DeepEqual(expectedBlobsums, blobsums) {
-		t.Fatalf("invalid blobsums: expected %q, got %q", expectedBlobsums, blobsums)
+	if !reflect.DeepEqual(expectedMetadata, receivedMetadata) {
+		t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata)
 	}
 
 }
diff --git a/opts/opts.go b/opts/opts.go
index abc9ab8..05aadbe 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -100,6 +100,35 @@
 	return len((*opts.values))
 }
 
+// NamedOption is an interface implemented by list and map
+// options that carry a configuration name.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
 //MapOpts holds a map of values and a validation function.
 type MapOpts struct {
 	values    map[string]string
@@ -145,6 +174,29 @@
 	}
 }
 
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
+	}
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
+}
+
 // ValidatorFctType defines a validator function that returns a validated string and/or an error.
 type ValidatorFctType func(val string) (string, error)
 
diff --git a/opts/opts_test.go b/opts/opts_test.go
index da86b21..9f41e47 100644
--- a/opts/opts_test.go
+++ b/opts/opts_test.go
@@ -198,3 +198,35 @@
 	}
 	return "", fmt.Errorf("invalid key %s", vals[0])
 }
+
+func TestNamedListOpts(t *testing.T) {
+	var v []string
+	o := NewNamedListOptsRef("foo-name", &v, nil)
+
+	o.Set("foo")
+	if o.String() != "[foo]" {
+		t.Errorf("%s != [foo]", o.String())
+	}
+	if o.Name() != "foo-name" {
+		t.Errorf("%s != foo-name", o.Name())
+	}
+	if len(v) != 1 {
+		t.Errorf("expected foo to be in the values, got %v", v)
+	}
+}
+
+func TestNamedMapOpts(t *testing.T) {
+	tmpMap := make(map[string]string)
+	o := NewNamedMapOpts("max-name", tmpMap, nil)
+
+	o.Set("max-size=1")
+	if o.String() != "map[max-size:1]" {
+		t.Errorf("%s != map[max-size:1]", o.String())
+	}
+	if o.Name() != "max-name" {
+		t.Errorf("%s != max-name", o.Name())
+	}
+	if _, exist := tmpMap["max-size"]; !exist {
+		t.Errorf("expected max-size to be in the values, got %v", tmpMap)
+	}
+}
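
The point of `NamedOption` is to give each flag-backed option a stable name that a configuration loader can match against keys in a file such as the `daemon.json` added with `--config-file`. A minimal sketch of that intended use, assuming the package path from this patch; the option names `labels` and `log-opts` are purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // package extended by this patch
)

func main() {
	var labels []string
	named := []opts.NamedOption{
		opts.NewNamedListOptsRef("labels", &labels, nil),
		opts.NewNamedMapOpts("log-opts", map[string]string{}, nil),
	}

	// Name() reports the configuration key each option is bound to, so a
	// config-file loader can reconcile flag values with file values by name.
	for _, o := range named {
		fmt.Println("configurable option:", o.Name())
	}
}
```
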
diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go
index c5559a3..c0ef387 100644
--- a/pkg/authorization/authz.go
+++ b/pkg/authorization/authz.go
@@ -1,16 +1,19 @@
 package authorization
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strings"
 
 	"github.com/Sirupsen/logrus"
+	"github.com/docker/docker/pkg/ioutils"
 )
 
+const maxBodySize = 1048576 // 1MB
+
 // NewCtx creates new authZ context, it is used to store authorization information related to a specific docker
 // REST http session
 // A context provides two method:
@@ -52,18 +55,12 @@
 func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error {
 	var body []byte
 	if sendBody(ctx.requestURI, r.Header) {
-		var (
-			err         error
-			drainedBody io.ReadCloser
-		)
-		drainedBody, r.Body, err = drainBody(r.Body)
-		if err != nil {
-			return err
-		}
-		defer drainedBody.Close()
-		body, err = ioutil.ReadAll(drainedBody)
-		if err != nil {
-			return err
+		if r.ContentLength < maxBodySize {
+			var err error
+			body, r.Body, err = drainBody(r.Body)
+			if err != nil {
+				return err
+			}
 		}
 	}
 
@@ -126,15 +123,21 @@
 
 // drainBody dumps the body: it reads the body data into memory and
 // returns a replayable copy; see the Go sources at /go/src/net/http/httputil/dump.go
-func drainBody(b io.ReadCloser) (io.ReadCloser, io.ReadCloser, error) {
-	var buf bytes.Buffer
-	if _, err := buf.ReadFrom(b); err != nil {
+func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
+	bufReader := bufio.NewReaderSize(body, maxBodySize)
+	newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
+
+	data, err := bufReader.Peek(maxBodySize)
+	if err != io.EOF {
+		// This means the request is larger than our max
+		if err == bufio.ErrBufferFull {
+			return nil, newBody, nil
+		}
+		// This means we had an error reading
 		return nil, nil, err
 	}
-	if err := b.Close(); err != nil {
-		return nil, nil, err
-	}
-	return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil
+
+	return data, newBody, nil
 }
 
 // sendBody returns true when request/response body should be sent to AuthZPlugin
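
For reviewers, here is a self-contained sketch of the peek-and-replay technique the new `drainBody` uses, built with only the standard library (the patch itself wraps the reader via `pkg/ioutils`). The names `peekBody` and `readCloser` are illustrative; unlike the patch, this sketch also treats a completely filled buffer (a nil `Peek` error) as "too large to audit":

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

const maxBodySize = 1 << 20 // 1MB, mirroring the patch's constant

// readCloser glues the buffered reader back to the original body's Close.
type readCloser struct {
	io.Reader
	close func() error
}

func (r readCloser) Close() error { return r.close() }

// peekBody captures up to maxBodySize bytes of body without consuming the
// stream: the returned ReadCloser replays everything, and data is nil when
// the body is too large to audit.
func peekBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
	br := bufio.NewReaderSize(body, maxBodySize)
	newBody := readCloser{Reader: br, close: body.Close}

	data, err := br.Peek(maxBodySize)
	switch err {
	case io.EOF:
		return data, newBody, nil // whole body fit in the buffer
	case nil, bufio.ErrBufferFull:
		return nil, newBody, nil // body is at least maxBodySize: skip auditing
	default:
		return nil, nil, err // genuine read error
	}
}

func main() {
	data, rest, err := peekBody(ioutil.NopCloser(bytes.NewBufferString("hello")))
	fmt.Printf("peeked=%q err=%v\n", data, err)
	all, _ := ioutil.ReadAll(rest)
	fmt.Printf("replayed=%q\n", all) // the stream is still fully readable
}
```
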
diff --git a/pkg/discovery/backends.go b/pkg/discovery/backends.go
index 875a26c..f150115 100644
--- a/pkg/discovery/backends.go
+++ b/pkg/discovery/backends.go
@@ -12,12 +12,8 @@
 var (
 	// Backends is a global map of discovery backends indexed by their
 	// associated scheme.
-	backends map[string]Backend
-)
-
-func init() {
 	backends = make(map[string]Backend)
-}
+)
 
 // Register makes a discovery backend available by the provided scheme.
 // If Register is called twice with the same scheme an error is returned.
@@ -42,7 +38,7 @@
 
 // ParseAdvertise parses the --cluster-advertise daemon config which accepts
 // <ip-address>:<port> or <interface-name>:<port>
-func ParseAdvertise(store, advertise string) (string, error) {
+func ParseAdvertise(advertise string) (string, error) {
 	var (
 		iface *net.Interface
 		addrs []net.Addr
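
The hunk drops the unused `store` parameter from `ParseAdvertise`. The implementation is not shown in this hunk, but per its doc comment it accepts `<ip-address>:<port>` as-is and resolves `<interface-name>:<port>`; the following is a rough, hedged stand-in illustrating that contract, not the patch's actual code:

```go
package main

import (
	"fmt"
	"net"
)

// parseAdvertise is a simplified stand-in for pkg/discovery.ParseAdvertise:
// "<ip>:<port>" is returned unchanged, while "<interface>:<port>" resolves to
// the interface's first IPv4 address. Error handling is abbreviated.
func parseAdvertise(advertise string) (string, error) {
	host, port, err := net.SplitHostPort(advertise)
	if err != nil {
		return "", err
	}
	if net.ParseIP(host) != nil {
		return advertise, nil // already an address
	}
	iface, err := net.InterfaceByName(host)
	if err != nil {
		return "", err
	}
	addrs, err := iface.Addrs()
	if err != nil {
		return "", err
	}
	for _, a := range addrs {
		if ipnet, ok := a.(*net.IPNet); ok && ipnet.IP.To4() != nil {
			return net.JoinHostPort(ipnet.IP.String(), port), nil
		}
	}
	return "", fmt.Errorf("no IPv4 address on interface %s", host)
}

func main() {
	fmt.Println(parseAdvertise("192.168.1.10:2376")) // passes through
	fmt.Println(parseAdvertise("eth0:2376"))         // resolves eth0, if present
}
```
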
diff --git a/pkg/discovery/memory/memory.go b/pkg/discovery/memory/memory.go
new file mode 100644
index 0000000..777a9a1
--- /dev/null
+++ b/pkg/discovery/memory/memory.go
@@ -0,0 +1,83 @@
+package memory
+
+import (
+	"time"
+
+	"github.com/docker/docker/pkg/discovery"
+)
+
+// Discovery implements a discovery backend that keeps
+// data in memory.
+type Discovery struct {
+	heartbeat time.Duration
+	values    []string
+}
+
+func init() {
+	Init()
+}
+
+// Init registers the memory backend on demand.
+func Init() {
+	discovery.Register("memory", &Discovery{})
+}
+
+// Initialize sets the heartbeat for the memory backend.
+func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error {
+	s.heartbeat = heartbeat
+	s.values = make([]string, 0)
+	return nil
+}
+
+// Watch sends periodic discovery updates to a channel.
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+	ch := make(chan discovery.Entries)
+	errCh := make(chan error)
+	ticker := time.NewTicker(s.heartbeat)
+
+	go func() {
+		defer close(errCh)
+		defer close(ch)
+
+		// Send the initial entries if available.
+		var currentEntries discovery.Entries
+		if len(s.values) > 0 {
+			var err error
+			currentEntries, err = discovery.CreateEntries(s.values)
+			if err != nil {
+				errCh <- err
+			} else {
+				ch <- currentEntries
+			}
+		}
+
+		// Periodically send updates.
+		for {
+			select {
+			case <-ticker.C:
+				newEntries, err := discovery.CreateEntries(s.values)
+				if err != nil {
+					errCh <- err
+					continue
+				}
+
+				// Check if the file has really changed.
+				if !newEntries.Equals(currentEntries) {
+					ch <- newEntries
+				}
+				currentEntries = newEntries
+			case <-stopCh:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	return ch, errCh
+}
+
+// Register adds a new address to the discovery.
+func (s *Discovery) Register(addr string) error {
+	s.values = append(s.values, addr)
+	return nil
+}
diff --git a/pkg/discovery/memory/memory_test.go b/pkg/discovery/memory/memory_test.go
new file mode 100644
index 0000000..c2da0a0
--- /dev/null
+++ b/pkg/discovery/memory/memory_test.go
@@ -0,0 +1,48 @@
+package memory
+
+import (
+	"testing"
+
+	"github.com/docker/docker/pkg/discovery"
+	"github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type discoverySuite struct{}
+
+var _ = check.Suite(&discoverySuite{})
+
+func (s *discoverySuite) TestWatch(c *check.C) {
+	d := &Discovery{}
+	d.Initialize("foo", 1000, 0, nil)
+	stopCh := make(chan struct{})
+	ch, errCh := d.Watch(stopCh)
+
+	// We have to drain the error channel otherwise Watch will get stuck.
+	go func() {
+		for range errCh {
+		}
+	}()
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+	}
+
+	c.Assert(d.Register("1.1.1.1:1111"), check.IsNil)
+	c.Assert(<-ch, check.DeepEquals, expected)
+
+	expected = discovery.Entries{
+		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
+	}
+
+	c.Assert(d.Register("2.2.2.2:2222"), check.IsNil)
+	c.Assert(<-ch, check.DeepEquals, expected)
+
+	// Stop and make sure it closes all channels.
+	close(stopCh)
+	c.Assert(<-ch, check.IsNil)
+	c.Assert(<-errCh, check.IsNil)
+}
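
Outside the gocheck harness, the new in-memory backend might be exercised as in this minimal sketch (the heartbeat value is arbitrary):

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/discovery/memory" // backend added in this patch
)

func main() {
	d := &memory.Discovery{}
	d.Initialize("", 500*time.Millisecond, 0, nil)

	stopCh := make(chan struct{})
	ch, errCh := d.Watch(stopCh)
	go func() { // drain errors so the Watch goroutine never blocks
		for range errCh {
		}
	}()

	d.Register("1.1.1.1:1111")

	// Entries arrive on the next heartbeat tick after the set changes.
	entries := <-ch
	for _, e := range entries {
		fmt.Println("discovered:", e.Host, e.Port)
	}
	close(stopCh)
}
```
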
diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go
index db3535b..58cc401 100644
--- a/pkg/pidfile/pidfile.go
+++ b/pkg/pidfile/pidfile.go
@@ -9,6 +9,7 @@
 	"os"
 	"path/filepath"
 	"strconv"
+	"strings"
 )
 
 // PIDFile is a file used to store the process ID of a running process.
@@ -17,9 +18,10 @@
 }
 
 func checkPIDFileAlreadyExists(path string) error {
-	if pidString, err := ioutil.ReadFile(path); err == nil {
-		if pid, err := strconv.Atoi(string(pidString)); err == nil {
-			if _, err := os.Stat(filepath.Join("/proc", string(pid))); err == nil {
+	if pidByte, err := ioutil.ReadFile(path); err == nil {
+		pidString := strings.TrimSpace(string(pidByte))
+		if pid, err := strconv.Atoi(pidString); err == nil {
+			if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil {
 				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path)
 			}
 		}
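
This hunk fixes two bugs: the PID string is trimmed before parsing (pidfiles usually end with a newline), and the PID is formatted with `strconv.Itoa` instead of the incorrect `string(pid)` conversion, which yields the rune with that code point rather than its decimal digits. A standalone sketch of the corrected check, Linux-only since it relies on `/proc`:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// processExists reports whether the PID stored in a pidfile refers to a
// live process, tolerating the trailing newline most tools write.
func processExists(path string) bool {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return false
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		return false
	}
	// On Linux, a live PID has an entry under /proc.
	_, err = os.Stat(filepath.Join("/proc", strconv.Itoa(pid)))
	return err == nil
}

func main() {
	f, _ := ioutil.TempFile("", "pid")
	fmt.Fprintf(f, "%d\n", os.Getpid()) // note the trailing newline
	f.Close()
	defer os.Remove(f.Name())
	fmt.Println(processExists(f.Name())) // true on Linux
}
```
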
diff --git a/pkg/registrar/registrar.go b/pkg/registrar/registrar.go
new file mode 100644
index 0000000..8910197
--- /dev/null
+++ b/pkg/registrar/registrar.go
@@ -0,0 +1,127 @@
+// Package registrar provides name registration. It reserves a name to a given key.
+package registrar
+
+import (
+	"errors"
+	"sync"
+)
+
+var (
+	// ErrNameReserved is an error which is returned when a requested name is already reserved
+	ErrNameReserved = errors.New("name is reserved")
+	// ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved
+	ErrNameNotReserved = errors.New("name is not reserved")
+	// ErrNoSuchKey is returned when trying to find the names for a key which is not known
+	ErrNoSuchKey = errors.New("provided key does not exist")
+)
+
+// Registrar indexes a list of keys and their registered names, as well as names and the key that they are registered to.
+// Names must be unique.
+// Registrar is safe for concurrent access.
+type Registrar struct {
+	idx   map[string][]string
+	names map[string]string
+	mu    sync.Mutex
+}
+
+// NewRegistrar creates a new Registrar with an empty index
+func NewRegistrar() *Registrar {
+	return &Registrar{
+		idx:   make(map[string][]string),
+		names: make(map[string]string),
+	}
+}
+
+// Reserve registers a key to a name
+// Reserve is idempotent
+// Attempting to reserve a name that is already reserved to a different key results in an `ErrNameReserved`
+// A name reservation is globally unique
+func (r *Registrar) Reserve(name, key string) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if k, exists := r.names[name]; exists {
+		if k != key {
+			return ErrNameReserved
+		}
+		return nil
+	}
+
+	r.idx[key] = append(r.idx[key], name)
+	r.names[name] = key
+	return nil
+}
+
+// Release releases the reserved name
+// Once released, a name can be reserved again
+func (r *Registrar) Release(name string) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	key, exists := r.names[name]
+	if !exists {
+		return
+	}
+
+	for i, n := range r.idx[key] {
+		if n != name {
+			continue
+		}
+		r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...)
+		break
+	}
+
+	delete(r.names, name)
+
+	if len(r.idx[key]) == 0 {
+		delete(r.idx, key)
+	}
+}
+
+// Delete removes all reservations for the passed in key.
+// All names reserved to this key are released.
+func (r *Registrar) Delete(key string) {
+	r.mu.Lock()
+	for _, name := range r.idx[key] {
+		delete(r.names, name)
+	}
+	delete(r.idx, key)
+	r.mu.Unlock()
+}
+
+// GetNames lists all the reserved names for the given key
+func (r *Registrar) GetNames(key string) ([]string, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	names, exists := r.idx[key]
+	if !exists {
+		return nil, ErrNoSuchKey
+	}
+	return names, nil
+}
+
+// Get returns the key that the passed in name is reserved to
+func (r *Registrar) Get(name string) (string, error) {
+	r.mu.Lock()
+	key, exists := r.names[name]
+	r.mu.Unlock()
+
+	if !exists {
+		return "", ErrNameNotReserved
+	}
+	return key, nil
+}
+
+// GetAll returns all registered names
+func (r *Registrar) GetAll() map[string][]string {
+	out := make(map[string][]string)
+
+	r.mu.Lock()
+	// copy index into out
+	for id, names := range r.idx {
+		out[id] = names
+	}
+	r.mu.Unlock()
+	return out
+}
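
A brief usage sketch of the new package's reservation semantics; the names and keys here are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/registrar" // package added in this patch
)

func main() {
	r := registrar.NewRegistrar()

	// Reserving the same name for the same key is idempotent.
	fmt.Println(r.Reserve("web", "container-1")) // <nil>
	fmt.Println(r.Reserve("web", "container-1")) // <nil>

	// A second key may not take a reserved name.
	fmt.Println(r.Reserve("web", "container-2")) // name is reserved

	// Releasing frees the name for reuse.
	r.Release("web")
	fmt.Println(r.Reserve("web", "container-2")) // <nil>
}
```
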
diff --git a/pkg/registrar/registrar_test.go b/pkg/registrar/registrar_test.go
new file mode 100644
index 0000000..0c1ef31
--- /dev/null
+++ b/pkg/registrar/registrar_test.go
@@ -0,0 +1,119 @@
+package registrar
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestReserve(t *testing.T) {
+	r := NewRegistrar()
+
+	obj := "test1"
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+
+	obj2 := "test2"
+	err := r.Reserve("test", obj2)
+	if err == nil {
+		t.Fatalf("expected error when reserving an already reserved name to another object")
+	}
+	if err != ErrNameReserved {
+		t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name")
+	}
+}
+
+func TestRelease(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+	r.Release("test")
+	r.Release("test") // Ensure there is no panic here
+
+	if err := r.Reserve("test", obj); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestGetNames(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+	names := []string{"test1", "test2"}
+
+	for _, name := range names {
+		if err := r.Reserve(name, obj); err != nil {
+			t.Fatal(err)
+		}
+	}
+	r.Reserve("test3", "other")
+
+	names2, err := r.GetNames(obj)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(names, names2) {
+		t.Fatalf("Expected: %v, Got: %v", names, names2)
+	}
+}
+
+func TestDelete(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+	names := []string{"test1", "test2"}
+	for _, name := range names {
+		if err := r.Reserve(name, obj); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	r.Reserve("test3", "other")
+	r.Delete(obj)
+
+	_, err := r.GetNames(obj)
+	if err == nil {
+		t.Fatal("expected error getting names for deleted key")
+	}
+
+	if err != ErrNoSuchKey {
+		t.Fatal("expected `ErrNoSuchKey`")
+	}
+}
+
+func TestGet(t *testing.T) {
+	r := NewRegistrar()
+	obj := "testing"
+	name := "test"
+
+	_, err := r.Get(name)
+	if err == nil {
+		t.Fatal("expected error when key does not exist")
+	}
+	if err != ErrNameNotReserved {
+		t.Fatal(err)
+	}
+
+	if err := r.Reserve(name, obj); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err = r.Get(name); err != nil {
+		t.Fatal(err)
+	}
+
+	r.Delete(obj)
+	_, err = r.Get(name)
+	if err == nil {
+		t.Fatal("expected error when key does not exist")
+	}
+	if err != ErrNameNotReserved {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go
index 8ec1ceb..285b3ba 100644
--- a/pkg/sysinfo/sysinfo.go
+++ b/pkg/sysinfo/sysinfo.go
@@ -7,6 +7,8 @@
 type SysInfo struct {
 	// Whether the kernel supports AppArmor or not
 	AppArmor bool
+	// Whether the kernel supports Seccomp or not
+	Seccomp bool
 
 	cgroupMemInfo
 	cgroupCPUInfo
diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go
index ef3410c..59d5379 100644
--- a/pkg/sysinfo/sysinfo_linux.go
+++ b/pkg/sysinfo/sysinfo_linux.go
@@ -5,11 +5,17 @@
 	"os"
 	"path"
 	"strings"
+	"syscall"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 )
 
+const (
+	// SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER.
+	SeccompModeFilter = uintptr(2)
+)
+
 // New returns a new SysInfo, using the filesystem to detect which features
 // the kernel supports. If `quiet` is `false` warnings are printed in logs
 // whenever an error occurs or misconfigurations are present.
@@ -32,6 +38,14 @@
 		sysInfo.AppArmor = true
 	}
 
+	// Check if Seccomp is supported, via CONFIG_SECCOMP.
+	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
+		// Make sure the kernel has CONFIG_SECCOMP_FILTER.
+		if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
+			sysInfo.Seccomp = true
+		}
+	}
+
 	return sysInfo
 }
 
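
A standalone, Linux-only sketch of the same two-step probe. The idea, as I read the patch, is that `PR_GET_SECCOMP` fails with `EINVAL` when the kernel lacks `CONFIG_SECCOMP`, and `PR_SET_SECCOMP` with `SECCOMP_MODE_FILTER` and a null filter argument fails with `EFAULT` rather than `EINVAL` when `CONFIG_SECCOMP_FILTER` is compiled in, so the probe never actually enables seccomp:

```go
package main

import (
	"fmt"
	"syscall"
)

// seccompModeFilter mirrors the kernel's SECCOMP_MODE_FILTER constant.
const seccompModeFilter = uintptr(2)

// seccompSupported probes for CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER the
// same way the patched sysinfo code does; EINVAL means "not supported".
func seccompSupported() bool {
	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
		if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, seccompModeFilter, 0); err != syscall.EINVAL {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println("seccomp filter support:", seccompSupported())
}
```
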
diff --git a/runconfig/errors.go b/runconfig/errors.go
index 34b8aec..9f0c6b8 100644
--- a/runconfig/errors.go
+++ b/runconfig/errors.go
@@ -33,4 +33,6 @@
 	ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only")
 	// ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and preferred ip address
 	ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets")
+	// ErrUnsupportedNetworkAndAlias conflict between network mode and alias
+	ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks")
 )
diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go
index 7875d82..e536313 100644
--- a/runconfig/hostconfig_unix.go
+++ b/runconfig/hostconfig_unix.go
@@ -48,10 +48,6 @@
 		return ErrConflictContainerNetworkAndLinks
 	}
 
-	if hc.NetworkMode.IsUserDefined() && len(hc.Links) > 0 {
-		return ErrConflictUserDefinedNetworkAndLinks
-	}
-
 	if (hc.NetworkMode.IsHost() || hc.NetworkMode.IsContainer()) && len(hc.DNS) > 0 {
 		return ErrConflictNetworkAndDNS
 	}
diff --git a/runconfig/opts/parse.go b/runconfig/opts/parse.go
index b27b9b5..f716022 100644
--- a/runconfig/opts/parse.go
+++ b/runconfig/opts/parse.go
@@ -1,7 +1,10 @@
 package opts
 
 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
+	"io/ioutil"
 	"path"
 	"strconv"
 	"strings"
@@ -30,6 +33,7 @@
 		flDeviceReadBps     = NewThrottledeviceOpt(ValidateThrottleBpsDevice)
 		flDeviceWriteBps    = NewThrottledeviceOpt(ValidateThrottleBpsDevice)
 		flLinks             = opts.NewListOpts(ValidateLink)
+		flAliases           = opts.NewListOpts(nil)
 		flDeviceReadIOps    = NewThrottledeviceOpt(ValidateThrottleIOpsDevice)
 		flDeviceWriteIOps   = NewThrottledeviceOpt(ValidateThrottleIOpsDevice)
 		flEnv               = opts.NewListOpts(ValidateEnv)
@@ -100,6 +104,7 @@
 	cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume")
 	cmd.Var(&flTmpfs, []string{"-tmpfs"}, "Mount a tmpfs directory")
 	cmd.Var(&flLinks, []string{"-link"}, "Add link to another container")
+	cmd.Var(&flAliases, []string{"-net-alias"}, "Add network-scoped alias for the container")
 	cmd.Var(&flDevices, []string{"-device"}, "Add a host device to the container")
 	cmd.Var(&flLabels, []string{"l", "-label"}, "Set meta data on a container")
 	cmd.Var(&flLabelsFile, []string{"-label-file"}, "Read in a line delimited file of labels")
@@ -320,6 +325,11 @@
 		return nil, nil, nil, cmd, err
 	}
 
+	securityOpts, err := parseSecurityOpts(flSecurityOpt.GetAll())
+	if err != nil {
+		return nil, nil, nil, cmd, err
+	}
+
 	resources := container.Resources{
 		CgroupParent:         *flCgroupParent,
 		Memory:               flMemory,
@@ -327,7 +337,7 @@
 		MemorySwap:           memorySwap,
 		MemorySwappiness:     flSwappiness,
 		KernelMemory:         KernelMemory,
-		OomKillDisable:       *flOomKillDisable,
+		OomKillDisable:       flOomKillDisable,
 		CPUShares:            *flCPUShares,
 		CPUPeriod:            *flCPUPeriod,
 		CpusetCpus:           *flCpusetCpus,
@@ -394,7 +404,7 @@
 		CapDrop:        strslice.New(flCapDrop.GetAll()...),
 		GroupAdd:       flGroupAdd.GetAll(),
 		RestartPolicy:  restartPolicy,
-		SecurityOpt:    flSecurityOpt.GetAll(),
+		SecurityOpt:    securityOpts,
 		ReadonlyRootfs: *flReadonlyRootfs,
 		LogConfig:      container.LogConfig{Type: *flLoggingDriver, Config: loggingOpts},
 		VolumeDriver:   *flVolumeDriver,
@@ -409,11 +419,11 @@
 		config.StdinOnce = true
 	}
 
-	var networkingConfig *networktypes.NetworkingConfig
+	networkingConfig := &networktypes.NetworkingConfig{
+		EndpointsConfig: make(map[string]*networktypes.EndpointSettings),
+	}
+
 	if *flIPv4Address != "" || *flIPv6Address != "" {
-		networkingConfig = &networktypes.NetworkingConfig{
-			EndpointsConfig: make(map[string]*networktypes.EndpointSettings),
-		}
 		networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = &networktypes.EndpointSettings{
 			IPAMConfig: &networktypes.EndpointIPAMConfig{
 				IPv4Address: *flIPv4Address,
@@ -422,6 +432,26 @@
 		}
 	}
 
+	if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 {
+		epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)]
+		if epConfig == nil {
+			epConfig = &networktypes.EndpointSettings{}
+		}
+		epConfig.Links = make([]string, len(hostConfig.Links))
+		copy(epConfig.Links, hostConfig.Links)
+		networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
+	}
+
+	if hostConfig.NetworkMode.IsUserDefined() && flAliases.Len() > 0 {
+		epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)]
+		if epConfig == nil {
+			epConfig = &networktypes.EndpointSettings{}
+		}
+		epConfig.Aliases = make([]string, flAliases.Len())
+		copy(epConfig.Aliases, flAliases.GetAll())
+		networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig
+	}
+
 	return config, hostConfig, networkingConfig, cmd, nil
 }
 
@@ -464,6 +494,29 @@
 	return loggingOptsMap, nil
 }
 
+// parseSecurityOpts reads the contents of any local seccomp profile referenced in a security option and inlines it for sending to the daemon
+func parseSecurityOpts(securityOpts []string) ([]string, error) {
+	for key, opt := range securityOpts {
+		con := strings.SplitN(opt, ":", 2)
+		if len(con) == 1 {
+			return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt)
+		}
+		if con[0] == "seccomp" && con[1] != "unconfined" {
+			f, err := ioutil.ReadFile(con[1])
+			if err != nil {
+				return securityOpts, fmt.Errorf("Opening seccomp profile (%s) failed: %v", con[1], err)
+			}
+			b := bytes.NewBuffer(nil)
+			if err := json.Compact(b, f); err != nil {
+				return securityOpts, fmt.Errorf("Compacting json for seccomp profile (%s) failed: %v", con[1], err)
+			}
+			securityOpts[key] = fmt.Sprintf("seccomp:%s", b.Bytes())
+		}
+	}
+
+	return securityOpts, nil
+}
+
 // ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
 func ParseRestartPolicy(policy string) (container.RestartPolicy, error) {
 	p := container.RestartPolicy{}
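
The notable step in `parseSecurityOpts` is inlining the profile file into the option value itself, compacted so it travels as a single `seccomp:<json>` string. A minimal sketch of that transformation, with illustrative profile contents:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// A pretty-printed profile, as might be loaded from
	// --security-opt seccomp:/path/to/profile.json.
	profile := []byte(`{
		"defaultAction": "SCMP_ACT_ALLOW",
		"syscalls": []
	}`)

	// Compacting strips insignificant whitespace so the JSON is safe to
	// embed as a single option value.
	var b bytes.Buffer
	if err := json.Compact(&b, profile); err != nil {
		panic(err)
	}
	fmt.Printf("seccomp:%s\n", b.Bytes())
}
```
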
diff --git a/utils/debug.go b/utils/debug.go
new file mode 100644
index 0000000..d203891
--- /dev/null
+++ b/utils/debug.go
@@ -0,0 +1,26 @@
+package utils
+
+import (
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// EnableDebug sets the DEBUG env var to 1
+// and makes the logger log at debug level.
+func EnableDebug() {
+	os.Setenv("DEBUG", "1")
+	logrus.SetLevel(logrus.DebugLevel)
+}
+
+// DisableDebug clears the DEBUG env var
+// and makes the logger log at info level.
+func DisableDebug() {
+	os.Setenv("DEBUG", "")
+	logrus.SetLevel(logrus.InfoLevel)
+}
+
+// IsDebugEnabled checks whether the debug flag is set or not.
+func IsDebugEnabled() bool {
+	return os.Getenv("DEBUG") != ""
+}
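
A quick usage sketch of the new helpers; note that `DisableDebug` sets `DEBUG` to the empty string, which `IsDebugEnabled` treats as off:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/utils" // package added above
)

func main() {
	utils.EnableDebug()
	fmt.Println(utils.IsDebugEnabled()) // true
	logrus.Debug("now visible")

	utils.DisableDebug()
	fmt.Println(utils.IsDebugEnabled()) // false
	logrus.Debug("suppressed again")
}
```
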
diff --git a/vendor/src/github.com/RackSec/srslog/.gitignore b/vendor/src/github.com/RackSec/srslog/.gitignore
new file mode 100644
index 0000000..ebf0f2e
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/.gitignore
@@ -0,0 +1 @@
+.cover
diff --git a/vendor/src/github.com/RackSec/srslog/.travis.yml b/vendor/src/github.com/RackSec/srslog/.travis.yml
new file mode 100644
index 0000000..767c1d8
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/.travis.yml
@@ -0,0 +1,13 @@
+sudo: required
+dist: trusty
+group: edge
+language: go
+go:
+- 1.5
+script:
+- |
+  go get ./...
+  go test -v ./...
+notifications:
+  slack:
+    secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec=
diff --git a/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md b/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..18ac49f
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md
@@ -0,0 +1,50 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project, and in the interest of
+fostering an open and welcoming community, we pledge to respect all people who
+contribute through reporting issues, posting feature requests, updating
+documentation, submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project a harassment-free
+experience for everyone, regardless of level of experience, gender, gender
+identity and expression, sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information, such as physical or electronic
+  addresses, without explicit permission
+* Other unethical or unprofessional conduct
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+By adopting this Code of Conduct, project maintainers commit themselves to
+fairly and consistently applying these principles to every aspect of managing
+this project. Project maintainers who do not follow or enforce the Code of
+Conduct may be permanently removed from the project team.
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting a project maintainer at [sirsean@gmail.com]. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. Maintainers are
+obligated to maintain confidentiality with regard to the reporter of an
+incident.
+
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.3.0, available at
+[http://contributor-covenant.org/version/1/3/0/][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/3/0/
diff --git a/vendor/src/github.com/RackSec/srslog/LICENSE b/vendor/src/github.com/RackSec/srslog/LICENSE
new file mode 100644
index 0000000..9269338
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015 Rackspace. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/RackSec/srslog/README.md b/vendor/src/github.com/RackSec/srslog/README.md
new file mode 100644
index 0000000..1ae1fd4
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/README.md
@@ -0,0 +1,131 @@
+[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog)
+
+# srslog
+
+Go has a `syslog` package in the standard library, but it has the following
+shortcomings:
+
+1. It doesn't have TLS support
+2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716)
+
+I agree that it doesn't need to be in the standard library. So, I've
+followed Brad's suggestion and have made a separate project to handle syslog.
+
+This code was taken directly from the Go project as a base to start from.
+
+However, this _does_ have TLS support.
+
+# Usage
+
+Basic usage retains the same interface as the original `syslog` package. We
+only added to the interface where required to support new functionality.
+
+Switch from the standard library:
+
+```
+import(
+    //"log/syslog"
+    syslog "github.com/RackSec/srslog"
+)
+```
+
+You can still use it for local syslog:
+
+```
+w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag")
+```
+
+Or to unencrypted UDP:
+
+```
+w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag")
+```
+
+Or to unencrypted TCP:
+
+```
+w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag")
+```
+
+But now you can also send messages via TLS-encrypted TCP:
+
+```
+w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem")
+```
+
+And if you need more control over your TLS configuration :
+
+```
+pool := x509.NewCertPool()
+serverCert, err := ioutil.ReadFile("/path/to/servercert.pem")
+if err != nil {
+    return nil, err
+}
+pool.AppendCertsFromPEM(serverCert)
+config := tls.Config{
+    RootCAs: pool,
+}
+
+w, err := DialWithTLSConfig(network, raddr, priority, tag, &config)
+```
+
+(Note that in both TLS cases, this uses a self-signed certificate, where the
+remote syslog server has the keypair and the client has only the public key.)
+
+And then to write log messages, continue like so:
+
+```
+if err != nil {
+    log.Fatal("failed to connect to syslog:", err)
+}
+defer w.Close()
+
+w.Alert("this is an alert")
+w.Crit("this is critical")
+w.Err("this is an error")
+w.Warning("this is a warning")
+w.Notice("this is a notice")
+w.Info("this is info")
+w.Debug("this is debug")
+w.Write([]byte("these are some bytes"))
+```
+
+# Generating TLS Certificates
+
+We've provided a script that you can use to generate a self-signed keypair:
+
+```
+pip install cryptography
+python script/gen-certs.py
+```
+
+That outputs the public key and private key to standard out. Put those into
+`.pem` files. (And don't put them into any source control. The certificate in
+the `test` directory is used by the unit tests, and please do not actually use
+it anywhere else.)
+
+# Running Tests
+
+Run the tests as usual:
+
+```
+go test
+```
+
+But we've also provided a test coverage script that will show you which
+lines of code are not covered:
+
+```
+script/coverage --html
+```
+
+That will open a new browser tab showing coverage information.
+
+# License
+
+This project uses the New BSD License, the same as the Go project itself.
+
+# Code of Conduct
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
diff --git a/vendor/src/github.com/RackSec/srslog/constants.go b/vendor/src/github.com/RackSec/srslog/constants.go
new file mode 100644
index 0000000..600801e
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/constants.go
@@ -0,0 +1,68 @@
+package srslog
+
+import (
+	"errors"
+)
+
+// Priority is a combination of the syslog facility and
+// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity
+// message from the FTP facility. The default severity is LOG_EMERG;
+// the default facility is LOG_KERN.
+type Priority int
+
+const severityMask = 0x07
+const facilityMask = 0xf8
+
+const (
+	// Severity.
+
+	// From /usr/include/sys/syslog.h.
+	// These are the same on Linux, BSD, and OS X.
+	LOG_EMERG Priority = iota
+	LOG_ALERT
+	LOG_CRIT
+	LOG_ERR
+	LOG_WARNING
+	LOG_NOTICE
+	LOG_INFO
+	LOG_DEBUG
+)
+
+const (
+	// Facility.
+
+	// From /usr/include/sys/syslog.h.
+	// These are the same up to LOG_FTP on Linux, BSD, and OS X.
+	LOG_KERN Priority = iota << 3
+	LOG_USER
+	LOG_MAIL
+	LOG_DAEMON
+	LOG_AUTH
+	LOG_SYSLOG
+	LOG_LPR
+	LOG_NEWS
+	LOG_UUCP
+	LOG_CRON
+	LOG_AUTHPRIV
+	LOG_FTP
+	_ // unused
+	_ // unused
+	_ // unused
+	_ // unused
+	LOG_LOCAL0
+	LOG_LOCAL1
+	LOG_LOCAL2
+	LOG_LOCAL3
+	LOG_LOCAL4
+	LOG_LOCAL5
+	LOG_LOCAL6
+	LOG_LOCAL7
+)
+
+func validatePriority(p Priority) error {
+	if p < 0 || p > LOG_LOCAL7|LOG_DEBUG {
+		return errors.New("log/syslog: invalid priority")
+	} else {
+		return nil
+	}
+}
diff --git a/vendor/src/github.com/RackSec/srslog/dialer.go b/vendor/src/github.com/RackSec/srslog/dialer.go
new file mode 100644
index 0000000..7811538
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/dialer.go
@@ -0,0 +1,53 @@
+package srslog
+
+import (
+	"crypto/tls"
+	"net"
+)
+
+func (w Writer) getDialer() func() (serverConn, string, error) {
+	dialers := map[string]func() (serverConn, string, error){
+		"":        w.unixDialer,
+		"tcp+tls": w.tlsDialer,
+	}
+	dialer, ok := dialers[w.network]
+	if !ok {
+		dialer = w.basicDialer
+	}
+	return dialer
+}
+
+func (w Writer) unixDialer() (serverConn, string, error) {
+	sc, err := unixSyslog()
+	hostname := w.hostname
+	if hostname == "" {
+		hostname = "localhost"
+	}
+	return sc, hostname, err
+}
+
+func (w Writer) tlsDialer() (serverConn, string, error) {
+	c, err := tls.Dial("tcp", w.raddr, w.tlsConfig)
+	var sc serverConn
+	hostname := w.hostname
+	if err == nil {
+		sc = &netConn{conn: c}
+		if hostname == "" {
+			hostname = c.LocalAddr().String()
+		}
+	}
+	return sc, hostname, err
+}
+
+func (w Writer) basicDialer() (serverConn, string, error) {
+	c, err := net.Dial(w.network, w.raddr)
+	var sc serverConn
+	hostname := w.hostname
+	if err == nil {
+		sc = &netConn{conn: c}
+		if hostname == "" {
+			hostname = c.LocalAddr().String()
+		}
+	}
+	return sc, hostname, err
+}
diff --git a/vendor/src/github.com/RackSec/srslog/net_conn.go b/vendor/src/github.com/RackSec/srslog/net_conn.go
new file mode 100644
index 0000000..a73394c
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/net_conn.go
@@ -0,0 +1,24 @@
+package srslog
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+type netConn struct {
+	conn net.Conn
+}
+
+func (n *netConn) writeString(p Priority, hostname, tag, msg string) error {
+	timestamp := time.Now().Format(time.RFC3339)
+	_, err := fmt.Fprintf(n.conn, "<%d>%s %s %s[%d]: %s",
+		p, timestamp, hostname,
+		tag, os.Getpid(), msg)
+	return err
+}
+
+func (n *netConn) close() error {
+	return n.conn.Close()
+}
diff --git a/vendor/src/github.com/RackSec/srslog/srslog.go b/vendor/src/github.com/RackSec/srslog/srslog.go
new file mode 100644
index 0000000..3d03272
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/srslog.go
@@ -0,0 +1,96 @@
+package srslog
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+// This interface and the separate syslog_unix.go file exist for
+// Solaris support as implemented by gccgo.  On Solaris you can not
+// simply open a TCP connection to the syslog daemon.  The gccgo
+// sources have a syslog_solaris.go file that implements unixSyslog to
+// return a type that satisfies this interface and simply calls the C
+// library syslog function.
+type serverConn interface {
+	writeString(p Priority, hostname, tag, s string) error
+	close() error
+}
+
+// New establishes a new connection to the system log daemon.  Each
+// write to the returned Writer sends a log message with the given
+// priority and prefix.
+func New(priority Priority, tag string) (w *Writer, err error) {
+	return Dial("", "", priority, tag)
+}
+
+// Dial establishes a connection to a log daemon by connecting to
+// address raddr on the specified network.  Each write to the returned
+// Writer sends a log message with the given facility, severity and
+// tag.
+// If network is empty, Dial will connect to the local syslog server.
+func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) {
+	return DialWithTLSConfig(network, raddr, priority, tag, nil)
+}
+
+// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses certPath to load TLS certificates and configure
+// the secure connection.
+func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) {
+	pool := x509.NewCertPool()
+	serverCert, err := ioutil.ReadFile(certPath)
+	if err != nil {
+		return nil, err
+	}
+	pool.AppendCertsFromPEM(serverCert)
+	config := tls.Config{
+		RootCAs: pool,
+	}
+
+	return DialWithTLSConfig(network, raddr, priority, tag, &config)
+}
+
+// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to
+// address raddr on the specified network. It uses tlsConfig to configure the secure connection.
+func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) {
+	if err := validatePriority(priority); err != nil {
+		return nil, err
+	}
+
+	if tag == "" {
+		tag = os.Args[0]
+	}
+	hostname, _ := os.Hostname()
+
+	w := &Writer{
+		priority:  priority,
+		tag:       tag,
+		hostname:  hostname,
+		network:   network,
+		raddr:     raddr,
+		tlsConfig: tlsConfig,
+	}
+
+	w.Lock()
+	defer w.Unlock()
+
+	err := w.connect()
+	if err != nil {
+		return nil, err
+	}
+	return w, err
+}
+
+// NewLogger creates a log.Logger whose output is written to
+// the system log service with the specified priority. The logFlag
+// argument is the flag set passed through to log.New to create
+// the Logger.
+func NewLogger(p Priority, logFlag int) (*log.Logger, error) {
+	s, err := New(p, "")
+	if err != nil {
+		return nil, err
+	}
+	return log.New(s, "", logFlag), nil
+}
diff --git a/vendor/src/github.com/RackSec/srslog/srslog_unix.go b/vendor/src/github.com/RackSec/srslog/srslog_unix.go
new file mode 100644
index 0000000..430065c
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/srslog_unix.go
@@ -0,0 +1,47 @@
+package srslog
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+// unixSyslog opens a connection to the syslog daemon running on the
+// local machine using a Unix domain socket.
+
+func unixSyslog() (conn serverConn, err error) {
+	logTypes := []string{"unixgram", "unix"}
+	logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"}
+	for _, network := range logTypes {
+		for _, path := range logPaths {
+			conn, err := net.Dial(network, path)
+			if err != nil {
+				continue
+			} else {
+				return &localConn{conn: conn}, nil
+			}
+		}
+	}
+	return nil, errors.New("Unix syslog delivery error")
+}
+
+type localConn struct {
+	conn net.Conn
+}
+
+func (n *localConn) writeString(p Priority, hostname, tag, msg string) error {
+	// Compared to the network form at srslog.netConn, the changes are:
+	//	1. Use time.Stamp instead of time.RFC3339.
+	//	2. Drop the hostname field from the Fprintf.
+	timestamp := time.Now().Format(time.Stamp)
+	_, err := fmt.Fprintf(n.conn, "<%d>%s %s[%d]: %s",
+		p, timestamp,
+		tag, os.Getpid(), msg)
+	return err
+}
+
+func (n *localConn) close() error {
+	return n.conn.Close()
+}
diff --git a/vendor/src/github.com/RackSec/srslog/writer.go b/vendor/src/github.com/RackSec/srslog/writer.go
new file mode 100644
index 0000000..1e7e2eb
--- /dev/null
+++ b/vendor/src/github.com/RackSec/srslog/writer.go
@@ -0,0 +1,152 @@
+package srslog
+
+import (
+	"crypto/tls"
+	"strings"
+	"sync"
+)
+
+// A Writer is a connection to a syslog server.
+type Writer struct {
+	sync.Mutex // guards conn
+
+	priority  Priority
+	tag       string
+	hostname  string
+	network   string
+	raddr     string
+	tlsConfig *tls.Config
+
+	conn serverConn
+}
+
+// connect makes a connection to the syslog server.
+// It must be called with w.mu held.
+func (w *Writer) connect() (err error) {
+	if w.conn != nil {
+		// ignore err from close, it makes sense to continue anyway
+		w.conn.close()
+		w.conn = nil
+	}
+
+	var conn serverConn
+	var hostname string
+	dialer := w.getDialer()
+	conn, hostname, err = dialer()
+	if err == nil {
+		w.conn = conn
+		w.hostname = hostname
+	}
+
+	return
+}
+
+// Write sends a log message to the syslog daemon using the default priority
+// passed into `srslog.New` or the `srslog.Dial*` functions.
+func (w *Writer) Write(b []byte) (int, error) {
+	return w.writeAndRetry(w.priority, string(b))
+}
+
+// Close closes a connection to the syslog daemon.
+func (w *Writer) Close() error {
+	w.Lock()
+	defer w.Unlock()
+
+	if w.conn != nil {
+		err := w.conn.close()
+		w.conn = nil
+		return err
+	}
+	return nil
+}
+
+// Emerg logs a message with severity LOG_EMERG; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Emerg(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_EMERG, m)
+	return err
+}
+
+// Alert logs a message with severity LOG_ALERT; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Alert(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_ALERT, m)
+	return err
+}
+
+// Crit logs a message with severity LOG_CRIT; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Crit(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_CRIT, m)
+	return err
+}
+
+// Err logs a message with severity LOG_ERR; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Err(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_ERR, m)
+	return err
+}
+
+// Warning logs a message with severity LOG_WARNING; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Warning(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_WARNING, m)
+	return err
+}
+
+// Notice logs a message with severity LOG_NOTICE; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Notice(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_NOTICE, m)
+	return err
+}
+
+// Info logs a message with severity LOG_INFO; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Info(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_INFO, m)
+	return err
+}
+
+// Debug logs a message with severity LOG_DEBUG; this overrides the default
+// priority passed to `srslog.New` and the `srslog.Dial*` functions.
+func (w *Writer) Debug(m string) (err error) {
+	_, err = w.writeAndRetry(LOG_DEBUG, m)
+	return err
+}
+
+func (w *Writer) writeAndRetry(p Priority, s string) (int, error) {
+	pr := (w.priority & facilityMask) | (p & severityMask)
+
+	w.Lock()
+	defer w.Unlock()
+
+	if w.conn != nil {
+		if n, err := w.write(pr, s); err == nil {
+			return n, err
+		}
+	}
+	if err := w.connect(); err != nil {
+		return 0, err
+	}
+	return w.write(pr, s)
+}
+
+// write generates and writes a syslog formatted string. The
+// format is as follows: <PRI>TIMESTAMP HOSTNAME TAG[PID]: MSG
+func (w *Writer) write(p Priority, msg string) (int, error) {
+	// ensure it ends in a \n
+	if !strings.HasSuffix(msg, "\n") {
+		msg += "\n"
+	}
+
+	err := w.conn.writeString(p, w.hostname, w.tag, msg)
+	if err != nil {
+		return 0, err
+	}
+	// Note: return the length of the input, not the number of
+	// bytes printed by Fprintf, because this must behave like
+	// an io.Writer.
+	return len(msg), nil
+}
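
A usage sketch for the Writer above. It assumes srslog.Dial and the syslog Priority constants, which mirror the stdlib log/syslog API; dialing with an empty network and address takes the local unixSyslog path shown earlier in this diff:

package main

import (
	"log"

	"github.com/RackSec/srslog"
)

func main() {
	// Empty network/address selects the local syslog daemon.
	w, err := srslog.Dial("", "", srslog.LOG_USER|srslog.LOG_INFO, "myapp")
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	w.Info("informational message")    // explicit severity
	w.Err("something went wrong")      // overrides the default severity
	w.Write([]byte("default message")) // uses the priority passed to Dial
}
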
diff --git a/vendor/src/github.com/docker/distribution/.mailmap b/vendor/src/github.com/docker/distribution/.mailmap
index 2c0af06..191e60c 100644
--- a/vendor/src/github.com/docker/distribution/.mailmap
+++ b/vendor/src/github.com/docker/distribution/.mailmap
@@ -5,3 +5,10 @@
 Josh Hawn <josh.hawn@docker.com>        Josh Hawn <jlhawn@berkeley.edu>
 Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
 Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
+Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
+harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
+Jessie Frazelle <jessie@docker.com>  <jfrazelle@users.noreply.github.com>
+Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
+Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
+Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
+davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
\ No newline at end of file
diff --git a/vendor/src/github.com/docker/distribution/AUTHORS b/vendor/src/github.com/docker/distribution/AUTHORS
index 996eeb2..4b97cd7 100644
--- a/vendor/src/github.com/docker/distribution/AUTHORS
+++ b/vendor/src/github.com/docker/distribution/AUTHORS
@@ -5,13 +5,16 @@
 Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
 Alex Chan <alex.chan@metaswitch.com>
 Alex Elman <aelman@indeed.com>
+amitshukla <ashukla73@hotmail.com>
 Amy Lindburg <amy.lindburg@docker.com>
+Andrew Meredith <andymeredith@gmail.com>
 Andrey Kostov <kostov.andrey@gmail.com>
 Andy Goldstein <agoldste@redhat.com>
 Anton Tiurin <noxiouz@yandex.ru>
 Antonio Mercado <amercado@thinknode.com>
 Arnaud Porterie <arnaud.porterie@docker.com>
 Arthur Baars <arthur@semmle.com>
+Avi Miller <avi.miller@oracle.com>
 Ayose Cazorla <ayosec@gmail.com>
 BadZen <dave.trombley@gmail.com>
 Ben Firshman <ben@firshman.co.uk>
@@ -32,9 +35,10 @@
 Diogo Mónica <diogo.monica@gmail.com>
 Donald Huang <don.hcd@gmail.com>
 Doug Davis <dug@us.ibm.com>
+farmerworking <farmerworking@gmail.com>
 Florentin Raud <florentin.raud@gmail.com>
 Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
-harche <harche@users.noreply.github.com>
+harche <p.harshal@gmail.com>
 Henri Gomez <henri.gomez@gmail.com>
 Hu Keping <hukeping@huawei.com>
 Hua Wang <wanghua.humble@gmail.com>
@@ -42,9 +46,10 @@
 Jack Griffin <jackpg14@gmail.com>
 Jason Freidman <jason.freidman@gmail.com>
 Jeff Nickoloff <jeff@allingeek.com>
-Jessie Frazelle <jfrazelle@users.noreply.github.com>
+Jessie Frazelle <jessie@docker.com>
 Jianqing Wang <tsing@jianqing.org>
 Jon Poler <jonathan.poler@apcera.com>
+Jonathan Boulle <jonathanboulle@gmail.com>
 Jordan Liggitt <jliggitt@redhat.com>
 Josh Hawn <josh.hawn@docker.com>
 Julien Fernandez <julien.fernandez@gmail.com>
@@ -59,6 +64,7 @@
 Matt Robenolt <matt@ydekproductions.com>
 Michael Prokop <mika@grml.org>
 Miquel Sabaté <msabate@suse.com>
+Morgan Bauer <mbauer@us.ibm.com>
 moxiegirl <mary@docker.com>
 Nathan Sullivan <nathan@nightsys.net>
 nevermosby <robolwq@qq.com>
@@ -70,8 +76,8 @@
 Patrick Devine <patrick.devine@docker.com>
 Philip Misiowiec <philip@atlashealth.com>
 Richard Scothern <richard.scothern@docker.com>
+Rusty Conover <rusty@luckydinosaur.com>
 Sebastiaan van Stijn <github@gone.nl>
-Sharif Nassar <mrwacky42@users.noreply.github.com>
 Sharif Nassar <sharif@mrwacky.com>
 Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
 Shreyas Karnik <karnik.shreyas@gmail.com>
@@ -81,15 +87,16 @@
 Sungho Moon <sungho.moon@navercorp.com>
 Sven Dowideit <SvenDowideit@home.org.au>
 Sylvain Baubeau <sbaubeau@redhat.com>
+Ted Reed <ted.reed@gmail.com>
 tgic <farmer1992@gmail.com>
 Thomas Sjögren <konstruktoid@users.noreply.github.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <teabee89@gmail.com>
+Tonis Tiigi <tonistiigi@gmail.com>
 Troels Thomsen <troels@thomsen.io>
 Vincent Batts <vbatts@redhat.com>
 Vincent Demeester <vincent@sbr.pm>
 Vincent Giersch <vincent.giersch@ovh.net>
-Vincent Giersch <vincent@giersch.fr>
 W. Trevor King <wking@tremily.us>
 xg.song <xg.song@venusource.com>
 xiekeyang <xiekeyang@huawei.com>
diff --git a/vendor/src/github.com/docker/distribution/Dockerfile b/vendor/src/github.com/docker/distribution/Dockerfile
index 7a4b3e9..1a58222 100644
--- a/vendor/src/github.com/docker/distribution/Dockerfile
+++ b/vendor/src/github.com/docker/distribution/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.4
+FROM golang:1.5.2
 
 RUN apt-get update && \
     apt-get install -y librados-dev apache2-utils && \
diff --git a/vendor/src/github.com/docker/distribution/MAINTAINERS b/vendor/src/github.com/docker/distribution/MAINTAINERS
index 508fb5c..bda4001 100644
--- a/vendor/src/github.com/docker/distribution/MAINTAINERS
+++ b/vendor/src/github.com/docker/distribution/MAINTAINERS
@@ -1,8 +1,58 @@
-Solomon Hykes <solomon@docker.com> (@shykes)
-Olivier Gambier <olivier@docker.com> (@dmp42)
-Stephen Day <stephen.day@docker.com> (@stevvooe)
-Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
-Richard Scothern <richard.scothern@gmail.com> (@richardscothern)
-Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)
+# Distribution maintainers file
+#
+# This file describes who runs the docker/distribution project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+	[Org."Core maintainers"]
+		people = [
+			"aaronlehmann",
+			"dmcgowan",
+			"dmp42",
+			"richardscothern",
+			"shykes",
+			"stevvooe",
+		]
 
+[people]
 
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+	[people.aaronlehmann]
+	Name = "Aaron Lehmann"
+	Email = "aaron.lehmann@docker.com"
+	GitHub = "aaronlehmann"
+
+	[people.dmcgowan]
+	Name = "Derek McGowan"
+	Email = "derek@mcgstyle.net"
+	GitHub = "dmcgowan"
+
+	[people.dmp42]
+	Name = "Olivier Gambier"
+	Email = "olivier@docker.com"
+	GitHub = "dmp42"
+
+	[people.richardscothern]
+	Name = "Richard Scothern"
+	Email = "richard.scothern@gmail.com"
+	GitHub = "richardscothern"
+
+	[people.shykes]
+	Name = "Solomon Hykes"
+	Email = "solomon@docker.com"
+	GitHub = "shykes"
+
+	[people.stevvooe]
+	Name = "Stephen Day"
+	Email = "stephen.day@docker.com"
+	GitHub = "stevvooe"
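
Since the file is TOML, its contents can be extracted programmatically, as the header says. A hedged sketch using the BurntSushi/toml parser (an assumption; any TOML-compliant parser works):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/BurntSushi/toml"
)

type person struct {
	Name   string
	Email  string
	GitHub string
}

func main() {
	data, err := ioutil.ReadFile("MAINTAINERS")
	if err != nil {
		log.Fatal(err)
	}
	var doc struct {
		People map[string]person `toml:"people"`
	}
	if _, err := toml.Decode(string(data), &doc); err != nil {
		log.Fatal(err)
	}
	for key, p := range doc.People {
		fmt.Printf("%s: %s <%s> (@%s)\n", key, p.Name, p.Email, p.GitHub)
	}
}
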
diff --git a/vendor/src/github.com/docker/distribution/README.md b/vendor/src/github.com/docker/distribution/README.md
index a6d51b6..e826213 100644
--- a/vendor/src/github.com/docker/distribution/README.md
+++ b/vendor/src/github.com/docker/distribution/README.md
@@ -17,9 +17,9 @@
 |**Component**       |Description                                                                                                                                                                                         |
 |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                  |
-| **libraries**      | A rich set of libraries for interacting with,distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
 | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                        |
-| **documentation**  | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry.                                                                                                                                          |
+| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry.                                                                                                                                          |
 
 ### How does this integrate with Docker engine?
 
@@ -58,7 +58,7 @@
 ### Who needs to deploy a registry?
 
 By default, Docker users pull images from Docker's public registry instance.
-[Installing Docker](http://docs.docker.com/installation) gives users this
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
 ability. Users can also push images to a repository on Docker's public registry,
 if they have a [Docker Hub](https://hub.docker.com/) account. 
 
diff --git a/vendor/src/github.com/docker/distribution/blobs.go b/vendor/src/github.com/docker/distribution/blobs.go
index 2087d0f..ce43ea2 100644
--- a/vendor/src/github.com/docker/distribution/blobs.go
+++ b/vendor/src/github.com/docker/distribution/blobs.go
@@ -9,6 +9,7 @@
 
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
 )
 
 var (
@@ -40,6 +41,18 @@
 		err.Digest, err.Reason)
 }
 
+// ErrBlobMounted is returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
 // Descriptor describes targeted content. Used in conjunction with a blob
 // store, a descriptor can be used to fetch, store and target any kind of
 // blob. The struct also describes the wire protocol format. Fields should
@@ -61,6 +74,15 @@
 	// depend on the simplicity of this type.
 }
 
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+	return d
+}
+
 // BlobStatter makes blob descriptors available by digest. The service may
 // provide a descriptor of a different digest if the provided digest is not
 // canonical.
@@ -142,12 +164,21 @@
 	// returned handle can be written to and later resumed using an opaque
 	// identifier. With this approach, one can Close and Resume a BlobWriter
 	// multiple times until the BlobWriter is committed or cancelled.
-	Create(ctx context.Context) (BlobWriter, error)
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
 
 	// Resume attempts to resume a write to a blob, identified by an id.
 	Resume(ctx context.Context, id string) (BlobWriter, error)
 }
 
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
 // BlobWriter provides a handle for inserting data into a blob store.
 // Instances should be obtained from BlobWriteService.Writer and
 // BlobWriteService.Resume. If supported by the store, a writer can be
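
To illustrate the new variadic Create signature, a hedged sketch of a caller-defined BlobCreateOption; the withNote type is hypothetical, and a BlobIngester may honor or ignore it:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// withNote is a hypothetical BlobCreateOption. A real BlobIngester
// implementation type-asserts the value passed to Apply and configures
// itself; per the interface contract it may also ignore the option.
type withNote struct{ note string }

func (o withNote) Apply(v interface{}) error {
	// Implementation-specific; returning nil means the option was accepted.
	return nil
}

func create(ctx context.Context, ingester distribution.BlobIngester) (distribution.BlobWriter, error) {
	// Options are variadic, so existing Create(ctx) callers keep working.
	return ingester.Create(ctx, withNote{note: "upload hint"})
}
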
diff --git a/vendor/src/github.com/docker/distribution/circle.yml b/vendor/src/github.com/docker/distribution/circle.yml
index 3033719..e1995d4 100644
--- a/vendor/src/github.com/docker/distribution/circle.yml
+++ b/vendor/src/github.com/docker/distribution/circle.yml
@@ -6,10 +6,12 @@
   # Install ceph to test rados driver & create pool
     - sudo -i ~/distribution/contrib/ceph/ci-setup.sh
     - ceph osd pool create docker-distribution 1
+  # Install codecov for coverage
+    - pip install --user codecov
 
   post:
   # go
-    - gvm install go1.5 --prefer-binary --name=stable
+    - gvm install go1.5.3 --prefer-binary --name=stable
 
   environment:
   # Convenient shortcuts to "common" locations
@@ -45,9 +47,6 @@
     - >
       gvm use stable &&
       go get github.com/axw/gocov/gocov github.com/golang/lint/golint
- 
-  # Disabling goveralls for now
-  # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
 
 test:
   pre:
@@ -73,25 +72,17 @@
         pwd: $BASE_STABLE
 
   override:
-
   # Test stable, and report
-  # Preset the goverall report file
-  # - echo "$CIRCLE_PAIN" > ~/goverage.report
-
-     - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out:
-         pwd: $BASE_STABLE
-
-     - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}:
+     - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
          timeout: 600
          pwd: $BASE_STABLE
 
   post:
-  # Aggregate and report to coveralls
-    - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report:
+  # Report to codecov
+    - bash <(curl -s https://codecov.io/bash):
         pwd: $BASE_STABLE
 
   ## Notes
-  # Disabled coveralls reporting: build breaking sending coverage data to coveralls
   # Disabled the -race detector due to massive memory usage.
   # Do we want these as well?
   # - go get code.google.com/p/go.tools/cmd/goimports
diff --git a/vendor/src/github.com/docker/distribution/coverpkg.sh b/vendor/src/github.com/docker/distribution/coverpkg.sh
new file mode 100755
index 0000000..7ee751a
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/coverpkg.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# Given a subpackage and the containing package, figures out which packages
+# need to be passed to `go test -coverpkg`:  this includes all of the
+# subpackage's dependencies within the containing package, as well as the
+# subpackage itself.
+DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})"
+echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
diff --git a/vendor/src/github.com/docker/distribution/digest/digest.go b/vendor/src/github.com/docker/distribution/digest/digest.go
index ae581f1..31d821b 100644
--- a/vendor/src/github.com/docker/distribution/digest/digest.go
+++ b/vendor/src/github.com/docker/distribution/digest/digest.go
@@ -1,21 +1,14 @@
 package digest
 
 import (
-	"bytes"
 	"fmt"
 	"hash"
 	"io"
-	"io/ioutil"
 	"regexp"
 	"strings"
-
-	"github.com/docker/docker/pkg/tarsum"
 )
 
 const (
-	// DigestTarSumV1EmptyTar is the digest for the empty tar file.
-	DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-
 	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
 	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
 )
@@ -29,18 +22,21 @@
 //
 // 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
 //
-// More important for this code base, this type is compatible with tarsum
-// digests. For example, the following would be a valid Digest:
-//
-// 	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
-//
 // This allows to abstract the digest behind this type and work only in those
 // terms.
 type Digest string
 
 // NewDigest returns a Digest from alg and a hash.Hash object.
 func NewDigest(alg Algorithm, h hash.Hash) Digest {
-	return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil)))
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return Digest(fmt.Sprintf("%s:%x", alg, p))
 }
 
 // NewDigestFromHex returns a Digest from alg and the hex encoded digest.
@@ -79,41 +75,15 @@
 	return Canonical.FromReader(rd)
 }
 
-// FromTarArchive produces a tarsum digest from reader rd.
-func FromTarArchive(rd io.Reader) (Digest, error) {
-	ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1)
-	if err != nil {
-		return "", err
-	}
-
-	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
-		return "", err
-	}
-
-	d, err := ParseDigest(ts.Sum(nil))
-	if err != nil {
-		return "", err
-	}
-
-	return d, nil
-}
-
 // FromBytes digests the input and returns a Digest.
-func FromBytes(p []byte) (Digest, error) {
-	return FromReader(bytes.NewReader(p))
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
 }
 
 // Validate checks that the contents of d is a valid digest, returning an
 // error if not.
 func (d Digest) Validate() error {
 	s := string(d)
-	// Common case will be tarsum
-	_, err := ParseTarSum(s)
-	if err == nil {
-		return nil
-	}
-
-	// Continue on for general parser
 
 	if !DigestRegexpAnchored.MatchString(s) {
 		return ErrDigestInvalidFormat
diff --git a/vendor/src/github.com/docker/distribution/digest/digester.go b/vendor/src/github.com/docker/distribution/digest/digester.go
index 9a10539..f3105a4 100644
--- a/vendor/src/github.com/docker/distribution/digest/digester.go
+++ b/vendor/src/github.com/docker/distribution/digest/digester.go
@@ -2,6 +2,7 @@
 
 import (
 	"crypto"
+	"fmt"
 	"hash"
 	"io"
 )
@@ -13,10 +14,9 @@
 
 // supported digest types
 const (
-	SHA256         Algorithm = "sha256"           // sha256 with hex encoding
-	SHA384         Algorithm = "sha384"           // sha384 with hex encoding
-	SHA512         Algorithm = "sha512"           // sha512 with hex encoding
-	TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding
 
 	// Canonical is the primary digest algorithm used with the distribution
 	// project. Other digests may be used but this one is the primary storage
@@ -85,11 +85,18 @@
 	}
 }
 
-// Hash returns a new hash as used by the algorithm. If not available, nil is
-// returned. Make sure to check Available before calling.
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
 func (a Algorithm) Hash() hash.Hash {
 	if !a.Available() {
-		return nil
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import hashes in the
+		// digest package so that users can choose their hash implementation
+		// (such as stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
 	}
 
 	return algorithms[a].New()
@@ -106,6 +113,22 @@
 	return digester.Digest(), nil
 }
 
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.New()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
 // TODO(stevvooe): Allow resolution of verifiers using the digest type and
 // this registration system.
 
diff --git a/vendor/src/github.com/docker/distribution/digest/doc.go b/vendor/src/github.com/docker/distribution/digest/doc.go
index 278c50e..f64b0db 100644
--- a/vendor/src/github.com/docker/distribution/digest/doc.go
+++ b/vendor/src/github.com/docker/distribution/digest/doc.go
@@ -1,7 +1,7 @@
 // Package digest provides a generalized type to opaquely represent message
 // digests and their operations within the registry. The Digest type is
 // designed to serve as a flexible identifier in a content-addressable system.
-// More importantly, it provides tools and wrappers to work with tarsums and
+// More importantly, it provides tools and wrappers to work with
 // hash.Hash-based digests with little effort.
 //
 // Basics
@@ -16,17 +16,7 @@
 // 	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
 //
 // In this case, the string "sha256" is the algorithm and the hex bytes are
-// the "digest". A tarsum example will be more illustrative of the use case
-// involved in the registry:
-//
-// 	tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b
-//
-// For this, we consider the algorithm to be "tarsum+sha256". Prudent
-// applications will favor the ParseDigest function to verify the format over
-// using simple type casts. However, a normal string can be cast as a digest
-// with a simple type conversion:
-//
-// 	Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b")
+// the "digest".
 //
 // Because the Digest type is simply a string, once a valid Digest is
 // obtained, comparisons are cheap, quick and simple to express with the
diff --git a/vendor/src/github.com/docker/distribution/digest/tarsum.go b/vendor/src/github.com/docker/distribution/digest/tarsum.go
deleted file mode 100644
index 9effeb2..0000000
--- a/vendor/src/github.com/docker/distribution/digest/tarsum.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package digest
-
-import (
-	"fmt"
-
-	"regexp"
-)
-
-// TarsumRegexp defines a regular expression to match tarsum identifiers.
-var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+")
-
-// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with
-// capture groups corresponding to each component.
-var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)")
-
-// TarSumInfo contains information about a parsed tarsum.
-type TarSumInfo struct {
-	// Version contains the version of the tarsum.
-	Version string
-
-	// Algorithm contains the algorithm for the final digest
-	Algorithm string
-
-	// Digest contains the hex-encoded digest.
-	Digest string
-}
-
-// InvalidTarSumError provides informations about a TarSum that cannot be parsed
-// by ParseTarSum.
-type InvalidTarSumError string
-
-func (e InvalidTarSumError) Error() string {
-	return fmt.Sprintf("invalid tarsum: %q", string(e))
-}
-
-// ParseTarSum parses a tarsum string into its components of interest. For
-// example, this method may receive the tarsum in the following format:
-//
-//		tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e
-//
-// The function will return the following:
-//
-//		TarSumInfo{
-//			Version: "v1",
-//			Algorithm: "sha256",
-//			Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e",
-//		}
-//
-func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) {
-	components := TarsumRegexpCapturing.FindStringSubmatch(tarSum)
-
-	if len(components) != 1+TarsumRegexpCapturing.NumSubexp() {
-		return TarSumInfo{}, InvalidTarSumError(tarSum)
-	}
-
-	return TarSumInfo{
-		Version:   components[3],
-		Algorithm: components[4],
-		Digest:    components[5],
-	}, nil
-}
-
-// String returns the valid, string representation of the tarsum info.
-func (tsi TarSumInfo) String() string {
-	if tsi.Version == "" {
-		return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest)
-	}
-
-	return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest)
-}
diff --git a/vendor/src/github.com/docker/distribution/digest/verifiers.go b/vendor/src/github.com/docker/distribution/digest/verifiers.go
index f8c75b5..9af3be1 100644
--- a/vendor/src/github.com/docker/distribution/digest/verifiers.go
+++ b/vendor/src/github.com/docker/distribution/digest/verifiers.go
@@ -3,9 +3,6 @@
 import (
 	"hash"
 	"io"
-	"io/ioutil"
-
-	"github.com/docker/docker/pkg/tarsum"
 )
 
 // Verifier presents a general verification interface to be used with message
@@ -27,70 +24,10 @@
 		return nil, err
 	}
 
-	alg := d.Algorithm()
-	switch alg {
-	case "sha256", "sha384", "sha512":
-		return hashVerifier{
-			hash:   alg.Hash(),
-			digest: d,
-		}, nil
-	default:
-		// Assume we have a tarsum.
-		version, err := tarsum.GetVersionFromTarsum(string(d))
-		if err != nil {
-			return nil, err
-		}
-
-		pr, pw := io.Pipe()
-
-		// TODO(stevvooe): We may actually want to ban the earlier versions of
-		// tarsum. That decision may not be the place of the verifier.
-
-		ts, err := tarsum.NewTarSum(pr, true, version)
-		if err != nil {
-			return nil, err
-		}
-
-		// TODO(sday): Ick! A goroutine per digest verification? We'll have to
-		// get the tarsum library to export an io.Writer variant.
-		go func() {
-			if _, err := io.Copy(ioutil.Discard, ts); err != nil {
-				pr.CloseWithError(err)
-			} else {
-				pr.Close()
-			}
-		}()
-
-		return &tarsumVerifier{
-			digest: d,
-			ts:     ts,
-			pr:     pr,
-			pw:     pw,
-		}, nil
-	}
-}
-
-// NewLengthVerifier returns a verifier that returns true when the number of
-// read bytes equals the expected parameter.
-func NewLengthVerifier(expected int64) Verifier {
-	return &lengthVerifier{
-		expected: expected,
-	}
-}
-
-type lengthVerifier struct {
-	expected int64 // expected bytes read
-	len      int64 // bytes read
-}
-
-func (lv *lengthVerifier) Write(p []byte) (n int, err error) {
-	n = len(p)
-	lv.len += int64(n)
-	return n, err
-}
-
-func (lv *lengthVerifier) Verified() bool {
-	return lv.expected == lv.len
+	return hashVerifier{
+		hash:   d.Algorithm().Hash(),
+		digest: d,
+	}, nil
 }
 
 type hashVerifier struct {
@@ -105,18 +42,3 @@
 func (hv hashVerifier) Verified() bool {
 	return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
 }
-
-type tarsumVerifier struct {
-	digest Digest
-	ts     tarsum.TarSum
-	pr     *io.PipeReader
-	pw     *io.PipeWriter
-}
-
-func (tv *tarsumVerifier) Write(p []byte) (n int, err error) {
-	return tv.pw.Write(p)
-}
-
-func (tv *tarsumVerifier) Verified() bool {
-	return tv.digest == Digest(tv.ts.Sum(nil))
-}
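
With the tarsum and length verifiers gone, only the hash-backed path remains. A hedged usage sketch, assuming the package's NewDigestVerifier constructor (the function this hunk edits):

package main

import (
	"bytes"
	_ "crypto/sha256"
	"fmt"
	"io"
	"log"

	"github.com/docker/distribution/digest"
)

func main() {
	content := []byte("some blob content")
	dgst := digest.FromBytes(content)

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(verifier, bytes.NewReader(content)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(verifier.Verified()) // true iff the streamed bytes match dgst
}
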
diff --git a/vendor/src/github.com/docker/distribution/errors.go b/vendor/src/github.com/docker/distribution/errors.go
index 7bf720e..77bd096 100644
--- a/vendor/src/github.com/docker/distribution/errors.go
+++ b/vendor/src/github.com/docker/distribution/errors.go
@@ -16,6 +16,15 @@
 // performed
 var ErrUnsupported = errors.New("operation unsupported")
 
+// ErrTagUnknown is returned if the given tag is not known by the tag service
+type ErrTagUnknown struct {
+	Tag string
+}
+
+func (err ErrTagUnknown) Error() string {
+	return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
 // ErrRepositoryUnknown is returned if the named repository is not known by
 // the registry.
 type ErrRepositoryUnknown struct {
diff --git a/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
new file mode 100644
index 0000000..49e2b1a
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
@@ -0,0 +1,147 @@
+package manifestlist
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+)
+
+// MediaTypeManifestList specifies the mediaType for manifest lists.
+const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
+
+// SchemaVersion provides a pre-initialized version structure for this
+// package's version of the manifest.
+var SchemaVersion = manifest.Versioned{
+	SchemaVersion: 2,
+	MediaType:     MediaTypeManifestList,
+}
+
+func init() {
+	manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifestList)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// PlatformSpec specifies a platform where a particular image manifest is
+// applicable.
+type PlatformSpec struct {
+	// Architecture field specifies the CPU architecture, for example
+	// `amd64` or `ppc64`.
+	Architecture string `json:"architecture"`
+
+	// OS specifies the operating system, for example `linux` or `windows`.
+	OS string `json:"os"`
+
+	// Variant is an optional field specifying a variant of the CPU, for
+	// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
+	Variant string `json:"variant,omitempty"`
+
+	// Features is an optional field specifying an array of strings, each
+	// listing a required CPU feature (for example `sse4` or `aes`).
+	Features []string `json:"features,omitempty"`
+}
+
+// A ManifestDescriptor references a platform-specific manifest.
+type ManifestDescriptor struct {
+	distribution.Descriptor
+
+	// Platform specifies which platform the manifest pointed to by the
+	// descriptor runs on.
+	Platform PlatformSpec `json:"platform"`
+}
+
+// ManifestList references manifests for various platforms.
+type ManifestList struct {
+	manifest.Versioned
+
+	// Manifests lists the platform-specific manifest descriptors.
+	Manifests []ManifestDescriptor `json:"manifests"`
+}
+
+// References returns the distribution descriptors for the referenced image
+// manifests.
+func (m ManifestList) References() []distribution.Descriptor {
+	dependencies := make([]distribution.Descriptor, len(m.Manifests))
+	for i := range m.Manifests {
+		dependencies[i] = m.Manifests[i].Descriptor
+	}
+
+	return dependencies
+}
+
+// DeserializedManifestList wraps ManifestList with a copy of the original
+// JSON.
+type DeserializedManifestList struct {
+	ManifestList
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromDescriptors takes a slice of descriptors, and returns a
+// DeserializedManifestList which contains the resulting manifest list
+// and its JSON representation.
+func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
+	m := ManifestList{
+		Versioned: SchemaVersion,
+	}
+
+	m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))
+	copy(m.Manifests, descriptors)
+
+	deserialized := DeserializedManifestList{
+		ManifestList: m,
+	}
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new ManifestList struct from JSON data.
+func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b), len(b))
+	// store manifest list in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into ManifestList object
+	var manifestList ManifestList
+	if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
+		return err
+	}
+
+	m.ManifestList = manifestList
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// an error is returned instead.
+func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
+}
+
+// Payload returns the raw content of the manifest list. The contents can be
+// used to calculate the content identifier.
+func (m DeserializedManifestList) Payload() (string, []byte, error) {
+	return m.MediaType, m.canonical, nil
+}
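
A sketch of assembling and serializing a manifest list with the types above; the digests, sizes, and media types are placeholder values:

package main

import (
	_ "crypto/sha256"
	"fmt"
	"log"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/manifestlist"
)

func main() {
	descriptors := []manifestlist.ManifestDescriptor{
		{
			Descriptor: distribution.Descriptor{
				Digest:    digest.FromBytes([]byte("amd64 manifest")), // placeholder
				Size:      1234,
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
			},
			Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"},
		},
		{
			Descriptor: distribution.Descriptor{
				Digest:    digest.FromBytes([]byte("ppc64le manifest")), // placeholder
				Size:      5678,
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
			},
			Platform: manifestlist.PlatformSpec{Architecture: "ppc64", OS: "linux", Variant: "ppc64le"},
		},
	}

	ml, err := manifestlist.FromDescriptors(descriptors)
	if err != nil {
		log.Fatal(err)
	}
	mediaType, canonical, _ := ml.Payload()
	fmt.Println(mediaType, len(canonical))
}
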
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go
new file mode 100644
index 0000000..e9fe81b
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go
@@ -0,0 +1,278 @@
+package schema1
+
+import (
+	"crypto/sha512"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/libtrust"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+)
+
+type diffID digest.Digest
+
+// gzippedEmptyTar is a gzip-compressed version of an empty tar file
+// (1024 NULL bytes)
+var gzippedEmptyTar = []byte{
+	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// digestSHA256GzippedEmptyTar is the canonical sha256 digest of
+// gzippedEmptyTar
+const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+
+// configManifestBuilder is a type for constructing manifests from an image
+// configuration and generic descriptors.
+type configManifestBuilder struct {
+	// bs is a BlobService used to create empty layer tars in the
+	// blob store if necessary.
+	bs distribution.BlobService
+	// pk is the libtrust private key used to sign the final manifest.
+	pk libtrust.PrivateKey
+	// configJSON is configuration supplied when the ManifestBuilder was
+	// created.
+	configJSON []byte
+	// name is the name provided to NewConfigManifestBuilder
+	name string
+	// tag is the tag provided to NewConfigManifestBuilder
+	tag string
+	// descriptors is the set of descriptors referencing the layers.
+	descriptors []distribution.Descriptor
+	// emptyTarDigest is set to a valid digest if an empty tar has been
+	// put in the blob store; otherwise it is empty.
+	emptyTarDigest digest.Digest
+}
+
+// NewConfigManifestBuilder is used to build new manifests for the current
+// schema version from an image configuration and a set of descriptors.
+// It takes a BlobService so that it can add an empty tar to the blob store
+// if the resulting manifest needs empty layers.
+func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, name, tag string, configJSON []byte) distribution.ManifestBuilder {
+	return &configManifestBuilder{
+		bs:         bs,
+		pk:         pk,
+		configJSON: configJSON,
+		name:       name,
+		tag:        tag,
+	}
+}
+
+// Build produces a final manifest from the given references
+func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
+	type imageRootFS struct {
+		Type      string   `json:"type"`
+		DiffIDs   []diffID `json:"diff_ids,omitempty"`
+		BaseLayer string   `json:"base_layer,omitempty"`
+	}
+
+	type imageHistory struct {
+		Created    time.Time `json:"created"`
+		Author     string    `json:"author,omitempty"`
+		CreatedBy  string    `json:"created_by,omitempty"`
+		Comment    string    `json:"comment,omitempty"`
+		EmptyLayer bool      `json:"empty_layer,omitempty"`
+	}
+
+	type imageConfig struct {
+		RootFS       *imageRootFS   `json:"rootfs,omitempty"`
+		History      []imageHistory `json:"history,omitempty"`
+		Architecture string         `json:"architecture,omitempty"`
+	}
+
+	var img imageConfig
+
+	if err := json.Unmarshal(mb.configJSON, &img); err != nil {
+		return nil, err
+	}
+
+	if len(img.History) == 0 {
+		return nil, errors.New("empty history when trying to create schema1 manifest")
+	}
+
+	if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
+		return nil, errors.New("number of descriptors and number of layers in rootfs must match")
+	}
+
+	// Generate IDs for each layer
+	// For non-top-level layers, create fake V1Compatibility strings that
+	// fit the format and don't collide with anything else, but don't
+	// result in runnable images on their own.
+	type v1Compatibility struct {
+		ID              string    `json:"id"`
+		Parent          string    `json:"parent,omitempty"`
+		Comment         string    `json:"comment,omitempty"`
+		Created         time.Time `json:"created"`
+		ContainerConfig struct {
+			Cmd []string
+		} `json:"container_config,omitempty"`
+		ThrowAway bool `json:"throwaway,omitempty"`
+	}
+
+	fsLayerList := make([]FSLayer, len(img.History))
+	history := make([]History, len(img.History))
+
+	parent := ""
+	layerCounter := 0
+	for i, h := range img.History[:len(img.History)-1] {
+		var blobsum digest.Digest
+		if h.EmptyLayer {
+			if blobsum, err = mb.emptyTar(ctx); err != nil {
+				return nil, err
+			}
+		} else {
+			if len(img.RootFS.DiffIDs) <= layerCounter {
+				return nil, errors.New("too many non-empty layers in History section")
+			}
+			blobsum = mb.descriptors[layerCounter].Digest
+			layerCounter++
+		}
+
+		v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
+
+		if i == 0 && img.RootFS.BaseLayer != "" {
+			// windows-only baselayer setup
+			baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
+			parent = fmt.Sprintf("%x", baseID[:32])
+		}
+
+		v1Compatibility := v1Compatibility{
+			ID:      v1ID,
+			Parent:  parent,
+			Comment: h.Comment,
+			Created: h.Created,
+		}
+		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
+		if h.EmptyLayer {
+			v1Compatibility.ThrowAway = true
+		}
+		jsonBytes, err := json.Marshal(&v1Compatibility)
+		if err != nil {
+			return nil, err
+		}
+
+		reversedIndex := len(img.History) - i - 1
+		history[reversedIndex].V1Compatibility = string(jsonBytes)
+		fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}
+
+		parent = v1ID
+	}
+
+	latestHistory := img.History[len(img.History)-1]
+
+	var blobsum digest.Digest
+	if latestHistory.EmptyLayer {
+		if blobsum, err = mb.emptyTar(ctx); err != nil {
+			return nil, err
+		}
+	} else {
+		if len(img.RootFS.DiffIDs) <= layerCounter {
+			return nil, errors.New("too many non-empty layers in History section")
+		}
+		blobsum = mb.descriptors[layerCounter].Digest
+	}
+
+	fsLayerList[0] = FSLayer{BlobSum: blobsum}
+	dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))
+
+	// Top-level v1compatibility string should be a modified version of the
+	// image config.
+	transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
+	if err != nil {
+		return nil, err
+	}
+
+	history[0].V1Compatibility = string(transformedConfig)
+
+	mfst := Manifest{
+		Versioned: manifest.Versioned{
+			SchemaVersion: 1,
+		},
+		Name:         mb.name,
+		Tag:          mb.tag,
+		Architecture: img.Architecture,
+		FSLayers:     fsLayerList,
+		History:      history,
+	}
+
+	return Sign(&mfst, mb.pk)
+}
+
+// emptyTar pushes a compressed empty tar to the blob store if one doesn't
+// already exist, and returns its blobsum.
+func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) {
+	if mb.emptyTarDigest != "" {
+		// Already put an empty tar
+		return mb.emptyTarDigest, nil
+	}
+
+	descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar)
+	switch err {
+	case nil:
+		mb.emptyTarDigest = descriptor.Digest
+		return descriptor.Digest, nil
+	case distribution.ErrBlobUnknown:
+		// nop
+	default:
+		return "", err
+	}
+
+	// Add gzipped empty tar to the blob store
+	descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar)
+	if err != nil {
+		return "", err
+	}
+
+	mb.emptyTarDigest = descriptor.Digest
+
+	return descriptor.Digest, nil
+}
+
+// AppendReference adds a reference to the current ManifestBuilder
+func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error {
+	// todo: verification here?
+	mb.descriptors = append(mb.descriptors, d.Descriptor())
+	return nil
+}
+
+// References returns the current references added to this builder
+func (mb *configManifestBuilder) References() []distribution.Descriptor {
+	return mb.descriptors
+}
+
+// MakeV1ConfigFromConfig creates a legacy V1 image config from image config JSON
+func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+	// Top-level v1compatibility string should be a modified version of the
+	// image config.
+	var configAsMap map[string]*json.RawMessage
+	if err := json.Unmarshal(configJSON, &configAsMap); err != nil {
+		return nil, err
+	}
+
+	// Delete fields that didn't exist in old manifest
+	delete(configAsMap, "rootfs")
+	delete(configAsMap, "history")
+	configAsMap["id"] = rawJSON(v1ID)
+	if parentV1ID != "" {
+		configAsMap["parent"] = rawJSON(parentV1ID)
+	}
+	if throwaway {
+		configAsMap["throwaway"] = rawJSON(true)
+	}
+
+	return json.Marshal(configAsMap)
+}
+
+func rawJSON(value interface{}) *json.RawMessage {
+	jsonval, err := json.Marshal(value)
+	if err != nil {
+		return nil
+	}
+	return (*json.RawMessage)(&jsonval)
+}
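
A hedged usage sketch for the config builder: the BlobService, config JSON, and layer descriptors are placeholders you would obtain from a repository, and the signing key comes from libtrust:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func buildSchema1(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}

	builder := schema1.NewConfigManifestBuilder(bs, pk, "library/example", "latest", configJSON)
	for _, desc := range layers {
		// Descriptor satisfies Describable (see blobs.go earlier in this diff).
		if err := builder.AppendReference(desc); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}
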
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go
index e7cbf95..98a7d81 100644
--- a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go
+++ b/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go
@@ -2,20 +2,22 @@
 
 import (
 	"encoding/json"
+	"fmt"
 
+	"github.com/docker/distribution"
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest"
 	"github.com/docker/libtrust"
 )
 
-// TODO(stevvooe): When we rev the manifest format, the contents of this
-// package should be moved to manifest/v1.
-
 const (
-	// ManifestMediaType specifies the mediaType for the current version. Note
-	// that for schema version 1, the the media is optionally
-	// "application/json".
-	ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json"
+	// MediaTypeManifest specifies the mediaType for the current version. Note
+	// that for schema version 1, the media is optionally "application/json".
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+	// MediaTypeSignedManifest specifies the mediaType for the current SignedManifest version
+	MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+	// MediaTypeManifestLayer specifies the media type for manifest layers
+	MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar"
 )
 
 var (
@@ -26,6 +28,47 @@
 	}
 )
 
+func init() {
+	schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		sm := new(SignedManifest)
+		err := sm.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		desc := distribution.Descriptor{
+			Digest:    digest.FromBytes(sm.Canonical),
+			Size:      int64(len(sm.Canonical)),
+			MediaType: MediaTypeManifest,
+		}
+		return sm, desc, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+	err = distribution.RegisterManifestSchema("application/json; charset=utf-8", schema1Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// FSLayer is a container struct for BlobSums defined in an image manifest
+type FSLayer struct {
+	// BlobSum is the tarsum of the referenced filesystem image layer
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// History stores unstructured v1 compatibility information
+type History struct {
+	// V1Compatibility is the raw v1 compatibility information
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
 // Manifest provides the base accessible fields for working with V2 image
 // format in the registry.
 type Manifest struct {
@@ -49,59 +92,64 @@
 }
 
 // SignedManifest provides an envelope for a signed image manifest, including
-// the format sensitive raw bytes. It contains fields to
+// the format sensitive raw bytes.
 type SignedManifest struct {
 	Manifest
 
-	// Raw is the byte representation of the ImageManifest, used for signature
-	// verification. The value of Raw must be used directly during
-	// serialization, or the signature check will fail. The manifest byte
+	// Canonical is the canonical byte representation of the ImageManifest,
+	// without any attached signatures. The manifest byte
 	// representation cannot change or it will have to be re-signed.
-	Raw []byte `json:"-"`
+	Canonical []byte `json:"-"`
+
+	// all contains the byte representation of the Manifest including signatures
+	// and is returned by Payload()
+	all []byte
 }
 
-// UnmarshalJSON populates a new ImageManifest struct from JSON data.
+// UnmarshalJSON populates a new SignedManifest struct from JSON data.
 func (sm *SignedManifest) UnmarshalJSON(b []byte) error {
-	sm.Raw = make([]byte, len(b), len(b))
-	copy(sm.Raw, b)
+	sm.all = make([]byte, len(b), len(b))
+	// store manifest and signatures in all
+	copy(sm.all, b)
 
-	p, err := sm.Payload()
+	jsig, err := libtrust.ParsePrettySignature(b, "signatures")
 	if err != nil {
 		return err
 	}
 
+	// Resolve the payload in the manifest.
+	bytes, err := jsig.Payload()
+	if err != nil {
+		return err
+	}
+
+	// sm.Canonical stores the canonical manifest JSON
+	sm.Canonical = make([]byte, len(bytes), len(bytes))
+	copy(sm.Canonical, bytes)
+
+	// Unmarshal canonical JSON into Manifest object
 	var manifest Manifest
-	if err := json.Unmarshal(p, &manifest); err != nil {
+	if err := json.Unmarshal(sm.Canonical, &manifest); err != nil {
 		return err
 	}
 
 	sm.Manifest = manifest
+
 	return nil
 }
 
-// Payload returns the raw, signed content of the signed manifest. The
-// contents can be used to calculate the content identifier.
-func (sm *SignedManifest) Payload() ([]byte, error) {
-	jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
-	if err != nil {
-		return nil, err
+// References returns the descriptors of this manifest's references
+func (sm SignedManifest) References() []distribution.Descriptor {
+	dependencies := make([]distribution.Descriptor, len(sm.FSLayers))
+	for i, fsLayer := range sm.FSLayers {
+		dependencies[i] = distribution.Descriptor{
+			MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar",
+			Digest:    fsLayer.BlobSum,
+		}
 	}
 
-	// Resolve the payload in the manifest.
-	return jsig.Payload()
-}
+	return dependencies
 
-// Signatures returns the signatures as provided by
-// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
-// signatures.
-func (sm *SignedManifest) Signatures() ([][]byte, error) {
-	jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
-	if err != nil {
-		return nil, err
-	}
-
-	// Resolve the payload in the manifest.
-	return jsig.Signatures()
 }
 
 // MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner
@@ -109,22 +157,28 @@
 // use Raw directly, since the content produced by json.Marshal will be
 // compacted and will fail signature checks.
 func (sm *SignedManifest) MarshalJSON() ([]byte, error) {
-	if len(sm.Raw) > 0 {
-		return sm.Raw, nil
+	if len(sm.all) > 0 {
+		return sm.all, nil
 	}
 
 	// If the raw data is not available, just dump the inner content.
 	return json.Marshal(&sm.Manifest)
 }
 
-// FSLayer is a container struct for BlobSums defined in an image manifest
-type FSLayer struct {
-	// BlobSum is the tarsum of the referenced filesystem image layer
-	BlobSum digest.Digest `json:"blobSum"`
+// Payload returns the signed content of the signed manifest.
+func (sm SignedManifest) Payload() (string, []byte, error) {
+	return MediaTypeManifest, sm.all, nil
 }
 
-// History stores unstructured v1 compatibility information
-type History struct {
-	// V1Compatibility is the raw v1 compatibility information
-	V1Compatibility string `json:"v1Compatibility"`
+// Signatures returns the signatures as provided by
+// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws
+// signatures.
+func (sm *SignedManifest) Signatures() ([][]byte, error) {
+	jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures")
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the payload in the manifest.
+	return jsig.Signatures()
 }
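
A sketch of decoding a signed manifest through the schema funcs registered in init above. It assumes the distribution package's UnmarshalManifest helper (not shown in this diff), which dispatches on media type:

package example

import (
	_ "crypto/sha256"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
)

func decodeSchema1(raw []byte) (*schema1.SignedManifest, distribution.Descriptor, error) {
	// UnmarshalManifest dispatches on the media type to the registered func.
	m, desc, err := distribution.UnmarshalManifest(schema1.MediaTypeManifest, raw)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	sm := m.(*schema1.SignedManifest) // the registered func returns *SignedManifest
	// desc.Digest is computed over sm.Canonical, i.e. without the signatures.
	return sm, desc, nil
}
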
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go
new file mode 100644
index 0000000..36209e3
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go
@@ -0,0 +1,92 @@
+package schema1
+
+import (
+	"fmt"
+
+	"errors"
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+	"github.com/docker/libtrust"
+)
+
+// referenceManifestBuilder is a type for constructing manifests from schema1
+// dependencies.
+type referenceManifestBuilder struct {
+	Manifest
+	pk libtrust.PrivateKey
+}
+
+// NewReferenceManifestBuilder is used to build new manifests for the current
+// schema version using schema1 dependencies.
+func NewReferenceManifestBuilder(pk libtrust.PrivateKey, name, tag, architecture string) distribution.ManifestBuilder {
+	return &referenceManifestBuilder{
+		Manifest: Manifest{
+			Versioned: manifest.Versioned{
+				SchemaVersion: 1,
+			},
+			Name:         name,
+			Tag:          tag,
+			Architecture: architecture,
+		},
+		pk: pk,
+	}
+}
+
+func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) {
+	m := mb.Manifest
+	if len(m.FSLayers) == 0 {
+		return nil, errors.New("cannot build manifest with zero layers or history")
+	}
+
+	m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers))
+	m.History = make([]History, len(mb.Manifest.History))
+	copy(m.FSLayers, mb.Manifest.FSLayers)
+	copy(m.History, mb.Manifest.History)
+
+	return Sign(&m, mb.pk)
+}
+
+// AppendReference adds a reference to the current ManifestBuilder
+func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error {
+	r, ok := d.(Reference)
+	if !ok {
+		return fmt.Errorf("Unable to add non-reference type to v1 builder")
+	}
+
+	// Entries need to be prepended
+	mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...)
+	mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...)
+	return nil
+
+}
+
+// References returns the current references added to this builder
+func (mb *referenceManifestBuilder) References() []distribution.Descriptor {
+	refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers))
+	for i := range mb.Manifest.FSLayers {
+		layerDigest := mb.Manifest.FSLayers[i].BlobSum
+		history := mb.Manifest.History[i]
+		ref := Reference{layerDigest, 0, history}
+		refs[i] = ref.Descriptor()
+	}
+	return refs
+}
+
+// Reference describes a manifest v2, schema version 1 dependency.
+// An FSLayer associated with a history entry.
+type Reference struct {
+	Digest  digest.Digest
+	Size    int64 // if we know it, set it for the descriptor.
+	History History
+}
+
+// Descriptor describes a reference
+func (r Reference) Descriptor() distribution.Descriptor {
+	return distribution.Descriptor{
+		MediaType: MediaTypeManifestLayer,
+		Digest:    r.Digest,
+		Size:      r.Size,
+	}
+}
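
A sketch of the reference builder: layers are appended as schema1.Reference values (the digest list and v1 history here are placeholders) and are prepended internally, per AppendReference:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func buildFromReferences(ctx context.Context, pk libtrust.PrivateKey, layerDigests []digest.Digest) (distribution.Manifest, error) {
	builder := schema1.NewReferenceManifestBuilder(pk, "library/example", "latest", "amd64")
	for _, dgst := range layerDigests {
		ref := schema1.Reference{
			Digest:  dgst,
			Size:    0,                                      // unknown; set if available
			History: schema1.History{V1Compatibility: "{}"}, // placeholder v1 metadata
		}
		if err := builder.AppendReference(ref); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}
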
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go
index 1b7b674..c862dd8 100644
--- a/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go
+++ b/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go
@@ -31,8 +31,9 @@
 	}
 
 	return &SignedManifest{
-		Manifest: *m,
-		Raw:      pretty,
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
 	}, nil
 }
 
@@ -60,7 +61,8 @@
 	}
 
 	return &SignedManifest{
-		Manifest: *m,
-		Raw:      pretty,
+		Manifest:  *m,
+		all:       pretty,
+		Canonical: p,
 	}, nil
 }
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go
index 60f8cda..fa8daa5 100644
--- a/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go
+++ b/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go
@@ -10,7 +10,7 @@
 // Verify verifies the signature of the signed manifest returning the public
 // keys used during signing.
 func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) {
-	js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
 	if err != nil {
 		logrus.WithField("err", err).Debugf("(*SignedManifest).Verify")
 		return nil, err
@@ -23,7 +23,7 @@
 // certificate pool returning the list of verified chains. Signatures without
 // an x509 chain are not checked.
 func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) {
-	js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures")
+	js, err := libtrust.ParsePrettySignature(sm.all, "signatures")
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go
new file mode 100644
index 0000000..70b006a
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go
@@ -0,0 +1,74 @@
+package schema2
+
+import (
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+// builder is a type for constructing manifests.
+type builder struct {
+	// bs is a BlobService used to publish the configuration blob.
+	bs distribution.BlobService
+
+	// configJSON references
+	configJSON []byte
+
+	// layers is a list of layer descriptors that gets built by successive
+	// calls to AppendReference.
+	layers []distribution.Descriptor
+}
+
+// NewManifestBuilder is used to build new manifests for the current schema
+// version. It takes a BlobService so it can publish the configuration blob
+// as part of the Build process.
+func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder {
+	mb := &builder{
+		bs:         bs,
+		configJSON: make([]byte, len(configJSON)),
+	}
+	copy(mb.configJSON, configJSON)
+
+	return mb
+}
+
+// Build produces a final manifest from the given references.
+func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
+	m := Manifest{
+		Versioned: SchemaVersion,
+		Layers:    make([]distribution.Descriptor, len(mb.layers)),
+	}
+	copy(m.Layers, mb.layers)
+
+	configDigest := digest.FromBytes(mb.configJSON)
+
+	var err error
+	m.Config, err = mb.bs.Stat(ctx, configDigest)
+	switch err {
+	case nil:
+		return FromStruct(m)
+	case distribution.ErrBlobUnknown:
+		// nop
+	default:
+		return nil, err
+	}
+
+	// Add config to the blob store
+	m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
+	if err != nil {
+		return nil, err
+	}
+
+	return FromStruct(m)
+}
+
+// AppendReference adds a reference to the current ManifestBuilder.
+func (mb *builder) AppendReference(d distribution.Describable) error {
+	mb.layers = append(mb.layers, d.Descriptor())
+	return nil
+}
+
+// References returns the current references added to this builder.
+func (mb *builder) References() []distribution.Descriptor {
+	return mb.layers
+}
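
The builder above is the entry point for producing a schema2 manifest during a push. A minimal usage sketch, assuming `bs` is any distribution.BlobService implementation, `layers` were collected while uploading the image, and that distribution.Descriptor satisfies Describable by returning itself:

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema2"
)

// buildManifest is a hypothetical helper showing the AppendReference/Build flow.
func buildManifest(ctx context.Context, bs distribution.BlobService, configJSON []byte, layers []distribution.Descriptor) (distribution.Manifest, error) {
	builder := schema2.NewManifestBuilder(bs, configJSON)
	for _, layer := range layers {
		// Each layer descriptor is appended in order, base to head.
		if err := builder.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	// Build stats the config blob, uploads it if the blob store does not
	// already have it, and returns the finished manifest.
	return builder.Build(ctx)
}
```
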
diff --git a/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go
new file mode 100644
index 0000000..8d378e9
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go
@@ -0,0 +1,125 @@
+package schema2
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/manifest"
+)
+
+const (
+	// MediaTypeManifest specifies the mediaType for the current version.
+	MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
+
+	// MediaTypeConfig specifies the mediaType for the image configuration.
+	MediaTypeConfig = "application/vnd.docker.container.image.v1+json"
+
+	// MediaTypeLayer is the mediaType used for layers referenced by the
+	// manifest.
+	MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+)
+
+var (
+	// SchemaVersion provides a pre-initialized version structure for this
+	// package's version of the manifest.
+	SchemaVersion = manifest.Versioned{
+		SchemaVersion: 2,
+		MediaType:     MediaTypeManifest,
+	}
+)
+
+func init() {
+	schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
+		m := new(DeserializedManifest)
+		err := m.UnmarshalJSON(b)
+		if err != nil {
+			return nil, distribution.Descriptor{}, err
+		}
+
+		dgst := digest.FromBytes(b)
+		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
+	}
+	err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to register manifest: %s", err))
+	}
+}
+
+// Manifest defines a schema2 manifest.
+type Manifest struct {
+	manifest.Versioned
+
+	// Config references the image configuration as a blob.
+	Config distribution.Descriptor `json:"config"`
+
+	// Layers lists descriptors for the layers referenced by the
+	// configuration.
+	Layers []distribution.Descriptor `json:"layers"`
+}
+
+// References returns the descriptors of this manifest's references.
+func (m Manifest) References() []distribution.Descriptor {
+	return m.Layers
+}
+
+// Target returns the target of this signed manifest.
+func (m Manifest) Target() distribution.Descriptor {
+	return m.Config
+}
+
+// DeserializedManifest wraps Manifest with a copy of the original JSON.
+// It satisfies the distribution.Manifest interface.
+type DeserializedManifest struct {
+	Manifest
+
+	// canonical is the canonical byte representation of the Manifest.
+	canonical []byte
+}
+
+// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
+// DeserializedManifest which contains the manifest and its JSON representation.
+func FromStruct(m Manifest) (*DeserializedManifest, error) {
+	var deserialized DeserializedManifest
+	deserialized.Manifest = m
+
+	var err error
+	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
+	return &deserialized, err
+}
+
+// UnmarshalJSON populates a new Manifest struct from JSON data.
+func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
+	m.canonical = make([]byte, len(b))
+	// store manifest in canonical
+	copy(m.canonical, b)
+
+	// Unmarshal canonical JSON into Manifest object
+	var manifest Manifest
+	if err := json.Unmarshal(m.canonical, &manifest); err != nil {
+		return err
+	}
+
+	m.Manifest = manifest
+
+	return nil
+}
+
+// MarshalJSON returns the contents of canonical. If canonical is empty,
+// an error is returned.
+func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
+	if len(m.canonical) > 0 {
+		return m.canonical, nil
+	}
+
+	return nil, errors.New("JSON representation not initialized in DeserializedManifest")
+}
+
+// Payload returns the raw content of the manifest. The contents can be used to
+// calculate the content identifier.
+func (m DeserializedManifest) Payload() (string, []byte, error) {
+	return m.MediaType, m.canonical, nil
+}
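
The canonical byte handling above matters because the manifest digest is computed over the exact payload that was fetched, so re-marshalling must never alter it. A sketch with a hypothetical helper, using only the APIs shown in this diff:

```go
package example

import (
	"fmt"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema2"
)

// verifyManifestDigest checks that raw manifest bytes match an expected digest.
func verifyManifestDigest(raw []byte, expected digest.Digest) error {
	var m schema2.DeserializedManifest
	if err := m.UnmarshalJSON(raw); err != nil {
		return err
	}
	_, payload, err := m.Payload()
	if err != nil {
		return err
	}
	// Payload returns the stored canonical bytes, byte-for-byte identical
	// to raw, so the digest is stable across unmarshal/marshal round trips.
	if dgst := digest.FromBytes(payload); dgst != expected {
		return fmt.Errorf("manifest digest mismatch: %s != %s", dgst, expected)
	}
	return nil
}
```
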
diff --git a/vendor/src/github.com/docker/distribution/manifest/versioned.go b/vendor/src/github.com/docker/distribution/manifest/versioned.go
index bef3829..c57398b 100644
--- a/vendor/src/github.com/docker/distribution/manifest/versioned.go
+++ b/vendor/src/github.com/docker/distribution/manifest/versioned.go
@@ -1,9 +1,12 @@
 package manifest
 
-// Versioned provides a struct with just the manifest schemaVersion. Incoming
+// Versioned provides a struct with the manifest schemaVersion and mediaType. Incoming
 // content with unknown schema version can be decoded against this struct to
 // check the version.
 type Versioned struct {
 	// SchemaVersion is the image manifest schema that this image follows
 	SchemaVersion int `json:"schemaVersion"`
+
+	// MediaType is the media type of this schema.
+	MediaType string `json:"mediaType,omitempty"`
 }
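
The decode-to-check pattern the comment describes amounts to a single json.Unmarshal against Versioned; a short sketch, helper name hypothetical:

```go
package example

import (
	"encoding/json"

	"github.com/docker/distribution/manifest"
)

// sniffVersion decodes unknown manifest content against Versioned to read
// the schemaVersion and (where present) the mediaType. Unknown fields are
// ignored by encoding/json, so this works for any schema version.
func sniffVersion(raw []byte) (manifest.Versioned, error) {
	var v manifest.Versioned
	err := json.Unmarshal(raw, &v)
	return v, err
}
```
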
diff --git a/vendor/src/github.com/docker/distribution/manifests.go b/vendor/src/github.com/docker/distribution/manifests.go
new file mode 100644
index 0000000..1f93812
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/manifests.go
@@ -0,0 +1,100 @@
+package distribution
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target
+type Manifest interface {
+	// References returns a list of objects which make up this manifest.
+	// The references are strictly ordered from base to head. A reference
+	// is anything which can be represented by a distribution.Descriptor
+	References() []Descriptor
+
+	// Payload provides the serialized format of the manifest, in addition to
+	// the mediatype.
+	Payload() (mediatype string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package.
+// Manifest-specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+	// Build creates the manifest from this builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+	// Exists returns true if the manifest exists.
+	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+	// Get retrieves the manifest specified by the given digest
+	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest
+	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+	// Delete removes the manifest specified by the given digest. Deleting
+	// a manifest that doesn't exist will return ErrManifestNotFound
+	Delete(ctx context.Context, dgst digest.Digest) error
+
+	// Enumerate fills 'manifests' with the manifests in this service up
+	// to the size of 'manifests' and returns 'n' for the number of entries
+	// which were filled.  'last' contains an offset in the manifest set
+	// and can be used to resume iteration.
+	//Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error)
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+	Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		mediaTypes = append(mediaTypes, t)
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc)
+
+// UnmarshalManifest looks up the manifest unmarshal function registered
+// for the given MediaType
+func UnmarshalManifest(mediatype string, p []byte) (Manifest, Descriptor, error) {
+	unmarshalFunc, ok := mappings[mediatype]
+	if !ok {
+		return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype: %s", mediatype)
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.  This
+// should be called from specific manifest packages, typically in an init function.
+func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error {
+	if _, ok := mappings[mediatype]; ok {
+		return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype)
+	}
+	mappings[mediatype] = u
+	return nil
+}
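
The typical driver of this registry is the Content-Type of a manifest response, which selects the UnmarshalFunc registered by a schema package's init (as schema2 does above). A sketch with a hypothetical helper:

```go
package example

import (
	"io/ioutil"
	"net/http"

	"github.com/docker/distribution"
)

// manifestFromResponse dispatches a manifest HTTP response through the
// media-type registry to the matching unmarshal function.
func manifestFromResponse(resp *http.Response) (distribution.Manifest, distribution.Descriptor, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, distribution.Descriptor{}, err
	}
	mediatype := resp.Header.Get("Content-Type")
	return distribution.UnmarshalManifest(mediatype, body)
}
```
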
diff --git a/vendor/src/github.com/docker/distribution/reference/reference.go b/vendor/src/github.com/docker/distribution/reference/reference.go
index 7b2cc2e..c188472 100644
--- a/vendor/src/github.com/docker/distribution/reference/reference.go
+++ b/vendor/src/github.com/docker/distribution/reference/reference.go
@@ -4,22 +4,16 @@
 // Grammar
 //
 // 	reference                       := repository [ ":" tag ] [ "@" digest ]
+//	name                            := [hostname '/'] component ['/' component]*
+//	hostname                        := hostcomponent ['.' hostcomponent]* [':' port-number]
+//	hostcomponent                   := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/
+//	port-number                     := /[0-9]+/
+//	component                       := alpha-numeric [separator alpha-numeric]*
+// 	alpha-numeric                   := /[a-z0-9]+/
+//	separator                       := /[_.]|__|[-]*/
 //
-//	// repository.go
-//	repository			:= hostname ['/' component]+
-//	hostname			:= hostcomponent [':' port-number]
-//	component			:= subcomponent [separator subcomponent]*
-//	subcomponent			:= alpha-numeric ['-'* alpha-numeric]*
-//	hostcomponent                   := [hostpart '.']* hostpart
-// 	alpha-numeric			:= /[a-z0-9]+/
-//	separator			:= /([_.]|__)/
-//	port-number			:= /[0-9]+/
-//	hostpart                        := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/
-//
-//	// tag.go
 //	tag                             := /[\w][\w.-]{0,127}/
 //
-//	// from the digest package
 //	digest                          := digest-algorithm ":" digest-hex
 //	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
 //	digest-algorithm-separator      := /[+.-_]/
@@ -52,8 +46,7 @@
 	// ErrNameEmpty is returned for empty, invalid repository names.
 	ErrNameEmpty = errors.New("repository name must have at least one component")
 
-	// ErrNameTooLong is returned when a repository name is longer than
-	// RepositoryNameTotalLengthMax
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
 	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
 )
 
diff --git a/vendor/src/github.com/docker/distribution/reference/regexp.go b/vendor/src/github.com/docker/distribution/reference/regexp.go
index 06ca8db..a4ffe5b 100644
--- a/vendor/src/github.com/docker/distribution/reference/regexp.go
+++ b/vendor/src/github.com/docker/distribution/reference/regexp.go
@@ -3,47 +3,122 @@
 import "regexp"
 
 var (
-	// nameSubComponentRegexp defines the part of the name which must be
-	// begin and end with an alphanumeric character. These characters can
-	// be separated by any number of dashes.
-	nameSubComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[-]+[a-z0-9]+)*`)
+	// alphaNumericRegexp defines the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
 
-	// nameComponentRegexp restricts registry path component names to
-	// start with at least one letter or number, with following parts able to
-	// be separated by one period, underscore or double underscore.
-	nameComponentRegexp = regexp.MustCompile(nameSubComponentRegexp.String() + `(?:(?:[._]|__)` + nameSubComponentRegexp.String() + `)*`)
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores and multiple
+	// dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
 
-	nameRegexp = regexp.MustCompile(`(?:` + nameComponentRegexp.String() + `/)*` + nameComponentRegexp.String())
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
 
-	hostnameComponentRegexp = regexp.MustCompile(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`)
+	// hostnameComponentRegexp restricts the registry hostname component of a
+	// repository name to begin and end with an alphanumeric character, with
+	// dashes allowed in between.
+	hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`)
 
-	// hostnameComponentRegexp restricts the registry hostname component of a repository name to
-	// start with a component as defined by hostnameRegexp and followed by an optional port.
-	hostnameRegexp = regexp.MustCompile(`(?:` + hostnameComponentRegexp.String() + `\.)*` + hostnameComponentRegexp.String() + `(?::[0-9]+)?`)
+	// hostnameRegexp defines the structure of potential hostname components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	hostnameRegexp = expression(
+		hostnameComponentRegexp,
+		optional(repeated(literal(`.`), hostnameComponentRegexp)),
+		optional(literal(`:`), match(`[0-9]+`)))
 
 	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
-	TagRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`)
+	TagRegexp = match(`[\w][\w.-]{0,127}`)
 
 	// anchoredTagRegexp matches valid tag names, anchored at the start and
 	// end of the matched string.
-	anchoredTagRegexp = regexp.MustCompile(`^` + TagRegexp.String() + `$`)
+	anchoredTagRegexp = anchored(TagRegexp)
 
 	// DigestRegexp matches valid digests.
-	DigestRegexp = regexp.MustCompile(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
 
 	// anchoredDigestRegexp matches valid digests, anchored at the start and
 	// end of the matched string.
-	anchoredDigestRegexp = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+	anchoredDigestRegexp = anchored(DigestRegexp)
 
 	// NameRegexp is the format for the name component of references. The
 	// regexp is unanchored and has no capturing groups; anchoredNameRegexp
 	// captures the hostname and name parts, omitting the separating forward
 	// slash from either.
-	NameRegexp = regexp.MustCompile(`(?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String())
+	NameRegexp = expression(
+		optional(hostnameRegexp, literal(`/`)),
+		nameComponentRegexp,
+		optional(repeated(literal(`/`), nameComponentRegexp)))
 
-	// ReferenceRegexp is the full supported format of a reference. The
-	// regexp has capturing groups for name, tag, and digest components.
-	ReferenceRegexp = regexp.MustCompile(`^((?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String() + `)(?:[:](` + TagRegexp.String() + `))?(?:[@](` + DigestRegexp.String() + `))?$`)
+	// anchoredNameRegexp is used to parse a name value, capturing the
+	// hostname and trailing components.
+	anchoredNameRegexp = anchored(
+		optional(capture(hostnameRegexp), literal(`/`)),
+		capture(nameComponentRegexp,
+			optional(repeated(literal(`/`), nameComponentRegexp))))
 
-	// anchoredNameRegexp is used to parse a name value, capturing hostname
-	anchoredNameRegexp = regexp.MustCompile(`^(?:(` + hostnameRegexp.String() + `)/)?(` + nameRegexp.String() + `)$`)
+	// ReferenceRegexp is the full supported format of a reference. The regexp
+	// is anchored and has capturing groups for name, tag, and digest
+	// components.
+	ReferenceRegexp = anchored(capture(NameRegexp),
+		optional(literal(":"), capture(TagRegexp)),
+		optional(literal("@"), capture(DigestRegexp)))
 )
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+	re := match(regexp.QuoteMeta(s))
+
+	if _, complete := re.LiteralPrefix(); !complete {
+		panic("must be a literal")
+	}
+
+	return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+	var s string
+	for _, re := range res {
+		s += re.String()
+	}
+
+	return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`^` + expression(res...).String() + `$`)
+}
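
Since ReferenceRegexp is now anchored with exactly three capture groups, parsing a reference string is a single FindStringSubmatch call. A sketch, helper name hypothetical:

```go
package example

import "github.com/docker/distribution/reference"

// splitReference returns the name, tag, and digest components of a
// reference string; tag and digest are empty strings when absent.
func splitReference(s string) (name, tag, digest string, ok bool) {
	m := reference.ReferenceRegexp.FindStringSubmatch(s)
	if m == nil {
		return "", "", "", false
	}
	// Group 1 is the name, group 2 the tag, group 3 the digest.
	return m[1], m[2], m[3], true
}
```

For example, splitReference("localhost:5000/foo/bar:latest") would yield ("localhost:5000/foo/bar", "latest", "", true).
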
diff --git a/vendor/src/github.com/docker/distribution/registry.go b/vendor/src/github.com/docker/distribution/registry.go
index 001776f..ce5d777 100644
--- a/vendor/src/github.com/docker/distribution/registry.go
+++ b/vendor/src/github.com/docker/distribution/registry.go
@@ -2,8 +2,6 @@
 
 import (
 	"github.com/docker/distribution/context"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema1"
 )
 
 // Scope defines the set of items that match a namespace.
@@ -44,7 +42,9 @@
 }
 
 // ManifestServiceOption is a function argument for Manifest Service methods
-type ManifestServiceOption func(ManifestService) error
+type ManifestServiceOption interface {
+	Apply(ManifestService) error
+}
 
 // Repository is a named collection of manifests and layers.
 type Repository interface {
@@ -62,59 +62,10 @@
 	// be a BlobService for use with clients. This will allow such
 	// implementations to avoid implementing ServeBlob.
 
-	// Signatures returns a reference to this repository's signatures service.
-	Signatures() SignatureService
+	// Tags returns a reference to this repository's tag service
+	Tags(ctx context.Context) TagService
 }
 
 // TODO(stevvooe): Must add close methods to all these. May want to change the
 // way instances are created to better reflect internal dependency
 // relationships.
-
-// ManifestService provides operations on image manifests.
-type ManifestService interface {
-	// Exists returns true if the manifest exists.
-	Exists(dgst digest.Digest) (bool, error)
-
-	// Get retrieves the identified by the digest, if it exists.
-	Get(dgst digest.Digest) (*schema1.SignedManifest, error)
-
-	// Delete removes the manifest, if it exists.
-	Delete(dgst digest.Digest) error
-
-	// Put creates or updates the manifest.
-	Put(manifest *schema1.SignedManifest) error
-
-	// TODO(stevvooe): The methods after this message should be moved to a
-	// discrete TagService, per active proposals.
-
-	// Tags lists the tags under the named repository.
-	Tags() ([]string, error)
-
-	// ExistsByTag returns true if the manifest exists.
-	ExistsByTag(tag string) (bool, error)
-
-	// GetByTag retrieves the named manifest, if it exists.
-	GetByTag(tag string, options ...ManifestServiceOption) (*schema1.SignedManifest, error)
-
-	// TODO(stevvooe): There are several changes that need to be done to this
-	// interface:
-	//
-	//	1. Allow explicit tagging with Tag(digest digest.Digest, tag string)
-	//	2. Support reading tags with a re-entrant reader to avoid large
-	//       allocations in the registry.
-	//	3. Long-term: Provide All() method that lets one scroll through all of
-	//       the manifest entries.
-	//	4. Long-term: break out concept of signing from manifests. This is
-	//       really a part of the distribution sprint.
-	//	5. Long-term: Manifest should be an interface. This code shouldn't
-	//       really be concerned with the storage format.
-}
-
-// SignatureService provides operations on signatures.
-type SignatureService interface {
-	// Get retrieves all of the signature blobs for the specified digest.
-	Get(dgst digest.Digest) ([][]byte, error)
-
-	// Put stores the signature for the provided digest.
-	Put(dgst digest.Digest, signatures ...[]byte) error
-}
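
With ManifestServiceOption now an interface rather than a bare function type, options are small types with an Apply method. A sketch of a conforming option; debugOption is hypothetical, and real options (see etagOption in the client changes below) usually type-assert the concrete ManifestService before configuring it:

```go
package example

import "github.com/docker/distribution"

// debugOption demonstrates the Apply contract of the new interface form.
type debugOption struct{ log func(string) }

func (o debugOption) Apply(ms distribution.ManifestService) error {
	o.log("manifest service configured")
	return nil
}

// Compile-time proof that debugOption satisfies the interface.
var _ distribution.ManifestServiceOption = debugOption{}
```
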
diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
index fdaddbc..9a405d2 100644
--- a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
+++ b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go
@@ -25,7 +25,8 @@
 
 // Error returns the ID/Value
 func (ec ErrorCode) Error() string {
-	return ec.Descriptor().Value
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
 }
 
 // Descriptor returns the descriptor for the error code.
@@ -104,9 +105,7 @@
 
 // Error returns a human readable representation of the error.
 func (e Error) Error() string {
-	return fmt.Sprintf("%s: %s",
-		strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
-		e.Message)
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
 }
 
 // WithDetail will return a new Error, based on the current one, but with
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
index 7eba362..ad3da3e 100644
--- a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -495,7 +495,7 @@
 		Methods: []MethodDescriptor{
 			{
 				Method:      "GET",
-				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
 				Requests: []RequestDescriptor{
 					{
 						Headers: []ParameterDescriptor{
@@ -1041,6 +1041,70 @@
 							deniedResponseDescriptor,
 						},
 					},
+					{
+						Name:        "Mount Blob",
+						Description: "Mount a blob identified by the `mount` parameter from another repository.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "mount",
+								Type:        "query",
+								Format:      "<digest>",
+								Regexp:      digest.DigestRegexp,
+								Description: `Digest of blob to mount from the source repository.`,
+							},
+							{
+								Name:        "from",
+								Type:        "query",
+								Format:      "<repository name>",
+								Regexp:      reference.NameRegexp,
+								Description: `Name of the source repository.`,
+							},
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been mounted in the repository and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+						},
+					},
 				},
 			},
 		},
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
index 4297439..6ba39cc 100644
--- a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
@@ -204,7 +204,9 @@
 		routeURL.Path = routeURL.Path[1:]
 	}
 
-	return cr.root.ResolveReference(routeURL), nil
+	url := cr.root.ResolveReference(routeURL)
+	url.Scheme = cr.root.Scheme
+	return url, nil
 }
 
 // appendValuesURL appends the parameters to the url.
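
The scheme override above guards against a route URL that arrives with its own scheme: net/url's ResolveReference keeps the reference's scheme when one is set, clobbering an https base. A standalone stdlib demonstration of the assumed scenario:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://registry.example.com")
	route, _ := url.Parse("http://registry.example.com/v2/_catalog")

	resolved := base.ResolveReference(route)
	fmt.Println(resolved) // http://registry.example.com/v2/_catalog

	resolved.Scheme = base.Scheme // the fix re-asserts the root's scheme
	fmt.Println(resolved)         // https://registry.example.com/v2/_catalog
}
```
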
diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go b/vendor/src/github.com/docker/distribution/registry/client/auth/session.go
index 6c92fc3..6b483c6 100644
--- a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go
+++ b/vendor/src/github.com/docker/distribution/registry/client/auth/session.go
@@ -108,6 +108,8 @@
 	tokenLock       sync.Mutex
 	tokenCache      string
 	tokenExpiration time.Time
+
+	additionalScopes map[string]struct{}
 }
 
 // tokenScope represents the scope at which a token will be requested.
@@ -145,6 +147,7 @@
 			Scope:    scope,
 			Actions:  actions,
 		},
+		additionalScopes: map[string]struct{}{},
 	}
 }
 
@@ -160,7 +163,15 @@
 }
 
 func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
-	if err := th.refreshToken(params); err != nil {
+	var additionalScopes []string
+	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
+		additionalScopes = append(additionalScopes, tokenScope{
+			Resource: "repository",
+			Scope:    fromParam,
+			Actions:  []string{"pull"},
+		}.String())
+	}
+	if err := th.refreshToken(params, additionalScopes...); err != nil {
 		return err
 	}
 
@@ -169,11 +180,18 @@
 	return nil
 }
 
-func (th *tokenHandler) refreshToken(params map[string]string) error {
+func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error {
 	th.tokenLock.Lock()
 	defer th.tokenLock.Unlock()
+	var addedScopes bool
+	for _, scope := range additionalScopes {
+		if _, ok := th.additionalScopes[scope]; !ok {
+			th.additionalScopes[scope] = struct{}{}
+			addedScopes = true
+		}
+	}
 	now := th.clock.Now()
-	if now.After(th.tokenExpiration) {
+	if now.After(th.tokenExpiration) || addedScopes {
 		tr, err := th.fetchToken(params)
 		if err != nil {
 			return err
@@ -223,6 +241,10 @@
 		reqParams.Add("scope", scopeField)
 	}
 
+	for scope := range th.additionalScopes {
+		reqParams.Add("scope", scope)
+	}
+
 	if th.creds != nil {
 		username, password := th.creds.Basic(realmURL)
 		if username != "" && password != "" {
@@ -240,7 +262,8 @@
 	defer resp.Body.Close()
 
 	if !client.SuccessStatus(resp.StatusCode) {
-		return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode))
+		err := client.HandleErrorResponse(resp)
+		return nil, err
 	}
 
 	decoder := json.NewDecoder(resp.Body)
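
The new additionalScopes plumbing exists for cross-repository blob mounts: a request carrying "?from=<repository>" makes the handler request an extra pull scope for that repository. A sketch of the scope string involved; the helper is hypothetical, and the "repository:<name>:<actions>" shape follows the Docker token auth scope format:

```go
package main

import (
	"fmt"
	"strings"
)

// mountScope builds the scope string added for a cross-repository mount,
// mirroring the tokenScope{Resource: "repository", ...}.String() above.
func mountScope(fromRepo string) string {
	actions := []string{"pull"}
	return fmt.Sprintf("repository:%s:%s", fromRepo, strings.Join(actions, ","))
}

func main() {
	// A blob mount request with "?from=library/ubuntu" adds:
	fmt.Println(mountScope("library/ubuntu")) // repository:library/ubuntu:pull
}
```
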
diff --git a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go
index c7eee4e..21a018d 100644
--- a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go
+++ b/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go
@@ -33,7 +33,7 @@
 	if resp.StatusCode == http.StatusNotFound {
 		return distribution.ErrBlobUploadUnknown
 	}
-	return handleErrorResponse(resp)
+	return HandleErrorResponse(resp)
 }
 
 func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
diff --git a/vendor/src/github.com/docker/distribution/registry/client/errors.go b/vendor/src/github.com/docker/distribution/registry/client/errors.go
index 7305c02..8e3cb10 100644
--- a/vendor/src/github.com/docker/distribution/registry/client/errors.go
+++ b/vendor/src/github.com/docker/distribution/registry/client/errors.go
@@ -47,7 +47,11 @@
 	return errors
 }
 
-func handleErrorResponse(resp *http.Response) error {
+// HandleErrorResponse returns the error parsed from an HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
+func HandleErrorResponse(resp *http.Response) error {
 	if resp.StatusCode == 401 {
 		err := parseHTTPErrorResponse(resp.Body)
 		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok {
diff --git a/vendor/src/github.com/docker/distribution/registry/client/repository.go b/vendor/src/github.com/docker/distribution/registry/client/repository.go
index 6fc2bf7..d652121 100644
--- a/vendor/src/github.com/docker/distribution/registry/client/repository.go
+++ b/vendor/src/github.com/docker/distribution/registry/client/repository.go
@@ -3,6 +3,7 @@
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -14,7 +15,6 @@
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/api/v2"
 	"github.com/docker/distribution/registry/client/transport"
@@ -91,7 +91,7 @@
 			returnErr = io.EOF
 		}
 	} else {
-		return 0, handleErrorResponse(resp)
+		return 0, HandleErrorResponse(resp)
 	}
 
 	return numFilled, returnErr
@@ -156,26 +156,139 @@
 	}, nil
 }
 
-func (r *repository) Signatures() distribution.SignatureService {
-	ms, _ := r.Manifests(r.context)
-	return &signatures{
-		manifests: ms,
+func (r *repository) Tags(ctx context.Context) distribution.TagService {
+	return &tags{
+		client:  r.client,
+		ub:      r.ub,
+		context: r.context,
+		name:    r.Name(),
 	}
 }
 
-type signatures struct {
-	manifests distribution.ManifestService
+// tags implements remote tagging operations.
+type tags struct {
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+	name    string
 }
 
-func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) {
-	m, err := s.manifests.Get(dgst)
+// All returns all tags
+func (t *tags) All(ctx context.Context) ([]string, error) {
+	var tags []string
+
+	u, err := t.ub.BuildTagsURL(t.name)
 	if err != nil {
-		return nil, err
+		return tags, err
 	}
-	return m.Signatures()
+
+	resp, err := t.client.Get(u)
+	if err != nil {
+		return tags, err
+	}
+	defer resp.Body.Close()
+
+	if SuccessStatus(resp.StatusCode) {
+		b, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return tags, err
+		}
+
+		tagsResponse := struct {
+			Tags []string `json:"tags"`
+		}{}
+		if err := json.Unmarshal(b, &tagsResponse); err != nil {
+			return tags, err
+		}
+		tags = tagsResponse.Tags
+		return tags, nil
+	}
+	return tags, HandleErrorResponse(resp)
 }
 
-func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error {
+func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
+	desc := distribution.Descriptor{}
+	headers := response.Header
+
+	ctHeader := headers.Get("Content-Type")
+	if ctHeader == "" {
+		return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
+	}
+	desc.MediaType = ctHeader
+
+	digestHeader := headers.Get("Docker-Content-Digest")
+	if digestHeader == "" {
+		bytes, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		_, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		return desc, nil
+	}
+
+	dgst, err := digest.ParseDigest(digestHeader)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	desc.Digest = dgst
+
+	lengthHeader := headers.Get("Content-Length")
+	if lengthHeader == "" {
+		return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
+	}
+	length, err := strconv.ParseInt(lengthHeader, 10, 64)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	desc.Size = length
+
+	return desc, nil
+}
+
+// Get issues a HEAD request for a Manifest against its named endpoint in order
+// to construct a descriptor for the tag.  If the registry doesn't support HEADing
+// a manifest, fall back to GET.
+func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
+	u, err := t.ub.BuildManifestURL(t.name, tag)
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+	var attempts int
+	resp, err := t.client.Head(u)
+
+check:
+	if err != nil {
+		return distribution.Descriptor{}, err
+	}
+
+	switch {
+	case resp.StatusCode >= 200 && resp.StatusCode < 400:
+		return descriptorFromResponse(resp)
+	case resp.StatusCode == http.StatusMethodNotAllowed:
+		if attempts > 0 {
+			// The fallback GET was already tried once; surface the error
+			// response rather than a possibly nil err.
+			return distribution.Descriptor{}, HandleErrorResponse(resp)
+		}
+		resp, err = t.client.Get(u)
+		attempts++
+		goto check
+	default:
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
 	panic("not implemented")
 }
 
@@ -186,44 +299,8 @@
 	etags  map[string]string
 }
 
-func (ms *manifests) Tags() ([]string, error) {
-	u, err := ms.ub.BuildTagsURL(ms.name)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := ms.client.Get(u)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if SuccessStatus(resp.StatusCode) {
-		b, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return nil, err
-		}
-
-		tagsResponse := struct {
-			Tags []string `json:"tags"`
-		}{}
-		if err := json.Unmarshal(b, &tagsResponse); err != nil {
-			return nil, err
-		}
-
-		return tagsResponse.Tags, nil
-	}
-	return nil, handleErrorResponse(resp)
-}
-
-func (ms *manifests) Exists(dgst digest.Digest) (bool, error) {
-	// Call by Tag endpoint since the API uses the same
-	// URL endpoint for tags and digests.
-	return ms.ExistsByTag(dgst.String())
-}
-
-func (ms *manifests) ExistsByTag(tag string) (bool, error) {
-	u, err := ms.ub.BuildManifestURL(ms.name, tag)
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
 	if err != nil {
 		return false, err
 	}
@@ -238,49 +315,66 @@
 	} else if resp.StatusCode == http.StatusNotFound {
 		return false, nil
 	}
-	return false, handleErrorResponse(resp)
+	return false, HandleErrorResponse(resp)
 }
 
-func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
-	// Call by Tag endpoint since the API uses the same
-	// URL endpoint for tags and digests.
-	return ms.GetByTag(dgst.String())
-}
-
-// AddEtagToTag allows a client to supply an eTag to GetByTag which will be
+// AddEtagToTag allows a client to supply an eTag to Get which will be
 // used for a conditional HTTP request.  If the eTag matches, a nil manifest
-// and nil error will be returned. etag is automatically quoted when added to
-// this map.
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
 func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
-	return func(ms distribution.ManifestService) error {
-		if ms, ok := ms.(*manifests); ok {
-			ms.etags[tag] = fmt.Sprintf(`"%s"`, etag)
-			return nil
-		}
-		return fmt.Errorf("etag options is a client-only option")
-	}
+	return etagOption{tag, etag}
 }
 
-func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag options is a client-only option")
+}
+
+func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+	var tag string
 	for _, option := range options {
-		err := option(ms)
-		if err != nil {
-			return nil, err
+		if opt, ok := option.(withTagOption); ok {
+			tag = opt.tag
+		} else {
+			err := option.Apply(ms)
+			if err != nil {
+				return nil, err
+			}
 		}
 	}
 
-	u, err := ms.ub.BuildManifestURL(ms.name, tag)
+	var ref string
+	if tag != "" {
+		ref = tag
+	} else {
+		ref = dgst.String()
+	}
+
+	u, err := ms.ub.BuildManifestURL(ms.name, ref)
 	if err != nil {
 		return nil, err
 	}
+
 	req, err := http.NewRequest("GET", u, nil)
 	if err != nil {
 		return nil, err
 	}
 
-	if _, ok := ms.etags[tag]; ok {
-		req.Header.Set("If-None-Match", ms.etags[tag])
+	for _, t := range distribution.ManifestMediaTypes() {
+		req.Header.Add("Accept", t)
 	}
+
+	if _, ok := ms.etags[ref]; ok {
+		req.Header.Set("If-None-Match", ms.etags[ref])
+	}
+
 	resp, err := ms.client.Do(req)
 	if err != nil {
 		return nil, err
@@ -289,44 +383,89 @@
 	if resp.StatusCode == http.StatusNotModified {
 		return nil, distribution.ErrManifestNotModified
 	} else if SuccessStatus(resp.StatusCode) {
-		var sm schema1.SignedManifest
-		decoder := json.NewDecoder(resp.Body)
+		mt := resp.Header.Get("Content-Type")
+		body, err := ioutil.ReadAll(resp.Body)
 
-		if err := decoder.Decode(&sm); err != nil {
+		if err != nil {
 			return nil, err
 		}
-		return &sm, nil
+		m, _, err := distribution.UnmarshalManifest(mt, body)
+		if err != nil {
+			return nil, err
+		}
+		return m, nil
 	}
-	return nil, handleErrorResponse(resp)
+	return nil, HandleErrorResponse(resp)
 }
 
-func (ms *manifests) Put(m *schema1.SignedManifest) error {
-	manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag)
-	if err != nil {
-		return err
+// WithTag allows a tag to be passed into Put which enables the client
+// to build a correct URL.
+func WithTag(tag string) distribution.ManifestServiceOption {
+	return withTagOption{tag}
+}
+
+type withTagOption struct{ tag string }
+
+func (o withTagOption) Apply(m distribution.ManifestService) error {
+	if _, ok := m.(*manifests); ok {
+		return nil
+	}
+	return fmt.Errorf("withTagOption is a client-only option")
+}
+
+// Put puts a manifest.  A tag can be specified using an options parameter
+// (see WithTag) which is used to build the correct upload URL.
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+	var tag string
+
+	for _, option := range options {
+		if opt, ok := option.(withTagOption); ok {
+			tag = opt.tag
+		} else {
+			err := option.Apply(ms)
+			if err != nil {
+				return "", err
+			}
+		}
 	}
 
-	// todo(richardscothern): do something with options here when they become applicable
-
-	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw))
+	manifestURL, err := ms.ub.BuildManifestURL(ms.name, tag)
 	if err != nil {
-		return err
+		return "", err
 	}
 
+	mediaType, p, err := m.Payload()
+	if err != nil {
+		return "", err
+	}
+
+	putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p))
+	if err != nil {
+		return "", err
+	}
+
+	putRequest.Header.Set("Content-Type", mediaType)
+
 	resp, err := ms.client.Do(putRequest)
 	if err != nil {
-		return err
+		return "", err
 	}
 	defer resp.Body.Close()
 
 	if SuccessStatus(resp.StatusCode) {
-		// TODO(dmcgowan): make use of digest header
-		return nil
+		dgstHeader := resp.Header.Get("Docker-Content-Digest")
+		dgst, err := digest.ParseDigest(dgstHeader)
+		if err != nil {
+			return "", err
+		}
+
+		return dgst, nil
 	}
-	return handleErrorResponse(resp)
+
+	return "", HandleErrorResponse(resp)
 }
 
-func (ms *manifests) Delete(dgst digest.Digest) error {
+func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
 	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
 	if err != nil {
 		return err
@@ -345,9 +484,14 @@
 	if SuccessStatus(resp.StatusCode) {
 		return nil
 	}
-	return handleErrorResponse(resp)
+	return HandleErrorResponse(resp)
 }
 
+// todo(richardscothern): Restore interface and implementation with merge of #1050
+/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
+	panic("not supported")
+}*/
+
 type blobs struct {
 	name   string
 	ub     *v2.URLBuilder
@@ -377,11 +521,7 @@
 }
 
 func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
-	desc, err := bs.Stat(ctx, dgst)
-	if err != nil {
-		return nil, err
-	}
-	reader, err := bs.Open(ctx, desc.Digest)
+	reader, err := bs.Open(ctx, dgst)
 	if err != nil {
 		return nil, err
 	}
@@ -401,7 +541,7 @@
 			if resp.StatusCode == http.StatusNotFound {
 				return distribution.ErrBlobUnknown
 			}
-			return handleErrorResponse(resp)
+			return HandleErrorResponse(resp)
 		}), nil
 }
 
@@ -432,8 +572,57 @@
 	return writer.Commit(ctx, desc)
 }
 
-func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
-	u, err := bs.ub.BuildBlobUploadURL(bs.name)
+// createOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type createOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+	}
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+	return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*createOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+
+		opts.Mount.ShouldMount = true
+		opts.Mount.From = ref
+
+		return nil
+	})
+}
+
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+	var opts createOptions
+
+	for _, option := range options {
+		err := option.Apply(&opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var values []url.Values
+
+	if opts.Mount.ShouldMount {
+		values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+	}
+
+	u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+	if err != nil {
+		return nil, err
+	}
 
 	resp, err := bs.client.Post(u, "", nil)
 	if err != nil {
@@ -441,7 +630,14 @@
 	}
 	defer resp.Body.Close()
 
-	if SuccessStatus(resp.StatusCode) {
+	switch resp.StatusCode {
+	case http.StatusCreated:
+		desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+		if err != nil {
+			return nil, err
+		}
+		return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+	case http.StatusAccepted:
 		// TODO(dmcgowan): Check for invalid UUID
 		uuid := resp.Header.Get("Docker-Upload-UUID")
 		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
@@ -456,8 +652,9 @@
 			startedAt: time.Now(),
 			location:  location,
 		}, nil
+	default:
+		return nil, HandleErrorResponse(resp)
 	}
-	return nil, handleErrorResponse(resp)
 }
 
 func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
@@ -488,6 +685,10 @@
 
 	if SuccessStatus(resp.StatusCode) {
 		lengthHeader := resp.Header.Get("Content-Length")
+		if lengthHeader == "" {
+			return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
+		}
+
 		length, err := strconv.ParseInt(lengthHeader, 10, 64)
 		if err != nil {
 			return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
@@ -501,7 +702,7 @@
 	} else if resp.StatusCode == http.StatusNotFound {
 		return distribution.Descriptor{}, distribution.ErrBlobUnknown
 	}
-	return distribution.Descriptor{}, handleErrorResponse(resp)
+	return distribution.Descriptor{}, HandleErrorResponse(resp)
 }
 
 func buildCatalogValues(maxEntries int, last string) url.Values {
@@ -538,7 +739,7 @@
 	if SuccessStatus(resp.StatusCode) {
 		return nil
 	}
-	return handleErrorResponse(resp)
+	return HandleErrorResponse(resp)
 }
 
 func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
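
Taken together, the reworked client surface resolves tags through the TagService and fetches manifests by digest. A hypothetical end-to-end sketch, assuming `repo` comes from this package's repository constructor (e.g. client.NewRepository):

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// fetchByTag resolves a tag to a descriptor, then fetches the manifest
// by its digest through the ManifestService.
func fetchByTag(ctx context.Context, repo distribution.Repository, tag string) (distribution.Manifest, error) {
	desc, err := repo.Tags(ctx).Get(ctx, tag)
	if err != nil {
		return nil, err
	}
	ms, err := repo.Manifests(ctx)
	if err != nil {
		return nil, err
	}
	return ms.Get(ctx, desc.Digest)
}
```
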
diff --git a/vendor/src/github.com/docker/distribution/tags.go b/vendor/src/github.com/docker/distribution/tags.go
new file mode 100644
index 0000000..5030565
--- /dev/null
+++ b/vendor/src/github.com/docker/distribution/tags.go
@@ -0,0 +1,27 @@
+package distribution
+
+import (
+	"github.com/docker/distribution/context"
+)
+
+// TagService provides access to information about tagged objects.
+type TagService interface {
+	// Get retrieves the descriptor identified by the tag. Some
+	// implementations may differentiate between "trusted" tags and
+	// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
+	// as an ErrTagUntrusted error, with the target descriptor.
+	Get(ctx context.Context, tag string) (Descriptor, error)
+
+	// Tag associates the tag with the provided descriptor, updating the
+	// current association, if needed.
+	Tag(ctx context.Context, tag string, desc Descriptor) error
+
+	// Untag removes the given tag association
+	Untag(ctx context.Context, tag string) error
+
+	// All returns the set of tags managed by this tag service
+	All(ctx context.Context) ([]string, error)
+
+	// Lookup returns the set of tags referencing the given digest.
+	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
+}
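
A minimal in-memory sketch of the TagService contract (hypothetical, for illustration only; not part of the vendored package):

```go
package example

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// memTagService keeps tag associations in a plain map.
type memTagService struct {
	tags map[string]distribution.Descriptor
}

var _ distribution.TagService = (*memTagService)(nil)

func (s *memTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	desc, ok := s.tags[tag]
	if !ok {
		return distribution.Descriptor{}, fmt.Errorf("unknown tag: %s", tag)
	}
	return desc, nil
}

func (s *memTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
	s.tags[tag] = desc
	return nil
}

func (s *memTagService) Untag(ctx context.Context, tag string) error {
	delete(s.tags, tag)
	return nil
}

func (s *memTagService) All(ctx context.Context) ([]string, error) {
	all := make([]string, 0, len(s.tags))
	for t := range s.tags {
		all = append(all, t)
	}
	return all, nil
}

func (s *memTagService) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {
	var tags []string
	for t, d := range s.tags {
		if d.Digest == desc.Digest {
			tags = append(tags, t)
		}
	}
	return tags, nil
}
```
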
diff --git a/vendor/src/github.com/docker/engine-api/client/client.go b/vendor/src/github.com/docker/engine-api/client/client.go
index 061fd55..28f00e5 100644
--- a/vendor/src/github.com/docker/engine-api/client/client.go
+++ b/vendor/src/github.com/docker/engine-api/client/client.go
@@ -65,7 +65,6 @@
 func NewClient(host string, version string, transport *http.Transport, httpHeaders map[string]string) (*Client, error) {
 	var (
 		basePath       string
-		tlsConfig      *tls.Config
 		scheme         = "http"
 		protoAddrParts = strings.SplitN(host, "://", 2)
 		proto, addr    = protoAddrParts[0], protoAddrParts[1]
@@ -90,7 +89,7 @@
 		addr:              addr,
 		basePath:          basePath,
 		scheme:            scheme,
-		tlsConfig:         tlsConfig,
+		tlsConfig:         transport.TLSClientConfig,
 		httpClient:        &http.Client{Transport: transport},
 		version:           version,
 		customHTTPHeaders: httpHeaders,
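
With this fix, TLS settings propagate from the supplied transport instead of the previously always-nil local variable. A usage sketch; the host address and API version are assumptions:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/docker/engine-api/client"
)

func main() {
	// The TLS configuration set on the transport is now what the client
	// records internally (it was previously always nil).
	transport := &http.Transport{
		TLSClientConfig: &tls.Config{ /* certificates, RootCAs, ... */ },
	}

	cli, err := client.NewClient("tcp://127.0.0.1:2376", "v1.22", transport, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = cli
}
```
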
diff --git a/vendor/src/github.com/docker/engine-api/client/container_create.go b/vendor/src/github.com/docker/engine-api/client/container_create.go
index 1c35aaf..0f85e7b 100644
--- a/vendor/src/github.com/docker/engine-api/client/container_create.go
+++ b/vendor/src/github.com/docker/engine-api/client/container_create.go
@@ -33,13 +33,13 @@
 
 	serverResp, err := cli.post("/containers/create", query, body, nil)
 	if err != nil {
-		if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), config.Image) {
+		if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
 			return response, imageNotFoundError{config.Image}
 		}
 		return response, err
 	}
 
-	if serverResp.statusCode == 404 && strings.Contains(err.Error(), config.Image) {
+	if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
 		return response, imageNotFoundError{config.Image}
 	}
 
diff --git a/vendor/src/github.com/docker/engine-api/client/interface.go b/vendor/src/github.com/docker/engine-api/client/interface.go
index 3aecdfc..155a2bc 100644
--- a/vendor/src/github.com/docker/engine-api/client/interface.go
+++ b/vendor/src/github.com/docker/engine-api/client/interface.go
@@ -60,7 +60,7 @@
 	Info() (types.Info, error)
 	NetworkConnect(networkID, containerID string, config *network.EndpointSettings) error
 	NetworkCreate(options types.NetworkCreate) (types.NetworkCreateResponse, error)
-	NetworkDisconnect(networkID, containerID string) error
+	NetworkDisconnect(networkID, containerID string, force bool) error
 	NetworkInspect(networkID string) (types.NetworkResource, error)
 	NetworkList(options types.NetworkListOptions) ([]types.NetworkResource, error)
 	NetworkRemove(networkID string) error
diff --git a/vendor/src/github.com/docker/engine-api/client/network.go b/vendor/src/github.com/docker/engine-api/client/network.go
index ccd6083..de7f184 100644
--- a/vendor/src/github.com/docker/engine-api/client/network.go
+++ b/vendor/src/github.com/docker/engine-api/client/network.go
@@ -42,9 +42,9 @@
 }
 
 // NetworkDisconnect disconnects a container from an existing network in the docker host.
-func (cli *Client) NetworkDisconnect(networkID, containerID string) error {
-	nc := types.NetworkConnect{Container: containerID}
-	resp, err := cli.post("/networks/"+networkID+"/disconnect", nil, nc, nil)
+func (cli *Client) NetworkDisconnect(networkID, containerID string, force bool) error {
+	nd := types.NetworkDisconnect{Container: containerID, Force: force}
+	resp, err := cli.post("/networks/"+networkID+"/disconnect", nil, nd, nil)
 	ensureReaderClosed(resp)
 	return err
 }
diff --git a/vendor/src/github.com/docker/engine-api/types/client.go b/vendor/src/github.com/docker/engine-api/types/client.go
index 77d94f3..16c1cb1 100644
--- a/vendor/src/github.com/docker/engine-api/types/client.go
+++ b/vendor/src/github.com/docker/engine-api/types/client.go
@@ -154,28 +154,19 @@
 
 // ImageCreateOptions holds information to create images.
 type ImageCreateOptions struct {
-	// Parent is the image to create this image from
-	Parent string
-	// Tag is the name to tag this image
-	Tag string
-	// RegistryAuth is the base64 encoded credentials for this server
-	RegistryAuth string
+	Parent       string // Parent is the name of the image to pull
+	Tag          string // Tag is the name to tag this image with
+	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
 }
 
 // ImageImportOptions holds information to import images from the client host.
 type ImageImportOptions struct {
-	// Source is the data to send to the server to create this image from
-	Source io.Reader
-	// Source is the name of the source to import this image from
-	SourceName string
-	// RepositoryName is the name of the repository to import this image
-	RepositoryName string
-	// Message is the message to tag the image with
-	Message string
-	// Tag is the name to tag this image
-	Tag string
-	// Changes are the raw changes to apply to the image
-	Changes []string
+	Source         io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
+	SourceName     string    // SourceName is the name of the image to pull (mutually exclusive with Source)
+	RepositoryName string    // RepositoryName is the name of the repository to import this image into
+	Message        string    // Message is the message to tag the image with
+	Tag            string    // Tag is the name to tag this image with
+	Changes        []string  // Changes are the raw changes to apply to this image
 }
 
 // ImageListOptions holds parameters to filter the list of images with.
@@ -193,10 +184,9 @@
 
 // ImagePullOptions holds information to pull images.
 type ImagePullOptions struct {
-	ImageID string
-	Tag     string
-	// RegistryAuth is the base64 encoded credentials for this server
-	RegistryAuth string
+	ImageID      string // ImageID is the name of the image to pull
+	Tag          string // Tag is the name of the tag to be pulled
+	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
 }
 
 // ImagePushOptions holds information to push images.
diff --git a/vendor/src/github.com/docker/engine-api/types/container/host_config.go b/vendor/src/github.com/docker/engine-api/types/container/host_config.go
index b7af3f0..f43263d 100644
--- a/vendor/src/github.com/docker/engine-api/types/container/host_config.go
+++ b/vendor/src/github.com/docker/engine-api/types/container/host_config.go
@@ -180,7 +180,7 @@
 	MemoryReservation    int64           // Memory soft limit (in bytes)
 	MemorySwap           int64           // Total memory usage (memory + swap); set `-1` to disable swap
 	MemorySwappiness     *int64          // Tuning container memory swappiness behaviour
-	OomKillDisable       bool            // Whether to disable OOM Killer or not
+	OomKillDisable       *bool           // Whether to disable OOM Killer or not
 	PidsLimit            int64           // Setting pids limit for a container
 	Ulimits              []*units.Ulimit // List of ulimits to be set in the container
 }
@@ -222,7 +222,6 @@
 	PublishAllPorts bool               // Should docker publish all exposed port for the container
 	ReadonlyRootfs  bool               // Is the container root filesystem in read-only
 	SecurityOpt     []string           // List of string values to customize labels for MLS systems, such as SELinux.
-	StorageOpt      []string           // Graph storage options per container
 	Tmpfs           map[string]string  `json:",omitempty"` // List of tmpfs (mounts) used for the container
 	UTSMode         UTSMode            // UTS namespace to use for the container
 	ShmSize         int64              // Total shm memory usage
diff --git a/vendor/src/github.com/docker/engine-api/types/filters/parse.go b/vendor/src/github.com/docker/engine-api/types/filters/parse.go
index e99462c..9c80b1e 100644
--- a/vendor/src/github.com/docker/engine-api/types/filters/parse.go
+++ b/vendor/src/github.com/docker/engine-api/types/filters/parse.go
@@ -10,12 +10,12 @@
 	"strings"
 )
 
-// Args stores filter arguments as map key:{array of values}.
-// It contains a aggregation of the list of arguments (which are in the form
+// Args stores filter arguments as map key:{map key: bool}.
+// It contains an aggregation of the map of arguments (which are in the form
 // of -f 'key=value') based on the key, and store values for the same key
-// in an slice.
+// in a map with string keys and boolean values.
 // e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
-// the args will be {'label': {'label1=1','label2=2'}, 'image.name', {'ubuntu'}}
+// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
 type Args struct {
 	fields map[string]map[string]bool
 }
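A short sketch of how several -f flags aggregate under the new map-of-sets representation; the NewArgs/Add/Len helpers are assumed from this package's API of the era:

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "label1=1")
	args.Add("label", "label2=2")
	args.Add("image.name", "ubuntu")
	// Internal shape is now:
	// {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
	fmt.Println(args.Len()) // 2 distinct keys
}
```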
diff --git a/vendor/src/github.com/docker/engine-api/types/network/network.go b/vendor/src/github.com/docker/engine-api/types/network/network.go
index 9b09f5e..48b2199 100644
--- a/vendor/src/github.com/docker/engine-api/types/network/network.go
+++ b/vendor/src/github.com/docker/engine-api/types/network/network.go
@@ -8,8 +8,9 @@
 
 // IPAM represents IP Address Management
 type IPAM struct {
-	Driver string
-	Config []IPAMConfig
+	Driver  string
+	Options map[string]string // Per-network IPAM driver options
+	Config  []IPAMConfig
 }
 
 // IPAMConfig represents IPAM configurations
@@ -30,7 +31,10 @@
 type EndpointSettings struct {
 	// Configurations
 	IPAMConfig *EndpointIPAMConfig
+	Links      []string
+	Aliases    []string
 	// Operational data
+	NetworkID           string
 	EndpointID          string
 	Gateway             string
 	IPAddress           string
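A sketch of the new fields in use: per-network IPAM driver options, plus links and network-scoped aliases on an endpoint (the option key, subnet, and alias values are illustrative):

```go
package main

import "github.com/docker/engine-api/types/network"

func main() {
	ipam := network.IPAM{
		Driver:  "default",
		Options: map[string]string{"example.opt": "value"}, // forwarded to the IPAM driver
		Config:  []network.IPAMConfig{{Subnet: "172.28.0.0/16"}},
	}
	ep := network.EndpointSettings{
		Links:   []string{"db:db"},
		Aliases: []string{"web", "frontend"}, // resolvable only within this network
	}
	_, _ = ipam, ep
}
```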
diff --git a/vendor/src/github.com/docker/engine-api/types/seccomp.go b/vendor/src/github.com/docker/engine-api/types/seccomp.go
new file mode 100644
index 0000000..e0305a9
--- /dev/null
+++ b/vendor/src/github.com/docker/engine-api/types/seccomp.go
@@ -0,0 +1,68 @@
+package types
+
+// Seccomp represents the config for a seccomp profile for syscall restriction.
+type Seccomp struct {
+	DefaultAction Action     `json:"defaultAction"`
+	Architectures []Arch     `json:"architectures"`
+	Syscalls      []*Syscall `json:"syscalls"`
+}
+
+// Arch used for additional architectures
+type Arch string
+
+// Additional architectures permitted to be used for system calls
+// By default only the native architecture of the kernel is permitted
+const (
+	ArchX86         Arch = "SCMP_ARCH_X86"
+	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
+	ArchX32         Arch = "SCMP_ARCH_X32"
+	ArchARM         Arch = "SCMP_ARCH_ARM"
+	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
+	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
+	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
+	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
+	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
+	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
+	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
+)
+
+// Action taken upon Seccomp rule match
+type Action string
+
+// Define actions for Seccomp rules
+const (
+	ActKill  Action = "SCMP_ACT_KILL"
+	ActTrap  Action = "SCMP_ACT_TRAP"
+	ActErrno Action = "SCMP_ACT_ERRNO"
+	ActTrace Action = "SCMP_ACT_TRACE"
+	ActAllow Action = "SCMP_ACT_ALLOW"
+)
+
+// Operator used to match syscall arguments in Seccomp
+type Operator string
+
+// Define operators for syscall arguments in Seccomp
+const (
+	OpNotEqual     Operator = "SCMP_CMP_NE"
+	OpLessThan     Operator = "SCMP_CMP_LT"
+	OpLessEqual    Operator = "SCMP_CMP_LE"
+	OpEqualTo      Operator = "SCMP_CMP_EQ"
+	OpGreaterEqual Operator = "SCMP_CMP_GE"
+	OpGreaterThan  Operator = "SCMP_CMP_GT"
+	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
+)
+
+// Arg used for matching specific syscall arguments in Seccomp
+type Arg struct {
+	Index    uint     `json:"index"`
+	Value    uint64   `json:"value"`
+	ValueTwo uint64   `json:"valueTwo"`
+	Op       Operator `json:"op"`
+}
+
+// Syscall is used to match a syscall in Seccomp
+type Syscall struct {
+	Name   string `json:"name"`
+	Action Action `json:"action"`
+	Args   []*Arg `json:"args"`
+}
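These types mirror the JSON layout of a seccomp profile: a default action, extra architectures, and per-syscall overrides with optional argument matching. A minimal sketch of building and serializing one (the syscall selection is illustrative, not a recommended policy):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	profile := types.Seccomp{
		DefaultAction: types.ActErrno, // deny everything not listed below
		Architectures: []types.Arch{types.ArchX86_64, types.ArchX86},
		Syscalls: []*types.Syscall{
			{Name: "chmod", Action: types.ActAllow},
			{Name: "personality", Action: types.ActAllow,
				Args: []*types.Arg{{Index: 0, Value: 0x0, Op: types.OpEqualTo}}},
		},
	}
	b, _ := json.MarshalIndent(profile, "", "  ")
	fmt.Println(string(b))
}
```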
diff --git a/vendor/src/github.com/docker/engine-api/types/types.go b/vendor/src/github.com/docker/engine-api/types/types.go
index 946ab03..9666ea4 100644
--- a/vendor/src/github.com/docker/engine-api/types/types.go
+++ b/vendor/src/github.com/docker/engine-api/types/types.go
@@ -192,6 +192,9 @@
 type Info struct {
 	ID                 string
 	Containers         int
+	ContainersRunning  int
+	ContainersPaused   int
+	ContainersStopped  int
 	Images             int
 	Driver             string
 	DriverStatus       [][2]string
@@ -404,6 +407,7 @@
 	CheckDuplicate bool
 	Driver         string
 	IPAM           network.IPAM
+	Internal       bool
 	Options        map[string]string
 }
 
@@ -416,10 +420,11 @@
 // NetworkConnect represents the data to be used to connect a container to the network
 type NetworkConnect struct {
 	Container      string
-	EndpointConfig *network.EndpointSettings `json:"endpoint_config"`
+	EndpointConfig *network.EndpointSettings `json:",omitempty"`
 }
 
 // NetworkDisconnect represents the data to be used to disconnect a container from the network
 type NetworkDisconnect struct {
 	Container string
+	Force     bool
 }
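With the new Force field a client can ask the daemon to tear down a network attachment even when a clean disconnect is impossible. A sketch of the request payload this produces (the container name is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/engine-api/types"
)

func main() {
	nd := types.NetworkDisconnect{Container: "web-1", Force: true}
	b, _ := json.Marshal(nd)
	fmt.Println(string(b)) // {"Container":"web-1","Force":true}
}
```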
diff --git a/vendor/src/github.com/docker/libnetwork/.gitignore b/vendor/src/github.com/docker/libnetwork/.gitignore
index 08e11d7..f9cd104 100644
--- a/vendor/src/github.com/docker/libnetwork/.gitignore
+++ b/vendor/src/github.com/docker/libnetwork/.gitignore
@@ -8,6 +8,8 @@
 integration-tmp/
 _obj
 _test
+.vagrant
+
 
 # Architecture specific extensions/prefixes
 *.[568vq]
diff --git a/vendor/src/github.com/docker/libnetwork/CHANGELOG.md b/vendor/src/github.com/docker/libnetwork/CHANGELOG.md
new file mode 100644
index 0000000..ea136da
--- /dev/null
+++ b/vendor/src/github.com/docker/libnetwork/CHANGELOG.md
@@ -0,0 +1,55 @@
+# Changelog
+
+## 0.5.6 (2016-01-14)
+- Setup embedded DNS server correctly on container restart. Fixes docker/docker#19354
+
+## 0.5.5 (2016-01-14)
+- Allow network-scoped alias to be resolved for anonymous endpoint
+- Self repair corrupted IP database that could happen in 1.9.0 & 1.9.1
+- Skip IPTables cleanup if --iptables=false is set. Fixes docker/docker#19063
+
+## 0.5.4 (2016-01-12)
+- Removed the isNodeAlive protection when user forces an endpoint delete
+
+## 0.5.3 (2016-01-12)
+- Bridge driver supporting internal network option
+- Backend implementation to support "force" option to network disconnect
+- Fixing a regex in etchosts package to fix docker/docker#19080
+
+## 0.5.2 (2016-01-08)
+- Embedded DNS replacing /etc/hosts based Service Discovery
+- Container local alias and Network-scoped alias support
+- Backend support for internal network mode
+- Support for IPAM driver options
+- Fixes overlay veth cleanup issue : docker/docker#18814
+- fixes docker/docker#19139
+- disable IPv6 Duplicate Address Detection
+
+## 0.5.1 (2015-12-07)
+- Allowing user to assign IP Address for containers
+- Fixes docker/docker#18214
+- Fixes docker/docker#18380
+
+## 0.5.0 (2015-10-30)
+
+- Docker multi-host networking exiting experimental channel
+- Introduced IP Address Management and IPAM drivers
+- DEPRECATE service discovery from default bridge network
+- Introduced new network UX
+- Support for multiple networks in bridge driver
+- Local persistence with boltdb
+
+## 0.4.0 (2015-07-24)
+
+- Introduce experimental version of Overlay driver
+- Introduce experimental version of network plugins
+- Introduce experimental version of network & service UX
+- Introduced experimental /etc/hosts based service discovery
+- Integrated with libkv
+- Improving test coverage
+- Fixed a bunch of issues with osl namespace mgmt
+
+## 0.3.0 (2015-05-27)
+ 
+- Introduce CNM (Container Networking Model)
+- Replace docker networking with CNM & Bridge driver
diff --git a/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go b/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
index a537ed0..270a36a 100644
--- a/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
+++ b/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
@@ -9,6 +9,7 @@
 	"fmt"
 	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/datastore"
 	"github.com/docker/libnetwork/types"
 )
@@ -243,6 +244,58 @@
 	return err != nil
 }
 
+func (h *Handle) runConsistencyCheck() bool {
+	corrupted := false
+	for p, c := h.head, h.head.next; c != nil; c = c.next {
+		if c.count == 0 {
+			corrupted = true
+			p.next = c.next
+			continue // keep same p
+		}
+		p = c
+	}
+	return corrupted
+}
+
+// CheckConsistency checks if the bit sequence is in an inconsistent state and attempts to fix it.
+// It looks for a corruption signature that may happen in docker 1.9.0 and 1.9.1.
+func (h *Handle) CheckConsistency() error {
+	for {
+		h.Lock()
+		store := h.store
+		h.Unlock()
+
+		if store != nil {
+			if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
+				return err
+			}
+		}
+
+		h.Lock()
+		nh := h.getCopy()
+		h.Unlock()
+
+		if !nh.runConsistencyCheck() {
+			return nil
+		}
+
+		if err := nh.writeToStore(); err != nil {
+			if _, ok := err.(types.RetryError); !ok {
+				return fmt.Errorf("internal failure while fixing inconsistent bitsequence: %v", err)
+			}
+			continue
+		}
+
+		log.Infof("Fixed inconsistent bit sequence in datastore:\n%s\n%s", h, nh)
+
+		h.Lock()
+		h.head = nh.head
+		h.Unlock()
+
+		return nil
+	}
+}
+
 // set/reset the bit
 func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64, error) {
 	var (
diff --git a/vendor/src/github.com/docker/libnetwork/controller.go b/vendor/src/github.com/docker/libnetwork/controller.go
index 1cd200f..7efc409 100644
--- a/vendor/src/github.com/docker/libnetwork/controller.go
+++ b/vendor/src/github.com/docker/libnetwork/controller.go
@@ -216,6 +216,31 @@
 	return true
 }
 
+func (c *controller) clusterHostID() string {
+	c.Lock()
+	defer c.Unlock()
+	if c.cfg == nil || c.cfg.Cluster.Address == "" {
+		return ""
+	}
+	addr := strings.Split(c.cfg.Cluster.Address, ":")
+	return addr[0]
+}
+
+func (c *controller) isNodeAlive(node string) bool {
+	if c.discovery == nil {
+		return false
+	}
+
+	nodes := c.discovery.Fetch()
+	for _, n := range nodes {
+		if n.String() == node {
+			return true
+		}
+	}
+
+	return false
+}
+
 func (c *controller) initDiscovery(watcher discovery.Watcher) error {
 	if c.cfg == nil {
 		return fmt.Errorf("discovery initialization requires a valid configuration")
diff --git a/vendor/src/github.com/docker/libnetwork/default_gateway.go b/vendor/src/github.com/docker/libnetwork/default_gateway.go
index d9277ba..bfd7b72 100644
--- a/vendor/src/github.com/docker/libnetwork/default_gateway.go
+++ b/vendor/src/github.com/docker/libnetwork/default_gateway.go
@@ -87,7 +87,7 @@
 	if err := ep.sbLeave(sb); err != nil {
 		return fmt.Errorf("container %s: endpoint leaving GW Network failed: %v", sb.containerID, err)
 	}
-	if err := ep.Delete(); err != nil {
+	if err := ep.Delete(false); err != nil {
 		return fmt.Errorf("container %s: deleting endpoint on GW Network failed: %v", sb.containerID, err)
 	}
 	return nil
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
index dbe3a3e..2bb4350 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -68,6 +68,7 @@
 	DefaultGatewayIPv6 net.IP
 	dbIndex            uint64
 	dbExists           bool
+	Internal           bool
 }
 
 // endpointConfiguration represents the user specified configuration for the sandbox endpoint
@@ -134,7 +135,7 @@
 	if err := iptables.FirewalldInit(); err != nil {
 		logrus.Debugf("Fail to initialize firewalld: %v, using raw iptables instead", err)
 	}
-	removeIPChains()
+
 	d := newDriver()
 	if err := d.configure(config); err != nil {
 		return err
@@ -280,16 +281,25 @@
 // from each of the other networks
 func (n *bridgeNetwork) isolateNetwork(others []*bridgeNetwork, enable bool) error {
 	n.Lock()
-	thisIface := n.config.BridgeName
+	thisConfig := n.config
 	n.Unlock()
 
+	if thisConfig.Internal {
+		return nil
+	}
+
 	// Install the rules to isolate this networks against each of the other networks
 	for _, o := range others {
 		o.Lock()
-		otherIface := o.config.BridgeName
+		otherConfig := o.config
 		o.Unlock()
-		if thisIface != otherIface {
-			if err := setINC(thisIface, otherIface, enable); err != nil {
+
+		if otherConfig.Internal {
+			continue
+		}
+
+		if thisConfig.BridgeName != otherConfig.BridgeName {
+			if err := setINC(thisConfig.BridgeName, otherConfig.BridgeName, enable); err != nil {
 				return err
 			}
 		}
@@ -368,6 +378,7 @@
 	}
 
 	if config.EnableIPTables {
+		removeIPChains()
 		natChain, filterChain, isolationChain, err = setupIPChains(config)
 		if err != nil {
 			return err
@@ -483,7 +494,7 @@
 
 	if val, ok := option[netlabel.Internal]; ok {
 		if internal, ok := val.(bool); ok && internal {
-			return nil, &driverapi.ErrNotImplemented{}
+			config.Internal = true
 		}
 	}
 
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
index 60329c8..1d523d6 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
@@ -82,38 +82,46 @@
 		IP:   ipnet.IP.Mask(ipnet.Mask),
 		Mask: ipnet.Mask,
 	}
-	if err = setupIPTablesInternal(config.BridgeName, maskedAddrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, true); err != nil {
-		return fmt.Errorf("Failed to Setup IP tables: %s", err.Error())
-	}
-	n.registerIptCleanFunc(func() error {
-		return setupIPTablesInternal(config.BridgeName, maskedAddrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, false)
-	})
+	if config.Internal {
+		if err = setupInternalNetworkRules(config.BridgeName, maskedAddrv4, true); err != nil {
+			return fmt.Errorf("Failed to Setup IP tables: %s", err.Error())
+		}
+		n.registerIptCleanFunc(func() error {
+			return setupInternalNetworkRules(config.BridgeName, maskedAddrv4, false)
+		})
+	} else {
+		if err = setupIPTablesInternal(config.BridgeName, maskedAddrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, true); err != nil {
+			return fmt.Errorf("Failed to Setup IP tables: %s", err.Error())
+		}
+		n.registerIptCleanFunc(func() error {
+			return setupIPTablesInternal(config.BridgeName, maskedAddrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, false)
+		})
+		natChain, filterChain, _, err := n.getDriverChains()
+		if err != nil {
+			return fmt.Errorf("Failed to setup IP tables, cannot acquire chain info %s", err.Error())
+		}
 
-	natChain, filterChain, _, err := n.getDriverChains()
-	if err != nil {
-		return fmt.Errorf("Failed to setup IP tables, cannot acquire chain info %s", err.Error())
-	}
+		err = iptables.ProgramChain(natChain, config.BridgeName, hairpinMode, true)
+		if err != nil {
+			return fmt.Errorf("Failed to program NAT chain: %s", err.Error())
+		}
 
-	err = iptables.ProgramChain(natChain, config.BridgeName, hairpinMode, true)
-	if err != nil {
-		return fmt.Errorf("Failed to program NAT chain: %s", err.Error())
-	}
+		err = iptables.ProgramChain(filterChain, config.BridgeName, hairpinMode, true)
+		if err != nil {
+			return fmt.Errorf("Failed to program FILTER chain: %s", err.Error())
+		}
 
-	err = iptables.ProgramChain(filterChain, config.BridgeName, hairpinMode, true)
-	if err != nil {
-		return fmt.Errorf("Failed to program FILTER chain: %s", err.Error())
+		n.registerIptCleanFunc(func() error {
+			return iptables.ProgramChain(filterChain, config.BridgeName, hairpinMode, false)
+		})
+
+		n.portMapper.SetIptablesChain(filterChain, n.getNetworkBridgeName())
 	}
 
 	if err := ensureJumpRule("FORWARD", IsolationChain); err != nil {
 		return err
 	}
 
-	n.registerIptCleanFunc(func() error {
-		return iptables.ProgramChain(filterChain, config.BridgeName, hairpinMode, false)
-	})
-
-	n.portMapper.SetIptablesChain(filterChain, n.getNetworkBridgeName())
-
 	return nil
 }
 
@@ -312,12 +320,26 @@
 
 func removeIPChains() {
 	for _, chainInfo := range []iptables.ChainInfo{
-		iptables.ChainInfo{Name: DockerChain, Table: iptables.Nat},
-		iptables.ChainInfo{Name: DockerChain, Table: iptables.Filter},
-		iptables.ChainInfo{Name: IsolationChain, Table: iptables.Filter},
+		{Name: DockerChain, Table: iptables.Nat},
+		{Name: DockerChain, Table: iptables.Filter},
+		{Name: IsolationChain, Table: iptables.Filter},
 	} {
 		if err := chainInfo.Remove(); err != nil {
 			logrus.Warnf("Failed to remove existing iptables entries in table %s chain %s : %v", chainInfo.Table, chainInfo.Name, err)
 		}
 	}
 }
+
+func setupInternalNetworkRules(bridgeIface string, addr net.Addr, insert bool) error {
+	var (
+		inDropRule  = iptRule{table: iptables.Filter, chain: IsolationChain, args: []string{"-i", bridgeIface, "!", "-d", addr.String(), "-j", "DROP"}}
+		outDropRule = iptRule{table: iptables.Filter, chain: IsolationChain, args: []string{"-o", bridgeIface, "!", "-s", addr.String(), "-j", "DROP"}}
+	)
+	if err := programChainRule(inDropRule, "DROP INCOMING", insert); err != nil {
+		return err
+	}
+	if err := programChainRule(outDropRule, "DROP OUTGOING", insert); err != nil {
+		return err
+	}
+	return nil
+}
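setupInternalNetworkRules isolates an internal network by dropping any packet whose other end lies outside the network's own subnet, in both directions. A sketch of the masked address that feeds those two DROP rules (the bridge name and CIDR are illustrative):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// ParseCIDR already masks the network address, mirroring the
	// maskedAddrv4 computation earlier in this hunk.
	_, ipnet, _ := net.ParseCIDR("172.28.5.3/16")
	fmt.Println("-i br-internal ! -d", ipnet.String(), "-j DROP") // inbound from elsewhere
	fmt.Println("-o br-internal ! -s", ipnet.String(), "-j DROP") // outbound to elsewhere
}
```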
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint.go b/vendor/src/github.com/docker/libnetwork/endpoint.go
index de08c42..88312e9 100644
--- a/vendor/src/github.com/docker/libnetwork/endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/endpoint.go
@@ -41,7 +41,7 @@
 	DriverInfo() (map[string]interface{}, error)
 
 	// Delete and detaches this endpoint from the network.
-	Delete() error
+	Delete(force bool) error
 }
 
 // EndpointOption is an option setter function type used to pass various options to Network
@@ -56,6 +56,7 @@
 	iface             *endpointInterface
 	joinInfo          *endpointJoinInfo
 	sandboxID         string
+	locator           string
 	exposedPorts      []types.TransportPort
 	anonymous         bool
 	disableResolution bool
@@ -84,6 +85,7 @@
 		epMap["generic"] = ep.generic
 	}
 	epMap["sandbox"] = ep.sandboxID
+	epMap["locator"] = ep.locator
 	epMap["anonymous"] = ep.anonymous
 	epMap["disableResolution"] = ep.disableResolution
 	epMap["myAliases"] = ep.myAliases
@@ -167,6 +169,9 @@
 	if v, ok := epMap["disableResolution"]; ok {
 		ep.disableResolution = v.(bool)
 	}
+	if l, ok := epMap["locator"]; ok {
+		ep.locator = l.(string)
+	}
 	ma, _ := json.Marshal(epMap["myAliases"])
 	var myAliases []string
 	json.Unmarshal(ma, &myAliases)
@@ -186,6 +191,7 @@
 	dstEp.name = ep.name
 	dstEp.id = ep.id
 	dstEp.sandboxID = ep.sandboxID
+	dstEp.locator = ep.locator
 	dstEp.dbIndex = ep.dbIndex
 	dstEp.dbExists = ep.dbExists
 	dstEp.anonymous = ep.anonymous
@@ -600,7 +606,19 @@
 	return sb.clearDefaultGW()
 }
 
-func (ep *endpoint) Delete() error {
+func (n *network) validateForceDelete(locator string) error {
+	if n.Scope() == datastore.LocalScope {
+		return nil
+	}
+
+	if locator == "" {
+		return fmt.Errorf("invalid endpoint locator identifier")
+	}
+
+	return nil
+}
+
+func (ep *endpoint) Delete(force bool) error {
 	var err error
 	n, err := ep.getNetworkFromStore()
 	if err != nil {
@@ -615,18 +633,33 @@
 	ep.Lock()
 	epid := ep.id
 	name := ep.name
-	sb, _ := n.getController().SandboxByID(ep.sandboxID)
-	if sb != nil {
-		ep.Unlock()
+	sbid := ep.sandboxID
+	locator := ep.locator
+	ep.Unlock()
+
+	if force {
+		if err = n.validateForceDelete(locator); err != nil {
+			return fmt.Errorf("unable to force delete endpoint %s: %v", name, err)
+		}
+	}
+
+	sb, _ := n.getController().SandboxByID(sbid)
+	if sb != nil && !force {
 		return &ActiveContainerError{name: name, id: epid}
 	}
-	ep.Unlock()
+
+	if sb != nil {
+		if e := ep.sbLeave(sb); e != nil {
+			log.Warnf("failed to leave sandbox for endpoint %s : %v", name, e)
+		}
+	}
 
 	if err = n.getController().deleteFromStore(ep); err != nil {
 		return err
 	}
+
 	defer func() {
-		if err != nil {
+		if err != nil && !force {
 			ep.dbExists = false
 			if e := n.getController().updateToStore(ep); e != nil {
 				log.Warnf("failed to recreate endpoint in store %s : %v", name, e)
@@ -634,11 +667,11 @@
 		}
 	}()
 
-	if err = n.getEpCnt().DecEndpointCnt(); err != nil {
+	if err = n.getEpCnt().DecEndpointCnt(); err != nil && !force {
 		return err
 	}
 	defer func() {
-		if err != nil {
+		if err != nil && !force {
 			if e := n.getEpCnt().IncEndpointCnt(); e != nil {
 				log.Warnf("failed to update network %s : %v", n.name, e)
 			}
@@ -648,7 +681,7 @@
 	// unwatch for service records
 	n.getController().unWatchSvcRecord(ep)
 
-	if err = ep.deleteEndpoint(); err != nil {
+	if err = ep.deleteEndpoint(); err != nil && !force {
 		return err
 	}
 
@@ -683,8 +716,8 @@
 }
 
 func (ep *endpoint) getSandbox() (*sandbox, bool) {
-	ep.Lock()
 	c := ep.network.getController()
+	ep.Lock()
 	sid := ep.sandboxID
 	ep.Unlock()
 
@@ -923,7 +956,7 @@
 		}
 
 		for _, ep := range epl {
-			if err := ep.Delete(); err != nil {
+			if err := ep.Delete(false); err != nil {
 				log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err)
 			}
 		}
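Callers now choose the deletion mode explicitly: the regular paths above pass false and keep the active-container guard, while operator-driven cleanup can retry with force. A hedged sketch of that pattern (the helper is an assumption, not part of this change):

```go
package main

import "github.com/docker/libnetwork"

// deleteEndpoint tries a normal delete first and only forces when the
// endpoint is still attached to a running container.
func deleteEndpoint(ep libnetwork.Endpoint) error {
	err := ep.Delete(false)
	if _, active := err.(*libnetwork.ActiveContainerError); active {
		return ep.Delete(true) // skips the guard; sandbox detach becomes best-effort
	}
	return err
}

func main() {}
```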
diff --git a/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
index 256a89d..5f68372 100644
--- a/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
+++ b/vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go
@@ -182,6 +182,6 @@
 	if err != nil {
 		return err
 	}
-	var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)", regexp.QuoteMeta(hostname)))
-	return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2")), 0644)
+	var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)(\\s|\\.)", regexp.QuoteMeta(hostname)))
+	return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2"+"$3")), 0644)
 }
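The added (\s|\.) group anchors the hostname so that updating one entry can no longer rewrite entries it merely prefixes, which was the docker/docker#19080 regression. A standalone sketch of the difference (IP addresses and names are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	hostname := "web"
	re := regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)(\\s|\\.)", regexp.QuoteMeta(hostname)))
	hosts := []byte("10.0.0.2\tweb\n10.0.0.3\twebapp\n")
	// Only the exact "web" entry is rewritten; "webapp" is left alone.
	fmt.Printf("%s", re.ReplaceAll(hosts, []byte("10.0.0.9"+"$2"+"$3")))
}
```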
diff --git a/vendor/src/github.com/docker/libnetwork/ipam/allocator.go b/vendor/src/github.com/docker/libnetwork/ipam/allocator.go
index be8b4ac..ce404e2 100644
--- a/vendor/src/github.com/docker/libnetwork/ipam/allocator.go
+++ b/vendor/src/github.com/docker/libnetwork/ipam/allocator.go
@@ -70,6 +70,9 @@
 		}
 	}
 
+	a.checkConsistency(localAddressSpace)
+	a.checkConsistency(globalAddressSpace)
+
 	return a, nil
 }
 
@@ -115,6 +118,25 @@
 	return nil
 }
 
+// checkConsistency checks for and fixes a damaged bitmask. It is meant to be called from the constructor only.
+func (a *Allocator) checkConsistency(as string) {
+	// Retrieve this address space's configuration and bitmasks from the datastore
+	a.refresh(as)
+	aSpace, ok := a.addrSpaces[as]
+	if !ok {
+		return
+	}
+	a.updateBitMasks(aSpace)
+	for sk, pd := range aSpace.subnets {
+		if pd.Range != nil {
+			continue
+		}
+		if err := a.addresses[sk].CheckConsistency(); err != nil {
+			log.Warnf("Error while running consistency check for %s: %v", sk, err)
+		}
+	}
+}
+
 // GetDefaultAddressSpaces returns the local and global default address spaces
 func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) {
 	return localAddressSpace, globalAddressSpace, nil
diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go
index 2fe4906..7449c90 100644
--- a/vendor/src/github.com/docker/libnetwork/network.go
+++ b/vendor/src/github.com/docker/libnetwork/network.go
@@ -681,6 +681,7 @@
 	// Initialize ep.network with a possibly stale copy of n. We need this to get network from
 	// store. But once we get it from store we will have the most uptodate copy possible.
 	ep.network = n
+	ep.locator = n.getController().clusterHostID()
 	ep.network, err = ep.getNetworkFromStore()
 	if err != nil {
 		return nil, fmt.Errorf("failed to get network during CreateEndpoint: %v", err)
@@ -821,20 +822,20 @@
 }
 
 func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) {
-	if ep.isAnonymous() {
-		return
-	}
-
 	epName := ep.Name()
 	if iface := ep.Iface(); iface.Address() != nil {
 		myAliases := ep.MyAliases()
 		if isAdd {
-			n.addSvcRecords(epName, iface.Address().IP, true)
+			if !ep.isAnonymous() {
+				n.addSvcRecords(epName, iface.Address().IP, true)
+			}
 			for _, alias := range myAliases {
 				n.addSvcRecords(alias, iface.Address().IP, false)
 			}
 		} else {
-			n.deleteSvcRecords(epName, iface.Address().IP, true)
+			if !ep.isAnonymous() {
+				n.deleteSvcRecords(epName, iface.Address().IP, true)
+			}
 			for _, alias := range myAliases {
 				n.deleteSvcRecords(alias, iface.Address().IP, false)
 			}
diff --git a/vendor/src/github.com/docker/libnetwork/resolver.go b/vendor/src/github.com/docker/libnetwork/resolver.go
index 3cd74e0..d395ab4 100644
--- a/vendor/src/github.com/docker/libnetwork/resolver.go
+++ b/vendor/src/github.com/docker/libnetwork/resolver.go
@@ -15,7 +15,8 @@
 type Resolver interface {
 	// Start starts the name server for the container
 	Start() error
-	// Stop stops the name server for the container
+	// Stop stops the name server for the container. A stopped resolver
+	// can be reused after running SetupFunc again.
 	Stop()
 	// SetupFunc() provides the setup function that should be run
 	// in the container's network namespace.
@@ -102,6 +103,8 @@
 	if r.server != nil {
 		r.server.Shutdown()
 	}
+	r.conn = nil
+	r.err = fmt.Errorf("setup not done yet")
 }
 
 func (r *resolver) SetExtServers(dns []string) {
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox.go
index 2f69897..9dbb100 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox.go
@@ -198,7 +198,7 @@
 			log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
 		}
 
-		if err := ep.Delete(); err != nil {
+		if err := ep.Delete(false); err != nil {
 			log.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err)
 		}
 	}
@@ -488,23 +488,22 @@
 }
 
 func (sb *sandbox) SetKey(basePath string) error {
-	var err error
 	if basePath == "" {
 		return types.BadRequestErrorf("invalid sandbox key")
 	}
 
 	sb.Lock()
-	osSbox := sb.osSbox
+	oldosSbox := sb.osSbox
 	sb.Unlock()
 
-	if osSbox != nil {
+	if oldosSbox != nil {
 		// If we already have an OS sandbox, release the network resources from that
 		// and destroy the OS sandbox. We are moving into a new home further down. Note that none
 		// of the network resources gets destroyed during the move.
 		sb.releaseOSSbox()
 	}
 
-	osSbox, err = osl.GetSandboxForExternalKey(basePath, sb.Key())
+	osSbox, err := osl.GetSandboxForExternalKey(basePath, sb.Key())
 	if err != nil {
 		return err
 	}
@@ -520,6 +519,17 @@
 		}
 	}()
 
+	// If the resolver was set up before, stop it and set it up again in the
+	// new osl sandbox.
+	if oldosSbox != nil && sb.resolver != nil {
+		sb.resolver.Stop()
+
+		sb.osSbox.InvokeFunc(sb.resolver.SetupFunc())
+		if err := sb.resolver.Start(); err != nil {
+			log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err)
+		}
+	}
+
 	for _, ep := range sb.getConnectedEndpoints() {
 		if err = sb.populateNetworkResources(ep); err != nil {
 			return err
diff --git a/vendor/src/github.com/docker/libnetwork/store.go b/vendor/src/github.com/docker/libnetwork/store.go
index a7682e5..be3e8ae 100644
--- a/vendor/src/github.com/docker/libnetwork/store.go
+++ b/vendor/src/github.com/docker/libnetwork/store.go
@@ -129,7 +129,9 @@
 
 		for _, kvo := range kvol {
 			n := kvo.(*network)
+			n.Lock()
 			n.ctrlr = c
+			n.Unlock()
 
 			ec := &endpointCnt{n: n}
 			err = store.GetObject(datastore.Key(ec.Key()...), ec)
diff --git a/vendor/src/github.com/docker/notary/CONTRIBUTING.md b/vendor/src/github.com/docker/notary/CONTRIBUTING.md
index dbe4fe2..0d4d16f 100644
--- a/vendor/src/github.com/docker/notary/CONTRIBUTING.md
+++ b/vendor/src/github.com/docker/notary/CONTRIBUTING.md
@@ -19,7 +19,6 @@
 Then please do not open an issue here yet - you should first try one of the following support forums:
 
  - irc: #docker-trust on freenode
- - mailing-list: <trust@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/trust
 
 ## Reporting an issue properly
 
diff --git a/vendor/src/github.com/docker/notary/MAINTAINERS b/vendor/src/github.com/docker/notary/MAINTAINERS
index 73741d6..999e280 100644
--- a/vendor/src/github.com/docker/notary/MAINTAINERS
+++ b/vendor/src/github.com/docker/notary/MAINTAINERS
@@ -16,6 +16,7 @@
 			"dmcgowan",
 			"endophage",
 			"nathanmccauley",
+			"riyazdf",
 		]
 
 [people]
@@ -50,3 +51,8 @@
 	Name = "Nathan McCauley"
 	Email = "nathan.mccauley@docker.com"
 	GitHub = "nathanmccauley"
+
+	[people.riyazdf]
+	Name = "Riyaz Faizullabhoy"
+	Email = "riyaz@docker.com"
+	GitHub = "riyazdf"
diff --git a/vendor/src/github.com/docker/notary/Makefile b/vendor/src/github.com/docker/notary/Makefile
index 641ba69..c632423 100644
--- a/vendor/src/github.com/docker/notary/Makefile
+++ b/vendor/src/github.com/docker/notary/Makefile
@@ -92,14 +92,21 @@
 	@echo "+ $@"
 	@go build -tags "${NOTARY_BUILDTAGS}" -v ${GO_LDFLAGS} ./...
 
+# When running `go test ./...`, it runs all the suites in parallel, which causes
+# problems when running with a yubikey
 test: TESTOPTS =
 test: go_version
+	@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
 	@echo "+ $@ $(TESTOPTS)"
+	@echo
 	go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) ./...
 
+test-full: TESTOPTS =
 test-full: vet lint
+	@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
 	@echo "+ $@"
-	go test -tags "${NOTARY_BUILDTAGS}" -v ./...
+	@echo
+	go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) -v ./...
 
 protos:
 	@protoc --go_out=plugins=grpc:. proto/*.proto
@@ -118,14 +125,18 @@
 	@mkdir -p "$(COVERDIR)"
 	$(foreach PKG,$(PKGS),$(call gocover,$(PKG)))
 
+# Generates the cover binaries and runs them all in serial, so this can be used
+# to run all tests with a yubikey without any problems
 cover: GO_EXC := go
        OPTS = -tags "${NOTARY_BUILDTAGS}" -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))"
 cover: gen-cover covmerge
 	@go tool cover -html="$(COVERPROFILE)"
 
-# Codecov knows how to merge multiple coverage files
+# Generates the cover binaries and runs them all in serial, so this can be used
+# to run all tests with a yubikey without any problems
 ci: OPTS = -tags "${NOTARY_BUILDTAGS}" -race -coverpkg "$(shell ./coverpkg.sh $(1) $(NOTARY_PKG))"
     GO_EXC := godep go
+# Codecov knows how to merge multiple coverage files, so covmerge is not needed
 ci: gen-cover
 
 covmerge:
@@ -151,10 +162,10 @@
 	@docker build --rm --force-rm -t notary .
 
 server-dockerfile:
-	@docker build --rm --force-rm -f Dockerfile.server -t notary-server .
+	@docker build --rm --force-rm -f server.Dockerfile -t notary-server .
 
 signer-dockerfile:
-	@docker build --rm --force-rm -f Dockerfile.signer -t notary-signer .
+	@docker build --rm --force-rm -f signer.Dockerfile -t notary-signer .
 
 docker-images: notary-dockerfile server-dockerfile signer-dockerfile
 
diff --git a/vendor/src/github.com/docker/notary/client/client.go b/vendor/src/github.com/docker/notary/client/client.go
index b065936..a3c7b7f 100644
--- a/vendor/src/github.com/docker/notary/client/client.go
+++ b/vendor/src/github.com/docker/notary/client/client.go
@@ -419,7 +419,7 @@
 // subtree and also the "targets/x" subtree, as we will defer parsing it until
 // we explicitly reach it in our iteration of the provided list of roles.
 func (r *NotaryRepository) ListTargets(roles ...string) ([]*TargetWithRole, error) {
-	_, err := r.Update()
+	_, err := r.Update(false)
 	if err != nil {
 		return nil, err
 	}
@@ -479,7 +479,7 @@
 // will be returned
 // See the IMPORTANT section on ListTargets above. Those roles also apply here.
 func (r *NotaryRepository) GetTargetByName(name string, roles ...string) (*TargetWithRole, error) {
-	c, err := r.Update()
+	c, err := r.Update(false)
 	if err != nil {
 		return nil, err
 	}
@@ -514,7 +514,7 @@
 func (r *NotaryRepository) Publish() error {
 	var initialPublish bool
 	// update first before publishing
-	_, err := r.Update()
+	_, err := r.Update(true)
 	if err != nil {
 		// If the remote is not aware of the repo, then this is being published
 		// for the first time.  Try to load from disk instead for publishing.
@@ -555,13 +555,21 @@
 	// we send anything to remote
 	updatedFiles := make(map[string][]byte)
 
-	// check if our root file is nearing expiry. Resign if it is.
-	if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty || initialPublish {
+	// check if our root file is nearing expiry or is dirty. Re-sign if so. If
+	// root is not dirty but we are publishing for the first time, then just
+	// publish the existing root we have.
+	if nearExpiry(r.tufRepo.Root) || r.tufRepo.Root.Dirty {
 		rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole)
 		if err != nil {
 			return err
 		}
 		updatedFiles[data.CanonicalRootRole] = rootJSON
+	} else if initialPublish {
+		rootJSON, err := r.tufRepo.Root.MarshalJSON()
+		if err != nil {
+			return err
+		}
+		updatedFiles[data.CanonicalRootRole] = rootJSON
 	}
 
 	// iterate through all the targets files - if they are dirty, sign and update
@@ -714,75 +722,94 @@
 	return r.fileStore.SetMeta(data.CanonicalSnapshotRole, snapshotJSON)
 }
 
+// returns a properly constructed ErrRepositoryNotExist error based on this
+// repo's information
+func (r *NotaryRepository) errRepositoryNotExist() error {
+	host := r.baseURL
+	parsed, err := url.Parse(r.baseURL)
+	if err == nil {
+		host = parsed.Host // try to exclude the scheme and any paths
+	}
+	return ErrRepositoryNotExist{remote: host, gun: r.gun}
+}
+
 // Update bootstraps a trust anchor (root.json) before updating all the
 // metadata from the repo.
-func (r *NotaryRepository) Update() (*tufclient.Client, error) {
-	c, err := r.bootstrapClient()
+func (r *NotaryRepository) Update(forWrite bool) (*tufclient.Client, error) {
+	c, err := r.bootstrapClient(forWrite)
 	if err != nil {
 		if _, ok := err.(store.ErrMetaNotFound); ok {
-			host := r.baseURL
-			parsed, err := url.Parse(r.baseURL)
-			if err == nil {
-				host = parsed.Host // try to exclude the scheme and any paths
-			}
-			return nil, ErrRepositoryNotExist{remote: host, gun: r.gun}
+			return nil, r.errRepositoryNotExist()
 		}
 		return nil, err
 	}
 	err = c.Update()
 	if err != nil {
+		if notFound, ok := err.(store.ErrMetaNotFound); ok && notFound.Resource == data.CanonicalRootRole {
+			return nil, r.errRepositoryNotExist()
+		}
 		return nil, err
 	}
 	return c, nil
 }
 
-func (r *NotaryRepository) bootstrapClient() (*tufclient.Client, error) {
-	var rootJSON []byte
-	remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
-	if err == nil {
+// bootstrapClient attempts to bootstrap a root.json to be used as the trust
+// anchor for a repository. The checkInitialized argument indicates whether
+// we should always attempt to contact the server to determine if the repository
+// is initialized or not. If set to true, we will always attempt to download
+// and return an error if the remote repository errors.
+func (r *NotaryRepository) bootstrapClient(checkInitialized bool) (*tufclient.Client, error) {
+	var (
+		rootJSON   []byte
+		err        error
+		signedRoot *data.SignedRoot
+	)
+	// try to read root from cache first. We will trust this root
+	// until we detect a problem during update which will cause
+	// us to download a new root and perform a rotation.
+	rootJSON, cachedRootErr := r.fileStore.GetMeta("root", maxSize)
+
+	if cachedRootErr == nil {
+		signedRoot, cachedRootErr = r.validateRoot(rootJSON)
+	}
+
+	remote, remoteErr := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
+	if remoteErr != nil {
+		logrus.Error(remoteErr)
+	} else if cachedRootErr != nil || checkInitialized {
+		// remoteErr was nil and we had a cachedRootErr (or are specifically
+		// checking for initialization of the repo).
+
 		// if remote store successfully set up, try and get root from remote
-		rootJSON, err = remote.GetMeta("root", maxSize)
-	}
-
-	// if remote store couldn't be setup, or we failed to get a root from it
-	// load the root from cache (offline operation)
-	if err != nil {
-		if err, ok := err.(store.ErrMetaNotFound); ok {
-			// if the error was MetaNotFound then we successfully contacted
-			// the store and it doesn't know about the repo.
+		tmpJSON, err := remote.GetMeta("root", maxSize)
+		if err != nil {
+			// we didn't have a root in cache and were unable to load one from
+			// the server. Nothing we can do but error.
 			return nil, err
 		}
-		result, cacheErr := r.fileStore.GetMeta("root", maxSize)
-		if cacheErr != nil {
-			// if cache didn't return a root, we cannot proceed - just return
-			// the original error.
-			return nil, err
-		}
-		rootJSON = result
-		logrus.Debugf(
-			"Using local cache instead of remote due to failure: %s", err.Error())
-	}
-	// can't just unmarshal into SignedRoot because validate root
-	// needs the root.Signed field to still be []byte for signature
-	// validation
-	root := &data.Signed{}
-	err = json.Unmarshal(rootJSON, root)
-	if err != nil {
-		return nil, err
-	}
+		if cachedRootErr != nil {
+			// we always want to use the downloaded root if there was a cache
+			// error.
+			signedRoot, err = r.validateRoot(tmpJSON)
+			if err != nil {
+				return nil, err
+			}
 
-	err = r.CertManager.ValidateRoot(root, r.gun)
-	if err != nil {
-		return nil, err
+			err = r.fileStore.SetMeta("root", tmpJSON)
+			if err != nil {
+				// if we can't write cache we should still continue, just log error
+				logrus.Errorf("could not save root to cache: %s", err.Error())
+			}
+		}
 	}
 
 	kdb := keys.NewDB()
 	r.tufRepo = tuf.NewRepo(kdb, r.CryptoService)
 
-	signedRoot, err := data.RootFromSigned(root)
-	if err != nil {
-		return nil, err
+	if signedRoot == nil {
+		return nil, ErrRepoNotInitialized{}
 	}
+
 	err = r.tufRepo.SetRoot(signedRoot)
 	if err != nil {
 		return nil, err
@@ -796,6 +823,28 @@
 	), nil
 }
 
+// validateRoot MUST only be used during bootstrapping. It will only validate
+// signatures of the root based on known keys, not expiry or other metadata.
+// This is so that an out of date root can be loaded to be used in a rotation
+// should the TUF update process detect a problem.
+func (r *NotaryRepository) validateRoot(rootJSON []byte) (*data.SignedRoot, error) {
+	// can't just unmarshal into SignedRoot because validate root
+	// needs the root.Signed field to still be []byte for signature
+	// validation
+	root := &data.Signed{}
+	err := json.Unmarshal(rootJSON, root)
+	if err != nil {
+		return nil, err
+	}
+
+	err = r.CertManager.ValidateRoot(root, r.gun)
+	if err != nil {
+		return nil, err
+	}
+
+	return data.RootFromSigned(root)
+}
+
 // RotateKey removes all existing keys associated with the role, and either
 // creates and adds one new key or delegates managing the key to the server.
 // These changes are staged in a changelist until publish is called.
diff --git a/vendor/src/github.com/docker/notary/client/helpers.go b/vendor/src/github.com/docker/notary/client/helpers.go
index 23b9249..304ac3d 100644
--- a/vendor/src/github.com/docker/notary/client/helpers.go
+++ b/vendor/src/github.com/docker/notary/client/helpers.go
@@ -17,7 +17,7 @@
 
 // Use this to initialize remote HTTPStores from the config settings
 func getRemoteStore(baseURL, gun string, rt http.RoundTripper) (store.RemoteStore, error) {
-	return store.NewHTTPStore(
+	s, err := store.NewHTTPStore(
 		baseURL+"/v2/"+gun+"/_trust/tuf/",
 		"",
 		"json",
@@ -25,6 +25,10 @@
 		"key",
 		rt,
 	)
+	if err != nil {
+		return store.OfflineStore{}, err
+	}
+	return s, err
 }
 
 func applyChangelist(repo *tuf.Repo, cl changelist.Changelist) error {
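Returning a concrete OfflineStore instead of a nil interface means callers never need a nil check; every method on the fallback simply reports ErrOffline (the type is added later in this diff). A hypothetical in-package helper showing the resulting contract (not part of this change):

```go
// fetchRoot illustrates the new getRemoteStore contract: remote is always
// usable, and the offline case surfaces as store.ErrOffline on each call.
func (r *NotaryRepository) fetchRoot() ([]byte, error) {
	remote, err := getRemoteStore(r.baseURL, r.gun, r.roundTrip)
	if err != nil {
		logrus.Error(err) // remote is still usable: an OfflineStore
	}
	raw, err := remote.GetMeta("root", maxSize)
	if _, offline := err.(store.ErrOffline); offline {
		return r.fileStore.GetMeta("root", maxSize) // fall back to cache
	}
	return raw, err
}
```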
diff --git a/vendor/src/github.com/docker/notary/client/repo_pkcs11.go b/vendor/src/github.com/docker/notary/client/repo_pkcs11.go
index b93f9bf..dd697ff 100644
--- a/vendor/src/github.com/docker/notary/client/repo_pkcs11.go
+++ b/vendor/src/github.com/docker/notary/client/repo_pkcs11.go
@@ -26,7 +26,7 @@
 	keyStores := []trustmanager.KeyStore{fileKeyStore}
 	yubiKeyStore, _ := yubikey.NewYubiKeyStore(fileKeyStore, retriever)
 	if yubiKeyStore != nil {
-		keyStores = append(keyStores, yubiKeyStore)
+		keyStores = []trustmanager.KeyStore{yubiKeyStore, fileKeyStore}
 	}
 
 	return repositoryFromKeystores(baseDir, gun, baseURL, rt, keyStores)
diff --git a/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go b/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go
index d473564..f5bfa07 100644
--- a/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go
+++ b/vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go
@@ -73,9 +73,9 @@
 
 }
 
-// GetPrivateKey returns a private key by ID. It tries to get the key first
-// without a GUN (in which case it's a root key).  If that fails, try to get
-// the key with the GUN (non-root key).
+// GetPrivateKey returns a private key and its role, looked up by ID, if present.
+// It tries to get the key first without a GUN (in which case it's a root key).
+// If that fails, try to get the key with the GUN (non-root key).
 // If that fails, then we don't have the key.
 func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role string, err error) {
 	keyPaths := []string{keyID, filepath.Join(cs.gun, keyID)}
diff --git a/vendor/src/github.com/docker/notary/docker-compose.yml b/vendor/src/github.com/docker/notary/docker-compose.yml
index 5bd578c..17b5798 100644
--- a/vendor/src/github.com/docker/notary/docker-compose.yml
+++ b/vendor/src/github.com/docker/notary/docker-compose.yml
@@ -1,6 +1,6 @@
 notaryserver:
   build: .
-  dockerfile: Dockerfile.server
+  dockerfile: server.Dockerfile
   links:
    - notarymysql
    - notarysigner
@@ -15,7 +15,7 @@
    - /dev/bus/usb/003/010:/dev/bus/usb/002/010
    - /var/run/pcscd/pcscd.comm:/var/run/pcscd/pcscd.comm
   build: .
-  dockerfile: Dockerfile.signer
+  dockerfile: signer.Dockerfile
   links:
    - notarymysql
   command: -config=fixtures/signer-config.json
diff --git a/vendor/src/github.com/docker/notary/Dockerfile.server b/vendor/src/github.com/docker/notary/server.Dockerfile
similarity index 100%
rename from vendor/src/github.com/docker/notary/Dockerfile.server
rename to vendor/src/github.com/docker/notary/server.Dockerfile
diff --git a/vendor/src/github.com/docker/notary/Dockerfile.signer b/vendor/src/github.com/docker/notary/signer.Dockerfile
similarity index 100%
rename from vendor/src/github.com/docker/notary/Dockerfile.signer
rename to vendor/src/github.com/docker/notary/signer.Dockerfile
diff --git a/vendor/src/github.com/docker/notary/tuf/client/client.go b/vendor/src/github.com/docker/notary/tuf/client/client.go
index 9d28a8c..263ee42 100644
--- a/vendor/src/github.com/docker/notary/tuf/client/client.go
+++ b/vendor/src/github.com/docker/notary/tuf/client/client.go
@@ -129,6 +129,7 @@
 
 // downloadRoot is responsible for downloading the root.json
 func (c *Client) downloadRoot() error {
+	logrus.Debug("Downloading Root...")
 	role := data.CanonicalRootRole
 	size := maxSize
 	var expectedSha256 []byte
@@ -240,7 +241,7 @@
 // Timestamps are special in that we ALWAYS attempt to download and only
 // use cache if the download fails (and the cache is still valid).
 func (c *Client) downloadTimestamp() error {
-	logrus.Debug("downloadTimestamp")
+	logrus.Debug("Downloading Timestamp...")
 	role := data.CanonicalTimestampRole
 
 	// We may not have a cached timestamp if this is the first time
@@ -299,7 +300,7 @@
 
 // downloadSnapshot is responsible for downloading the snapshot.json
 func (c *Client) downloadSnapshot() error {
-	logrus.Debug("downloadSnapshot")
+	logrus.Debug("Downloading Snapshot...")
 	role := data.CanonicalSnapshotRole
 	if c.local.Timestamp == nil {
 		return ErrMissingMeta{role: "snapshot"}
@@ -372,6 +373,7 @@
 // It uses a pre-order tree traversal as it's necessary to download parents first
 // to obtain the keys to validate children.
 func (c *Client) downloadTargets(role string) error {
+	logrus.Debug("Downloading Targets...")
 	stack := utils.NewStack()
 	stack.Push(role)
 	for !stack.Empty() {
diff --git a/vendor/src/github.com/docker/notary/tuf/data/root.go b/vendor/src/github.com/docker/notary/tuf/data/root.go
index 9ef8cd6..e555cbd 100644
--- a/vendor/src/github.com/docker/notary/tuf/data/root.go
+++ b/vendor/src/github.com/docker/notary/tuf/data/root.go
@@ -43,10 +43,11 @@
 
 // ToSigned partially serializes a SignedRoot for further signing
 func (r SignedRoot) ToSigned() (*Signed, error) {
-	s, err := json.MarshalCanonical(r.Signed)
+	s, err := defaultSerializer.MarshalCanonical(r.Signed)
 	if err != nil {
 		return nil, err
 	}
+	// cast into a json.RawMessage
 	signed := json.RawMessage{}
 	err = signed.UnmarshalJSON(s)
 	if err != nil {
@@ -60,6 +61,15 @@
 	}, nil
 }
 
+// MarshalJSON returns the serialized form of SignedRoot as bytes
+func (r SignedRoot) MarshalJSON() ([]byte, error) {
+	signed, err := r.ToSigned()
+	if err != nil {
+		return nil, err
+	}
+	return defaultSerializer.Marshal(signed)
+}
+
 // RootFromSigned fully unpacks a Signed object into a SignedRoot
 func RootFromSigned(s *Signed) (*SignedRoot, error) {
 	r := Root{}
diff --git a/vendor/src/github.com/docker/notary/tuf/data/serializer.go b/vendor/src/github.com/docker/notary/tuf/data/serializer.go
new file mode 100644
index 0000000..91fa1bc
--- /dev/null
+++ b/vendor/src/github.com/docker/notary/tuf/data/serializer.go
@@ -0,0 +1,36 @@
+package data
+
+import "github.com/jfrazelle/go/canonical/json"
+
+// serializer is an interface that can marshal and unmarshal TUF data. This
+// is expected to be a canonical JSON marshaller.
+type serializer interface {
+	MarshalCanonical(from interface{}) ([]byte, error)
+	Marshal(from interface{}) ([]byte, error)
+	Unmarshal(from []byte, to interface{}) error
+}
+
+// CanonicalJSON marshals to and from canonical JSON
+type canonicalJSON struct{}
+
+// MarshalCanonical returns the canonical JSON form of a thing
+func (c canonicalJSON) MarshalCanonical(from interface{}) ([]byte, error) {
+	return json.MarshalCanonical(from)
+}
+
+// Marshal returns the regular non-canonical JSON form of a thing
+func (c canonicalJSON) Marshal(from interface{}) ([]byte, error) {
+	return json.Marshal(from)
+}
+
+// Unmarshal unmarshals some JSON bytes
+func (c canonicalJSON) Unmarshal(from []byte, to interface{}) error {
+	return json.Unmarshal(from, to)
+}
+
+// defaultSerializer is a canonical JSON serializer
+var defaultSerializer serializer = canonicalJSON{}
+
+func setDefaultSerializer(s serializer) {
+	defaultSerializer = s
+}
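The unexported setDefaultSerializer strongly suggests a test seam: a test can swap in a failing serializer and restore the default afterwards. A minimal sketch under that assumption (errSerializer and the test are illustrative, not part of this change):

```go
package data

import (
	"errors"
	"testing"
)

// errSerializer embeds canonicalJSON and forces MarshalCanonical to fail.
type errSerializer struct{ canonicalJSON }

func (errSerializer) MarshalCanonical(interface{}) ([]byte, error) {
	return nil, errors.New("forced marshal failure")
}

func TestToSignedMarshalFailure(t *testing.T) {
	setDefaultSerializer(errSerializer{})
	defer setDefaultSerializer(canonicalJSON{})
	if _, err := (SignedRoot{}).ToSigned(); err == nil {
		t.Fatal("expected the marshal error to propagate")
	}
}
```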
diff --git a/vendor/src/github.com/docker/notary/tuf/signed/ed25519.go b/vendor/src/github.com/docker/notary/tuf/signed/ed25519.go
index 3f7ad1e..e09b550 100644
--- a/vendor/src/github.com/docker/notary/tuf/signed/ed25519.go
+++ b/vendor/src/github.com/docker/notary/tuf/signed/ed25519.go
@@ -95,7 +95,7 @@
 	return data.PublicKeyFromPrivate(e.keys[keyID].privKey)
 }
 
-// GetPrivateKey returns a single private key based on the ID
+// GetPrivateKey returns a single private key and role if present, based on the ID
 func (e *Ed25519) GetPrivateKey(keyID string) (data.PrivateKey, string, error) {
 	if k, ok := e.keys[keyID]; ok {
 		return k.privKey, k.role, nil
diff --git a/vendor/src/github.com/docker/notary/tuf/store/httpstore.go b/vendor/src/github.com/docker/notary/tuf/store/httpstore.go
index 66e4bcc..ef69a61 100644
--- a/vendor/src/github.com/docker/notary/tuf/store/httpstore.go
+++ b/vendor/src/github.com/docker/notary/tuf/store/httpstore.go
@@ -118,12 +118,12 @@
 	return err
 }
 
-func translateStatusToError(resp *http.Response) error {
+func translateStatusToError(resp *http.Response, resource string) error {
 	switch resp.StatusCode {
 	case http.StatusOK:
 		return nil
 	case http.StatusNotFound:
-		return ErrMetaNotFound{}
+		return ErrMetaNotFound{Resource: resource}
 	case http.StatusBadRequest:
 		return tryUnmarshalError(resp, ErrInvalidOperation{})
 	default:
@@ -148,7 +148,7 @@
 		return nil, err
 	}
 	defer resp.Body.Close()
-	if err := translateStatusToError(resp); err != nil {
+	if err := translateStatusToError(resp, name); err != nil {
 		logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
 		return nil, err
 	}
@@ -179,7 +179,7 @@
 		return err
 	}
 	defer resp.Body.Close()
-	return translateStatusToError(resp)
+	return translateStatusToError(resp, "POST "+name)
 }
 
 // NewMultiPartMetaRequest builds a request with the provided metadata updates
@@ -223,7 +223,8 @@
 		return err
 	}
 	defer resp.Body.Close()
-	return translateStatusToError(resp)
+	// if this 404s, something is pretty wrong
+	return translateStatusToError(resp, "POST metadata endpoint")
 }
 
 func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
@@ -271,7 +272,7 @@
 		return nil, err
 	}
 	defer resp.Body.Close()
-	if err := translateStatusToError(resp); err != nil {
+	if err := translateStatusToError(resp, path); err != nil {
 		return nil, err
 	}
 	return resp.Body, nil
@@ -292,7 +293,7 @@
 		return nil, err
 	}
 	defer resp.Body.Close()
-	if err := translateStatusToError(resp); err != nil {
+	if err := translateStatusToError(resp, role+" key"); err != nil {
 		return nil, err
 	}
 	body, err := ioutil.ReadAll(resp.Body)
diff --git a/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go b/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
new file mode 100644
index 0000000..d32e113
--- /dev/null
+++ b/vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
@@ -0,0 +1,43 @@
+package store
+
+import (
+	"io"
+)
+
+// ErrOffline is used to indicate we are operating offline
+type ErrOffline struct{}
+
+func (e ErrOffline) Error() string {
+	return "client is offline"
+}
+
+var err = ErrOffline{}
+
+// OfflineStore is to be used as a placeholder for a nil store. It simply
+// returns ErrOffline for every operation.
+type OfflineStore struct{}
+
+// GetMeta returns ErrOffline
+func (es OfflineStore) GetMeta(name string, size int64) ([]byte, error) {
+	return nil, err
+}
+
+// SetMeta returns ErrOffline
+func (es OfflineStore) SetMeta(name string, blob []byte) error {
+	return err
+}
+
+// SetMultiMeta returns ErrOffline
+func (es OfflineStore) SetMultiMeta(map[string][]byte) error {
+	return err
+}
+
+// GetKey returns ErrOffline
+func (es OfflineStore) GetKey(role string) ([]byte, error) {
+	return nil, err
+}
+
+// GetTarget returns ErrOffline
+func (es OfflineStore) GetTarget(path string) (io.ReadCloser, error) {
+	return nil, err
+}
diff --git a/vendor/src/github.com/imdario/mergo/.travis.yml b/vendor/src/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 0000000..9d91c63
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,2 @@
+language: go
+install: go get -t
diff --git a/vendor/src/github.com/imdario/mergo/LICENSE b/vendor/src/github.com/imdario/mergo/LICENSE
new file mode 100644
index 0000000..6866802
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/imdario/mergo/README.md b/vendor/src/github.com/imdario/mergo/README.md
new file mode 100644
index 0000000..4f0f990
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/README.md
@@ -0,0 +1,122 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
+
+![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
+
+## Status
+
+It is ready for production use. It works fine after extensive use in the wild.
+
+[![Build Status][1]][2]
+[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo)
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+
+### Important note
+
+Mergo is intended to assign **only** zero value fields on destination with source value. It has worked like this since April 6th 2015. Before that it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected.
+
+If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Mergo in the wild
+
+- [imdario/zas](https://github.com/imdario/zas)
+- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+
+## Installation
+
+    go get github.com/imdario/mergo
+
+    // use in your .go code
+    import (
+        "github.com/imdario/mergo"
+    )
+
+## Usage
+
+You can only merge same-type structs (with exported fields initialized as the zero value of their type) and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+    if err := mergo.Merge(&dst, src); err != nil {
+        // ...
+    }
+
+Additionally, you can map a map[string]interface{} to a struct (and the other way around, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+    if err := mergo.Map(&dst, srcMap); err != nil {
+        // ...
+    }
+
+Warning: mapping a struct to a map is not recursive. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will simply be assigned as values.
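+
+For illustration, here is a minimal sketch of that behavior (the `Account` and `Owner` types are invented for this example):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+type Owner struct {
+	Name string
+}
+
+type Account struct {
+	Name  string
+	Owner Owner
+}
+
+func main() {
+	dst := map[string]interface{}{}
+	if err := mergo.Map(&dst, Account{Name: "acme", Owner: Owner{Name: "dario"}}); err != nil {
+		fmt.Println(err)
+		return
+	}
+	// dst["owner"] holds an Owner struct value, not a nested map[string]interface{}.
+	fmt.Printf("%#v\n", dst["owner"])
+}
+```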
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/imdario/mergo"
+)
+
+type Foo struct {
+	A string
+	B int64
+}
+
+func main() {
+	src := Foo{
+		A: "one",
+	}
+
+	dest := Foo{
+		A: "two",
+		B: 2,
+	}
+
+	if err := mergo.Merge(&dest, src); err != nil {
+		// handle the error
+	}
+
+	fmt.Println(dest)
+	// Will print
+	// {two 2}
+}
+```
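+
+Nothing changed above because both fields in `dest` were already non-zero. To see `Merge` fill a field, leave it at its zero value. A minimal variation reusing `src` and `Foo` from the example above:
+
+    dest := Foo{B: 2}       // A is "" (its zero value)
+    mergo.Merge(&dest, src) // error ignored for brevity
+    fmt.Println(dest)
+    // Will print
+    // {one 2}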
+
+Note: if tests are failing due to a missing package, please execute:
+
+    go get gopkg.in/yaml.v1
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/vendor/src/github.com/imdario/mergo/doc.go b/vendor/src/github.com/imdario/mergo/doc.go
new file mode 100644
index 0000000..6e9aa7b
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+	type networkConfig struct {
+		Protocol string
+		Address string
+		ServerType string `json:"server_type"`
+		Port uint16
+	}
+
+	type FssnConfig struct {
+		Network networkConfig
+	}
+
+	var fssnDefault = FssnConfig{
+		networkConfig{
+			"tcp",
+			"127.0.0.1",
+			"http",
+			31560,
+		},
+	}
+
+	// Inside a function [...]
+
+	if err := mergo.Merge(&config, fssnDefault); err != nil {
+		log.Fatal(err)
+	}
+
+	// More code [...]
+
+*/
+package mergo
diff --git a/vendor/src/github.com/imdario/mergo/map.go b/vendor/src/github.com/imdario/mergo/map.go
new file mode 100644
index 0000000..1ed3d71
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/map.go
@@ -0,0 +1,154 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
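+// changeInitialCase returns s with its first rune transformed by mapper
+// (e.g. unicode.ToLower or unicode.ToUpper).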
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
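+// isExported reports whether the struct field is exported; note it only
+// checks for an ASCII 'A'-'Z' first rune.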
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	zeroValue := reflect.Value{}
+	switch dst.Kind() {
+	case reflect.Map:
+		dstMap := dst.Interface().(map[string]interface{})
+		for i, n := 0, src.NumField(); i < n; i++ {
+			srcType := src.Type()
+			field := srcType.Field(i)
+			if !isExported(field) {
+				continue
+			}
+			fieldName := field.Name
+			fieldName = changeInitialCase(fieldName, unicode.ToLower)
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+				dstMap[fieldName] = src.Field(i).Interface()
+			}
+		}
+	case reflect.Struct:
+		srcMap := src.Interface().(map[string]interface{})
+		for key := range srcMap {
+			srcValue := srcMap[key]
+			fieldName := changeInitialCase(key, unicode.ToUpper)
+			dstElement := dst.FieldByName(fieldName)
+			if dstElement == zeroValue {
+				// We discard it because the field doesn't exist.
+				continue
+			}
+			srcElement := reflect.ValueOf(srcValue)
+			dstKind := dstElement.Kind()
+			srcKind := srcElement.Kind()
+			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+				srcElement = srcElement.Elem()
+				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+			} else if dstKind == reflect.Ptr {
+				// If dst expects a pointer but src holds an addressable
+				// non-pointer value, take src's address so the kinds match.
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcElement = srcElement.Addr()
+					srcKind = reflect.Ptr
+				}
+			}
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+					return
+				}
+			} else {
+				if srcKind == reflect.Map {
+					if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+						return
+					}
+				} else {
+					return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+				}
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to a struct. If src is a
+// struct, dst must be a map[string]interface{}.
+// It won't merge unexported (private) fields and will merge any exported
+// field recursively.
+// If dst is a map, its keys will be the src fields' names in lower camel case.
+// A key in src that doesn't match any field in dst will be skipped (this
+// doesn't apply when dst is a map).
+// Map is kept separate from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}) error {
+	return _map(dst, src, false)
+}
+
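+// MapWithOverwrite works like Map, but non-empty dst fields are overwritten
+// with src fields' values.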
+func MapWithOverwrite(dst, src interface{}) error {
+	return _map(dst, src, true)
+}
+
+func _map(dst, src interface{}, overwrite bool) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be frictionless, we redirect equal-type arguments
+	// to deepMerge, since the arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+	}
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
diff --git a/vendor/src/github.com/imdario/mergo/merge.go b/vendor/src/github.com/imdario/mergo/merge.go
new file mode 100644
index 0000000..a7dd9d8
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/merge.go
@@ -0,0 +1,120 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"reflect"
+)
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+	if !src.IsValid() {
+		return
+	}
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	switch dst.Kind() {
+	case reflect.Struct:
+		for i, n := 0, dst.NumField(); i < n; i++ {
+			if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
+				return
+			}
+		}
+	case reflect.Map:
+		for _, key := range src.MapKeys() {
+			srcElement := src.MapIndex(key)
+			if !srcElement.IsValid() {
+				continue
+			}
+			dstElement := dst.MapIndex(key)
+			switch srcElement.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+				if srcElement.IsNil() {
+					continue
+				}
+				fallthrough
+			default:
+				switch reflect.TypeOf(srcElement.Interface()).Kind() {
+				case reflect.Struct:
+					fallthrough
+				case reflect.Ptr:
+					fallthrough
+				case reflect.Map:
+					if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+						return
+					}
+				}
+			}
+			if !isEmptyValue(srcElement) && (overwrite || !dstElement.IsValid() || isEmptyValue(dstElement)) {
+				if dst.IsNil() {
+					dst.Set(reflect.MakeMap(dst.Type()))
+				}
+				dst.SetMapIndex(key, srcElement)
+			}
+		}
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Interface:
+		if src.IsNil() {
+			break
+		} else if dst.IsNil() {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil {
+			return
+		}
+	default:
+		if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	}
+	return
+}
+
+// Merge sets fields' values in dst from src if they hold the zero
+// value of their type.
+// dst and src must be valid same-type structs, and dst must be
+// a pointer to a struct.
+// It won't merge unexported (private) fields and will merge any
+// exported field recursively.
+func Merge(dst, src interface{}) error {
+	return merge(dst, src, false)
+}
+
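+// MergeWithOverwrite works like Merge, but non-empty dst fields are
+// overwritten with src fields' values.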
+func MergeWithOverwrite(dst, src interface{}) error {
+	return merge(dst, src, true)
+}
+
+func merge(dst, src interface{}, overwrite bool) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
diff --git a/vendor/src/github.com/imdario/mergo/mergo.go b/vendor/src/github.com/imdario/mergo/mergo.go
new file mode 100644
index 0000000..f8a0991
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited nodes are stored in a map indexed by 17 * addr.
+type visit struct {
+	ptr  uintptr
+	typ  reflect.Type
+	next *visit
+}
+
+// From src/pkg/encoding/json.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+	if dst == nil || src == nil {
+		err = ErrNilArguments
+		return
+	}
+	vDst = reflect.ValueOf(dst).Elem()
+	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+		err = ErrNotSupported
+		return
+	}
+	vSrc = reflect.ValueOf(src)
+	// If vSrc is a pointer, dereference it so we work with the pointed-to value.
+	if vSrc.Kind() == reflect.Ptr {
+		vSrc = vSrc.Elem()
+	}
+	return
+}
+
+// Traverses both values recursively, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	return // TODO refactor
+}
diff --git a/vendor/src/github.com/opencontainers/specs/.travis.yml b/vendor/src/github.com/opencontainers/specs/.travis.yml
deleted file mode 100644
index 64b57bf..0000000
--- a/vendor/src/github.com/opencontainers/specs/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: go
-go:
-  - 1.5.1
-  - 1.4.3
-  - 1.3.3
-
-sudo: false
-
-before_install:
-  - go get golang.org/x/tools/cmd/vet
-  - go get github.com/golang/lint/golint
-  - go get github.com/vbatts/git-validation
-
-install: true
-
-script:
-  - go vet -x ./...
-  - $HOME/gopath/bin/golint ./...
-  - $HOME/gopath/bin/git-validation -run DCO,short-subject -v -range ${TRAVIS_COMMIT_RANGE}
-  
diff --git a/vendor/src/github.com/opencontainers/specs/LICENSE b/vendor/src/github.com/opencontainers/specs/LICENSE
deleted file mode 100644
index bdc4036..0000000
--- a/vendor/src/github.com/opencontainers/specs/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   Copyright 2015 The Linux Foundation.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/src/github.com/opencontainers/specs/MAINTAINERS b/vendor/src/github.com/opencontainers/specs/MAINTAINERS
deleted file mode 100644
index 286dbe3..0000000
--- a/vendor/src/github.com/opencontainers/specs/MAINTAINERS
+++ /dev/null
@@ -1,8 +0,0 @@
-Michael Crosby <michael@docker.com> (@crosbymichael)
-Alexander Morozov <lk4d4@docker.com> (@LK4D4)
-Vishnu Kannan <vishnuk@google.com> (@vishnuk)
-Mrunal Patel <mpatel@redhat.com> (@mrunalp)
-Vincent Batts <vbatts@redhat.com> (@vbatts)
-Daniel, Dao Quang Minh <dqminh89@gmail.com> (@dqminh)
-Brandon Philips <brandon.philips@coreos.com> (@philips)
-Tianon Gravi <admwiggin@gmail.com> (@tianon)
diff --git a/vendor/src/github.com/opencontainers/specs/README.md b/vendor/src/github.com/opencontainers/specs/README.md
deleted file mode 100644
index cfe73fa..0000000
--- a/vendor/src/github.com/opencontainers/specs/README.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# Open Container Specifications
-
-[Open Container Initiative](http://www.opencontainers.org/) Specifications for standards on Operating System process and application containers.
-
-
-Table of Contents
-
-- [Container Principles](principles.md)
-- [Filesystem Bundle](bundle.md)
-- Configuration
-  - [Container Configuration](config.md)
-  - [Container Configuration (Linux-specific)](config-linux.md)
-  - [Runtime Configuration](runtime-config.md)
-  - [Runtime Configuration (Linux-specific)](runtime-config-linux.md)
-- [Runtime and Lifecycle](runtime.md)
-  - [Linux Specific Runtime](runtime-linux.md)
-- [Implementations](implementations.md)
-
-# Use Cases
-
-To provide context for users the following section gives example use cases for each part of the spec.
-
-## Filesystem Bundle & Configuration
-
-- A user can create a root filesystem and configuration, with low-level OS and host specific details, and launch it as a container under an Open Container runtime.
-
-# Releases
-
-There is a loose [Road Map](https://github.com/opencontainers/specs/wiki/RoadMap:) on the wiki.
-During the `0.x` series of OCI releases we make no backwards compatibility guarantees and intend to break the schema during this series.
-
-# Contributing
-
-Development happens on GitHub for the spec.
-Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
-
-The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository.
-
-## Code of Conduct
-
-Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct](code-of-conduct.md).
-
-## Discuss your design
-
-The project welcomes submissions, but please let everyone know what you are working on.
-
-Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do.
-This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits.
-It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions.
-
-Typos and grammatical errors can go straight to a pull-request.
-When in doubt, start on the [mailing-list](#mailing-list).
-
-## Weekly Call
-
-The contributors and maintainers of the project have a weekly meeting Wednesdays at 10:00 AM PST.
-Everyone is welcome to participate in the [BlueJeans call][BlueJeans].
-An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
-Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived to the [wiki](https://github.com/opencontainers/specs/wiki) for those who are unable to join the call.
-
-## Mailing List
-
-You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev).
-
-## IRC
-
-OCI discussion happens on #opencontainers on Freenode.
-
-## Markdown style
-
-To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line.
-This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length.
-For example, this paragraph will span three lines in the Markdown source.
-
-## Git commit
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch.
-The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it.
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
-```
-
-then you just add a line to every git commit message:
-
-    Signed-off-by: Joe Smith <joe@gmail.com>
-
-using your real name (sorry, no pseudonyms or anonymous contributions.)
-
-You can add the sign off when creating the git commit via `git commit -s`.
-
-### Commit Style
-
-Simple house-keeping for clean git history.
-Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit).
-
-1. Separate the subject from body with a blank line
-2. Limit the subject line to 50 characters
-3. Capitalize the subject line
-4. Do not end the subject line with a period
-5. Use the imperative mood in the subject line
-6. Wrap the body at 72 characters
-7. Use the body to explain what and why vs. how
-  * If there was important/useful/essential conversation or information, copy or include a reference
-8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...")
-
-[BlueJeans]: https://bluejeans.com/1771332256/
diff --git a/vendor/src/github.com/opencontainers/specs/ROADMAP.md b/vendor/src/github.com/opencontainers/specs/ROADMAP.md
deleted file mode 100644
index 8941528..0000000
--- a/vendor/src/github.com/opencontainers/specs/ROADMAP.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# OCI Specs Roadmap
-
-This document serves to provide a long term roadmap on our quest to a 1.0 version of the OCI container specification.
-Its goal is to help both maintainers and contributors find meaningful tasks to focus on and create a low noise environment.
-The items in the 1.0 roadmap can be broken down into smaller milestones that are easy to accomplish.
-The topics below are broad and small working groups will be needed for each to define scope and requirements or if the feature is required at all for the OCI level.
-Topics listed in the roadmap do not mean that they will be implemented or added but are areas that need discussion to see if they fit in to the goals of the OCI.
-
-## 1.0
-
-### Digest and Hashing
-
-A bundle is designed to be moved between hosts. 
-Although OCI doesn't define a transport method we should have a cryptographic digest of the on-disk bundle that can be used to verify that a bundle is not corrupted and in an expected configuration.
-
-*Owner:* philips
-
-### Review the need for runtime.json
-
-There are some discussions about having `runtime.json` being optional for containers and specifying defaults.
-Runtimes would use this standard set of defaults for containers and `runtime.json` would provide overrides for fine tuning of these extra host or platform specific settings.
-
-*Owner:*  
-
-### Define Container Lifecycle
-
-Containers have a lifecycle and being able to identify and document the lifecycle of a container is very helpful for implementations of the spec.  
-The lifecycle events of a container also help identify areas to implement hooks that are portable across various implementations and platforms.
-
-*Owner:* mrunalp
-
-### Define Standard Container Actions
-
-Define what type of actions a runtime can perform on a container without imposing hardships on authors of platforms that do not support advanced options.
-
-*Owner:*  
-
-### Clarify rootfs requirement in base spec
-
-Is the rootfs needed or should it just be expected in the bundle without having a field in the spec?
-
-*Owner:*  
-
-### Container Definition
-
-Define what a software container is and its attributes in a cross platform way.
-
-*Owner:*  
-
-### Live Container Updates
-
-Should we allow dynamic container updates to runtime options? 
-
-*Owner:* vishh
-
-### Protobuf Config 
-
-We currently have only one language binding for the spec and that is Go.
-If we change the specs format in the respository to be something like protobuf then the generation for multiple language bindings become effortless.
-
-*Owner:* vbatts
-
-### Validation Tooling
-
-Provide validation tooling for compliance with OCI spec and runtime environment. 
-
-*Owner:* mrunalp
-
-### Version Schema
-
-Decide on a robust versioning schema for the spec as it evolves.
-
-*Owner:*  
-
-### Printable/Compiled Spec
-
-Reguardless of how the spec is written, ensure that it is easy to read and follow for first time users.
-
-*Owner:* vbatts 
-
-### Base Config Compatibility
-
-Ensure that the base configuration format is viable for various platforms.
-
-Systems: 
-
-* Solaris
-* Windows 
-* Linux
-
-*Owner:* 
-
-### Full Lifecycle Hooks
-Ensure that we have lifecycle hooks in the correct places with full coverage over the container lifecycle.
-
-*Owner:*  
diff --git a/vendor/src/github.com/opencontainers/specs/bundle.md b/vendor/src/github.com/opencontainers/specs/bundle.md
deleted file mode 100644
index 8e25255..0000000
--- a/vendor/src/github.com/opencontainers/specs/bundle.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Filesystem Bundle
-
-## Container Format
-
-This section defines a format for encoding a container as a *filesystem bundle* - a set of files organized in a certain way, and containing all the necessary data and metadata for any compliant runtime to perform all standard operations against it.
-See also [OS X application bundles](http://en.wikipedia.org/wiki/Bundle_%28OS_X%29) for a similar use of the term *bundle*.
-
-The definition of a bundle is only concerned with how a container, and its configuration data, are stored on a local file system so that it can be consumed by a compliant runtime.
-
-A Standard Container bundle contains all the information needed to load and run a container.
-This includes the following three artifacts which MUST all reside in the same directory on the local filesystem:
-
-1. `config.json` : contains host independent configuration data.
-This REQUIRED file, which MUST be named `config.json`, contains settings that are host independent and application specific such as security permissions, environment variables and arguments.
-When the bundle is packaged up for distribution, this file MUST be included.
-See [`config.json`](config.md) for more details.
-
-2. `runtime.json` : contains host-specific configuration data.
-This REQUIRED file, which MUST be named `runtime.json`, contains settings that are host specific such as mount sources and hooks.
-The goal is that the bundle can be moved as a unit to another runtime and run the same application once a host-specific `runtime.json` is defined.
-When the bundle is packaged up for distribution, this file MUST NOT be included.
-See [`runtime.json`](runtime-config.md) for more details.
-
-3. A directory representing the root filesystem of the container.
-While the name of this REQUIRED directory may be arbitrary, users should consider using a conventional name, such as `rootfs`.
-When the bundle is packaged up for distribution, this directory MUST be included.
-This directory MUST be referenced from within the `config.json` file.
-
-While these three artifacts MUST all be present in a single directory on the local filesytem, that directory itself is not part of the bundle.
-In other words, a tar archive of a *bundle* will have these artifacts at the root of the archive, not nested within a top-level directory.
diff --git a/vendor/src/github.com/opencontainers/specs/code-of-conduct.md b/vendor/src/github.com/opencontainers/specs/code-of-conduct.md
deleted file mode 100644
index 06cb2b8..0000000
--- a/vendor/src/github.com/opencontainers/specs/code-of-conduct.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# OpenContainers Code of Conduct
-
-Behave as a community member, follow the code of conduct.
-
-## Code of Conduct
-
-The OpenContainers community is made up of a mixture of professionals and volunteers from all over the world.
-
-When we disagree, we try to understand why.
-Disagreements, both social and technical, happen all the time and OpenContainers is no exception.
-It is important that we resolve disagreements and differing views constructively.
-
-This code of conduct applies both within project spaces and in public spaces when an individual is representing the project or its community.
-Participants should be aware of these concerns.
-
-We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information, such as physical or electronic addresses, without explicit permission
-* Other unethical or unprofessional conduct
-
-The OpenContainers team does not condone any statements by speakers contrary to these standards.
-The OpenContainers team reserves the right to deny participation any individual found to be engaging in discriminatory or harassing actions.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project.
-
-## Thanks 
-
-Thanks to the [Fedora Code of Conduct](https://getfedora.org/code-of-conduct) and [Contributor Covenant](http://contributor-covenant.org) for inspiration and ideas.
-
-Portions of this Code of Conduct are adapted from the Contributor Covenant, version 1.2.0, available at http://contributor-covenant.org/version/1/2/0/
diff --git a/vendor/src/github.com/opencontainers/specs/config-linux.md b/vendor/src/github.com/opencontainers/specs/config-linux.md
deleted file mode 100644
index 883daba..0000000
--- a/vendor/src/github.com/opencontainers/specs/config-linux.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Linux-specific Container Configuration
-
-The Linux container specification uses various kernel features like namespaces, cgroups, capabilities, LSM, and file system jails to fulfill the spec.
-Additional information is needed for Linux over the [default spec configuration](config.md) in order to configure these various kernel features.
-
-## Capabilities
-
-Capabilities is an array that specifies Linux capabilities that can be provided to the process inside the container.
-Valid values are the strings for capabilities defined in [the man page](http://man7.org/linux/man-pages/man7/capabilities.7.html)
-
-```json
-   "capabilities": [
-        "CAP_AUDIT_WRITE",
-        "CAP_KILL",
-        "CAP_NET_BIND_SERVICE"
-    ]
-```
-
-## User namespace mappings
-
-```json
-    "uidMappings": [
-        {
-            "hostID": 1000,
-            "containerID": 0,
-            "size": 10
-        }
-    ],
-    "gidMappings": [
-        {
-            "hostID": 1000,
-            "containerID": 0,
-            "size": 10
-        }
-    ]
-```
-
-uid/gid mappings describe the user namespace mappings from the host to the container.
-The mappings represent how the bundle `rootfs` expects the user namespace to be setup and the runtime SHOULD NOT modify the permissions on the rootfs to realize the mapping.
-*hostID* is the starting uid/gid on the host to be mapped to *containerID* which is the starting uid/gid in the container and *size* refers to the number of ids to be mapped.
-There is a limit of 5 mappings which is the Linux kernel hard limit.
-
-## Default Devices and File Systems
-
-The Linux ABI includes both syscalls and several special file paths.
-Applications expecting a Linux environment will very likely expect these files paths to be setup correctly.
-
-The following devices and filesystems MUST be made available in each application's filesystem
-
-|     Path     |  Type  |  Notes  |
-| ------------ | ------ | ------- |
-| /proc        | [procfs](https://www.kernel.org/doc/Documentation/filesystems/proc.txt)    | |
-| /sys         | [sysfs](https://www.kernel.org/doc/Documentation/filesystems/sysfs.txt)    | |
-| /dev/null    | [device](http://man7.org/linux/man-pages/man4/null.4.html)                 | |
-| /dev/zero    | [device](http://man7.org/linux/man-pages/man4/zero.4.html)                 | |
-| /dev/full    | [device](http://man7.org/linux/man-pages/man4/full.4.html)                 | |
-| /dev/random  | [device](http://man7.org/linux/man-pages/man4/random.4.html)               | |
-| /dev/urandom | [device](http://man7.org/linux/man-pages/man4/random.4.html)               | |
-| /dev/tty     | [device](http://man7.org/linux/man-pages/man4/tty.4.html)                  | |
-| /dev/console | [device](http://man7.org/linux/man-pages/man4/console.4.html)              | |
-| /dev/pts     | [devpts](https://www.kernel.org/doc/Documentation/filesystems/devpts.txt)  | |
-| /dev/ptmx    | [device](https://www.kernel.org/doc/Documentation/filesystems/devpts.txt)  | Bind-mount or symlink of /dev/pts/ptmx |
-| /dev/shm     | [tmpfs](https://www.kernel.org/doc/Documentation/filesystems/tmpfs.txt)    | |
diff --git a/vendor/src/github.com/opencontainers/specs/config.go b/vendor/src/github.com/opencontainers/specs/config.go
deleted file mode 100644
index de2aa5a..0000000
--- a/vendor/src/github.com/opencontainers/specs/config.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package specs
-
-// Spec is the base configuration for the container.  It specifies platform
-// independent configuration. This information must be included when the
-// bundle is packaged for distribution.
-type Spec struct {
-	// Version is the version of the specification that is supported.
-	Version string `json:"version"`
-	// Platform is the host information for OS and Arch.
-	Platform Platform `json:"platform"`
-	// Process is the container's main process.
-	Process Process `json:"process"`
-	// Root is the root information for the container's filesystem.
-	Root Root `json:"root"`
-	// Hostname is the container's host name.
-	Hostname string `json:"hostname"`
-	// Mounts profile configuration for adding mounts to the container's filesystem.
-	Mounts []MountPoint `json:"mounts"`
-}
-
-// Process contains information to start a specific application inside the container.
-type Process struct {
-	// Terminal creates an interactive terminal for the container.
-	Terminal bool `json:"terminal"`
-	// User specifies user information for the process.
-	User User `json:"user"`
-	// Args specifies the binary and arguments for the application to execute.
-	Args []string `json:"args"`
-	// Env populates the process environment for the process.
-	Env []string `json:"env"`
-	// Cwd is the current working directory for the process and must be
-	// relative to the container's root.
-	Cwd string `json:"cwd"`
-}
-
-// Root contains information about the container's root filesystem on the host.
-type Root struct {
-	// Path is the absolute path to the container's root filesystem.
-	Path string `json:"path"`
-	// Readonly makes the root filesystem for the container readonly before the process is executed.
-	Readonly bool `json:"readonly"`
-}
-
-// Platform specifies OS and arch information for the host system that the container
-// is created for.
-type Platform struct {
-	// OS is the operating system.
-	OS string `json:"os"`
-	// Arch is the architecture
-	Arch string `json:"arch"`
-}
-
-// MountPoint describes a directory that may be fullfilled by a mount in the runtime.json.
-type MountPoint struct {
-	// Name is a unique descriptive identifier for this mount point.
-	Name string `json:"name"`
-	// Path specifies the path of the mount. The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point.
-	Path string `json:"path"`
-}
diff --git a/vendor/src/github.com/opencontainers/specs/config.md b/vendor/src/github.com/opencontainers/specs/config.md
deleted file mode 100644
index 4e98d29..0000000
--- a/vendor/src/github.com/opencontainers/specs/config.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# Container Configuration file
-
-The container's top-level directory MUST contain a configuration file called `config.json`.
-For now the canonical schema is defined in [config.go](config.go) and [config_linux.go](config_linux.go), but this will be moved to a formal JSON schema over time.
-
-The configuration file contains metadata necessary to implement standard operations against the container.
-This includes the process to run, environment variables to inject, sandboxing features to use, etc.
-
-Below is a detailed description of each field defined in the configuration format.
-
-## Manifest version
-
-* **`version`** (string, required) must be in [SemVer v2.0.0](http://semver.org/spec/v2.0.0.html) format and specifies the version of the OCF specification with which the container bundle complies. The Open Container spec follows semantic versioning and retains forward and backward compatibility within major versions. For example, if an implementation is compliant with version 1.0.1 of the spec, it is compatible with the complete 1.x series.
-
-*Example*
-
-```json
-    "version": "0.1.0"
-```
-
-## Root Configuration
-
-Each container has exactly one *root filesystem*, specified in the *root* object:
-
-* **`path`** (string, required) Specifies the path to the root filesystem for the container, relative to the path where the manifest is. A directory MUST exist at the relative path declared by the field.
-* **`readonly`** (bool, optional) If true then the root filesystem MUST be read-only inside the container. Defaults to false.
-
-*Example*
-
-```json
-"root": {
-    "path": "rootfs",
-    "readonly": true
-}
-```
-
-## Mount Points
-
-You can add array of mount points inside container as `mounts`.
-Each record in this array must have configuration in [runtime config](runtime-config.md#mount-configuration).
-The runtime MUST mount entries in the listed order.
-
-* **`name`** (string, required) Name of mount point. Used for config lookup.
-* **`path`** (string, required) Destination of mount point: path inside container.
-
-*Example*
-
-```json
-"mounts": [
-    {
-        "name": "proc",
-        "path": "/proc"
-    },
-    {
-        "name": "dev",
-        "path": "/dev"
-    },
-    {
-        "name": "devpts",
-        "path": "/dev/pts"
-    },
-    {
-        "name": "data",
-        "path": "/data"
-    }
-]
-```
-
-## Process configuration
-
-* **`terminal`** (bool, optional) specifies whether you want a terminal attached to that process. Defaults to false.
-* **`cwd`** (string, optional) is the working directory that will be set for the executable.
-* **`env`** (array of strings, optional) contains a list of variables that will be set in the process's environment prior to execution. Elements in the array are specified as Strings in the form "KEY=value". The left hand side must consist solely of letters, digits, and underscores `_` as outlined in [IEEE Std 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
-* **`args`** (string, required) executable to launch and any flags as an array. The executable is the first element and must be available at the given path inside of the rootfs. If the executable path is not an absolute path then the search $PATH is interpreted to find the executable.
-
-The user for the process is a platform-specific structure that allows specific control over which user the process runs as.
-For Linux-based systems the user structure has the following fields:
-
-* **`uid`** (int, required) specifies the user id.
-* **`gid`** (int, required) specifies the group id.
-* **`additionalGids`** (array of ints, optional) specifies additional group ids to be added to the process.
-
-*Example (Linux)*
-
-```json
-"process": {
-    "terminal": true,
-    "user": {
-        "uid": 1,
-        "gid": 1,
-        "additionalGids": [5, 6]
-    },
-    "env": [
-        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-        "TERM=xterm"
-    ],
-    "cwd": "/root",
-    "args": [
-        "sh"
-    ]
-}
-```
-
-
-## Hostname
-
-* **`hostname`** (string, optional) as it is accessible to processes running inside.
-
-*Example*
-
-```json
-"hostname": "mrsdalloway"
-```
-
-## Platform-specific configuration
-
-* **`os`** (string, required) specifies the operating system family this image must run on. Values for os must be in the list specified by the Go Language document for [`$GOOS`](https://golang.org/doc/install/source#environment).
-* **`arch`** (string, required) specifies the instruction set for which the binaries in the image have been compiled. Values for arch must be in the list specified by the Go Language document for [`$GOARCH`](https://golang.org/doc/install/source#environment).
-
-```json
-"platform": {
-    "os": "linux",
-    "arch": "amd64"
-}
-```
-
-Interpretation of the platform section of the JSON file is used to find which platform-specific sections may be available in the document.
-For example, if `os` is set to `linux`, then a JSON object conforming to the [Linux-specific schema](config-linux.md) SHOULD be found at the key `linux` in the `config.json`.
diff --git a/vendor/src/github.com/opencontainers/specs/config_linux.go b/vendor/src/github.com/opencontainers/specs/config_linux.go
deleted file mode 100644
index b991553..0000000
--- a/vendor/src/github.com/opencontainers/specs/config_linux.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package specs
-
-// LinuxSpec is the full specification for linux containers.
-type LinuxSpec struct {
-	Spec
-	// Linux is platform specific configuration for linux based containers.
-	Linux Linux `json:"linux"`
-}
-
-// Linux contains platform specific configuration for linux based containers.
-type Linux struct {
-	// Capabilities are linux capabilities that are kept for the container.
-	Capabilities []string `json:"capabilities"`
-}
-
-// User specifies linux specific user and group information for the container's
-// main process.
-type User struct {
-	// UID is the user id.
-	UID uint32 `json:"uid"`
-	// GID is the group id.
-	GID uint32 `json:"gid"`
-	// AdditionalGids are additional group ids set for the container's process.
-	AdditionalGids []uint32 `json:"additionalGids"`
-}
diff --git a/vendor/src/github.com/opencontainers/specs/implementations.md b/vendor/src/github.com/opencontainers/specs/implementations.md
deleted file mode 100644
index 5cd16b9..0000000
--- a/vendor/src/github.com/opencontainers/specs/implementations.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Implementations
-
-The following sections link to associated projects, some of which are maintained by the OCI and some of which are maintained by external organizations.
-If you know of any associated projects that are not listed here, please file a pull request adding a link to that project.
-
-## Runtime (Container)
-
-* [opencontainers/runc](https://github.com/opencontainers/runc) - Reference implementation of OCI runtime
-
-## Runtime (Virtual Machine)
-
-* [hyperhq/runv](https://github.com/hyperhq/runv) - Hypervisor-based runtime for OCI
-
-## Bundle authoring
-
-* [kunalkushwaha/octool](https://github.com/kunalkushwaha/octool) - A config linter and validator.
-* [mrunalp/ocitools](https://github.com/mrunalp/ocitools) - A config generator.
-
-## Testing
-
-* [huawei-openlab/oct](https://github.com/huawei-openlab/oct) - Open Container Testing framework for OCI configuration and runtime
diff --git a/vendor/src/github.com/opencontainers/specs/principles.md b/vendor/src/github.com/opencontainers/specs/principles.md
deleted file mode 100644
index 5dbab16..0000000
--- a/vendor/src/github.com/opencontainers/specs/principles.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# The 5 principles of Standard Containers
-
-Define a unit of software delivery called a Standard Container.
-The goal of a Standard Container is to encapsulate a software component and all its dependencies in a format that is self-describing and portable, so that any compliant runtime can run it without extra dependencies, regardless of the underlying machine and the contents of the container.
-
-The specification for Standard Containers defines:
-
-1. configuration file formats
-2. a set of standard operations
-3. an execution environment.
-
-A great analogy for this is the physical shipping container used by the transportation industry.
-Shipping containers are a fundamental unit of delivery, they can be lifted, stacked, locked, loaded, unloaded and labelled.
-Irrespective of their contents, by standardizing the container itself it allowed for a consistent, more streamlined and efficient set of processes to be defined.
-For software Standard Containers offer similar functionality by being the fundamental, standardized, unit of delivery for a software package.
-
-## 1. Standard operations
-
-Standard Containers define a set of STANDARD OPERATIONS.
-They can be created, started, and stopped using standard container tools; copied and snapshotted using standard filesystem tools; and downloaded and uploaded using standard network tools.
-
-## 2. Content-agnostic
-
-Standard Containers are CONTENT-AGNOSTIC: all standard operations have the same effect regardless of the contents.
-They are started in the same way whether they contain a postgres database, a php application with its dependencies and application server, or Java build artifacts.
-
-## 3. Infrastructure-agnostic
-
-Standard Containers are INFRASTRUCTURE-AGNOSTIC: they can be run in any OCI-supported infrastructure.
-For example, a standard container can be bundled on a laptop, uploaded to cloud storage, downloaded, run and snapshotted by a build server at a fiber hotel in Virginia, uploaded to 10 staging servers in a home-made private cloud cluster, then sent to 30 production instances across 3 public cloud regions.
-
-## 4. Designed for automation
-
-Standard Containers are DESIGNED FOR AUTOMATION: because they offer the same standard operations regardless of content and infrastructure, Standard Containers are extremely well-suited for automation.
-In fact, you could say automation is their secret weapon.
-
-Many things that once required time-consuming and error-prone human effort can now be programmed.
-Before Standard Containers, by the time a software component ran in production, it had been individually built, configured, bundled, documented, patched, vendored, templated, tweaked and instrumented by 10 different people on 10 different computers.
-Builds failed, libraries conflicted, mirrors crashed, post-it notes were lost, logs were misplaced, cluster updates were half-broken.
-The process was slow, inefficient and cost a fortune - and was entirely different depending on the language and infrastructure provider.
-
-## 5. Industrial-grade delivery
-
-Standard Containers make INDUSTRIAL-GRADE DELIVERY of software a reality.
-Leveraging all of the properties listed above, Standard Containers are enabling large and small enterprises to streamline and automate their software delivery pipelines.
-Whether it is in-house DevOps flows or external customer-facing software delivery mechanisms, Standard Containers are changing the way the community thinks about software packaging and delivery.
diff --git a/vendor/src/github.com/opencontainers/specs/runtime-config-linux.md b/vendor/src/github.com/opencontainers/specs/runtime-config-linux.md
deleted file mode 100644
index 1fff724..0000000
--- a/vendor/src/github.com/opencontainers/specs/runtime-config-linux.md
+++ /dev/null
@@ -1,502 +0,0 @@
-# Linux-specific Runtime Configuration
-
-## Namespaces
-
-A namespace wraps a global system resource in an abstraction that makes it appear to the processes within the namespace that they have their own isolated instance of the global resource.
-Changes to the global resource are visible to other processes that are members of the namespace, but are invisible to other processes.
-For more information, see [the man page](http://man7.org/linux/man-pages/man7/namespaces.7.html).
-
-Namespaces are specified as an array of entries inside the `namespaces` root field.
-The following parameters can be specified to set up namespaces:
-
-* **`type`** *(string, required)* - namespace type. The following namespace types are supported:
-    * **`pid`** processes inside the container will only be able to see other processes inside the same container
-    * **`network`** the container will have its own network stack
-    * **`mount`** the container will have an isolated mount table
-    * **`ipc`** processes inside the container will only be able to communicate to other processes inside the same container via system level IPC
-    * **`uts`** the container will be able to have its own hostname and domain name
-    * **`user`** the container will be able to remap user and group IDs from the host to local users and groups within the container
-
-* **`path`** *(string, optional)* - path to namespace file
-
-If a path is specified, that particular file is used to join that type of namespace.
-Also, when a path is specified, a runtime MUST assume that the setup for that particular namespace has already been done and error out if the config specifies anything else related to that namespace.
-
-###### Example
-
-```json
-    "namespaces": [
-        {
-            "type": "pid",
-            "path": "/proc/1234/ns/pid"
-        },
-        {
-            "type": "network",
-            "path": "/var/run/netns/neta"
-        },
-        {
-            "type": "mount"
-        },
-        {
-            "type": "ipc"
-        },
-        {
-            "type": "uts"
-        },
-        {
-            "type": "user"
-        }
-    ]
-```
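-
-The same list can be expressed against the Go types vendored later in this diff (`Namespace` and the `NamespaceType` constants in `runtime_config_linux.go`). A minimal sketch, assuming the vendored package is importable as `github.com/opencontainers/specs`:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	// Join two existing namespaces via their files; create the rest fresh.
-	namespaces := []specs.Namespace{
-		{Type: specs.PIDNamespace, Path: "/proc/1234/ns/pid"},
-		{Type: specs.NetworkNamespace, Path: "/var/run/netns/neta"},
-		{Type: specs.MountNamespace},
-		{Type: specs.IPCNamespace},
-		{Type: specs.UTSNamespace},
-		{Type: specs.UserNamespace},
-	}
-	out, err := json.MarshalIndent(namespaces, "", "    ")
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(string(out))
-}
-```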
-
-## Devices
-
-`devices` is an array specifying the list of devices to be created in the container.
-
-The following parameters can be specified:
-
-* **`type`** *(char, required)* - type of device: `c`, `b`, `u` or `p`. More info in `man mknod`.
-
-* **`path`** *(string, optional)* - full path to device inside container
-
-* **`major, minor`** *(int64, required)* - major, minor numbers for device. More info in `man mknod`. There is a special value: `-1`, which means `*` for `device` cgroup setup.
-
-* **`permissions`** *(string, optional)* - cgroup permissions for device. A composition of `r` (*read*), `w` (*write*), and `m` (*mknod*).
-
-* **`fileMode`** *(uint32, optional)* - file mode for device file
-
-* **`uid`** *(uint32, optional)* - uid of device owner
-
-* **`gid`** *(uint32, optional)* - gid of device owner
-
-**`fileMode`**, **`uid`** and **`gid`** are required if **`path`** is given and are otherwise not allowed.
-
-###### Example
-
-```json
-   "devices": [
-        {
-            "path": "/dev/random",
-            "type": "c",
-            "major": 1,
-            "minor": 8,
-            "permissions": "rwm",
-            "fileMode": 0666,
-            "uid": 0,
-            "gid": 0
-        },
-        {
-            "path": "/dev/urandom",
-            "type": "c",
-            "major": 1,
-            "minor": 9,
-            "permissions": "rwm",
-            "fileMode": 0666,
-            "uid": 0,
-            "gid": 0
-        },
-        {
-            "path": "/dev/null",
-            "type": "c",
-            "major": 1,
-            "minor": 3,
-            "permissions": "rwm",
-            "fileMode": 0666,
-            "uid": 0,
-            "gid": 0
-        },
-        {
-            "path": "/dev/zero",
-            "type": "c",
-            "major": 1,
-            "minor": 5,
-            "permissions": "rwm",
-            "fileMode": 0666,
-            "uid": 0,
-            "gid": 0
-        },
-        {
-            "path": "/dev/tty",
-            "type": "c",
-            "major": 5,
-            "minor": 0,
-            "permissions": "rwm",
-            "fileMode": 0666,
-            "uid": 0,
-            "gid": 0
-        },
-        {
-            "path": "/dev/full",
-            "type": "c",
-            "major": 1,
-            "minor": 7,
-            "permissions": "rwm",
-            "fileMode": 0666,
-            "uid": 0,
-            "gid": 0
-        }
-    ]
-```
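-
-A single entry can likewise be built with the vendored `Device` type from `runtime_config_linux.go`. A sketch, assuming the vendored import path; note that Go marshals the `rune` type and `os.FileMode` as plain numbers (`99` and `438`), so the output is equivalent to, not byte-for-byte identical with, the JSON above:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	dev := specs.Device{
-		Path:        "/dev/null",
-		Type:        'c',               // char device; marshals as code point 99
-		Major:       1,
-		Minor:       3,
-		Permissions: "rwm",
-		FileMode:    os.FileMode(0666), // marshals as decimal 438
-		UID:         0,
-		GID:         0,
-	}
-	out, err := json.Marshal(dev)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(string(out))
-}
-```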
-
-## Control groups
-
-Also known as cgroups, they are used to restrict resource usage for a container and handle device access.
-cgroups provide controls to restrict CPU, memory, IO, PIDs and network for the container.
-For more information, see the [kernel cgroups documentation](https://www.kernel.org/doc/Documentation/cgroups/cgroups.txt).
-
-The path to the cgroups can be specified in the Spec via `cgroupsPath`.
-`cgroupsPath` is expected to be relative to the cgroups mount point.
-If not specified, cgroups will be created under '/'.
-Implementations of the Spec can choose to name cgroups in any manner.
-The Spec does not include naming schema for cgroups.
-The Spec does not support [split hierarchy](https://www.kernel.org/doc/Documentation/cgroups/unified-hierarchy.txt).
-The cgroups will be created if they don't exist.
-
-```json
-   "cgroupsPath": "/myRuntime/myContainer"
-```
-
-`cgroupsPath` can be used to either control the cgroups hierarchy for containers or to run a new process in an existing container.
-
-You can configure a container's cgroups via the `resources` field of the Linux configuration.
-Do not specify `resources` unless limits have to be updated.
-For example, to run a new process in an existing container without updating limits, `resources` need not be specified.
-
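-For example, to run a new process in an existing container without updating limits, a consumer of the vendored `LinuxRuntime` type (defined in `runtime_config_linux.go` later in this diff) would set `CgroupsPath` and leave `Resources` nil. A rough sketch, assuming the vendored import path:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	// Join (or create) the cgroups at this path without writing any
-	// controller values: a nil Resources means "do not update limits".
-	runtime := specs.LinuxRuntime{
-		CgroupsPath: "/myRuntime/myContainer",
-		Resources:   nil,
-	}
-	out, err := json.Marshal(runtime)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(string(out))
-}
-```
-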
-#### Disable out-of-memory killer
-
-`disableOOMKiller` contains a boolean (`true` or `false`) that enables or disables the Out of Memory killer for a cgroup.
-If the OOM killer is enabled (`disableOOMKiller` set to `false`), tasks that attempt to consume more memory than they are allowed are immediately killed by the OOM killer.
-The OOM killer is enabled by default in every cgroup using the `memory` subsystem.
-To disable it, set `disableOOMKiller` to `true`.
-For more information, see [the memory cgroup man page](https://www.kernel.org/doc/Documentation/cgroups/memory.txt).
-
-* **`disableOOMKiller`** *(bool, optional)* - enables or disables the OOM killer
-
-###### Example
-
-```json
-    "disableOOMKiller": false
-```
-
-#### Set oom_score_adj
-
-More information on `oom_score_adj` is available in [the kernel proc documentation](https://www.kernel.org/doc/Documentation/filesystems/proc.txt).
-
-```json
-    "oomScoreAdj": 0
-```
-
-#### Memory
-
-`memory` represents the cgroup subsystem `memory`, and it is used to set limits on the container's memory usage.
-For more information, see [the memory cgroup man page](https://www.kernel.org/doc/Documentation/cgroups/memory.txt).
-
-The following parameters can be specified to set up the controller:
-
-* **`limit`** *(uint64, optional)* - sets limit of memory usage
-
-* **`reservation`** *(uint64, optional)* - sets soft limit of memory usage
-
-* **`swap`** *(uint64, optional)* - sets limit of memory+swap usage
-
-* **`kernel`** *(uint64, optional)* - sets hard limit for kernel memory
-
-* **`swappiness`** *(uint64, optional)* - sets swappiness parameter of vmscan (See sysctl's vm.swappiness)
-
-###### Example
-
-```json
-    "memory": {
-        "limit": 0,
-        "reservation": 0,
-        "swap": 0,
-        "kernel": 0,
-        "swappiness": -1
-    }
-```
-
-#### CPU
-
-`cpu` represents the cgroup subsystems `cpu` and `cpuset`.
-For more information, see [the cpusets cgroup man page](https://www.kernel.org/doc/Documentation/cgroups/cpusets.txt).
-
-The following parameters can be specified to set up the controller:
-
-* **`shares`** *(uint64, optional)* - specifies a relative share of CPU time available to the tasks in a cgroup
-
-* **`quota`** *(uint64, optional)* - specifies the total amount of time in microseconds for which all tasks in a cgroup can run during one period (as defined by **`period`** below)
-
-* **`period`** *(uint64, optional)* - specifies a period of time in microseconds for how regularly a cgroup's access to CPU resources should be reallocated (CFS scheduler only)
-
-* **`realtimeRuntime`** *(uint64, optional)* - specifies a period of time in microseconds for the longest continuous period in which the tasks in a cgroup have access to CPU resources
-
-* **`realtimePeriod`** *(uint64, optional)* - same as **`period`** but applies to realtime scheduler only
-
-* **`cpus`** *(string, optional)* - list of CPUs the container will run on
-
-* **`mems`** *(string, optional)* - list of Memory Nodes the container will run on
-
-###### Example
-
-```json
-    "cpu": {
-        "shares": 0,
-        "quota": 0,
-        "period": 0,
-        "realtimeRuntime": 0,
-        "realtimePeriod": 0,
-        "cpus": "",
-        "mems": ""
-    }
-```
-
-#### Block IO Controller
-
-`blockIO` represents the cgroup subsystem `blkio` which implements the block io controller.
-For more information, see [the kernel cgroups documentation about blkio](https://www.kernel.org/doc/Documentation/cgroups/blkio-controller.txt).
-
-The following parameters can be specified to set up the controller:
-
-* **`blkioWeight`** *(uint16, optional)* - specifies the per-cgroup weight. This is the default weight of the group on all devices until and unless overridden by per-device rules. The range is from 10 to 1000.
-
-* **`blkioLeafWeight`** *(uint16, optional)* - the equivalent of `blkioWeight` for the purpose of deciding how much weight the tasks in the given cgroup have while competing with the cgroup's child cgroups. The range is from 10 to 1000.
-
-* **`blkioWeightDevice`** *(array, optional)* - specifies the list of devices which will be bandwidth rate limited. The following parameters can be specified per-device:
-    * **`major, minor`** *(int64, required)* - major, minor numbers for device. More info in `man mknod`.
-    * **`weight`** *(uint16, optional)* - bandwidth rate for the device, range is from 10 to 1000
-    * **`leafWeight`** *(uint16, optional)* - bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
-
-    You must specify at least one of `weight` or `leafWeight` in a given entry, and can specify both.
-
-* **`blkioThrottleReadBpsDevice`**, **`blkioThrottleWriteBpsDevice`**, **`blkioThrottleReadIOPSDevice`**, **`blkioThrottleWriteIOPSDevice`** *(array, optional)* - specify the list of devices which will be IO rate limited. The following parameters can be specified per-device:
-    * **`major, minor`** *(int64, required)* - major, minor numbers for device. More info in `man mknod`.
-    * **`rate`** *(uint64, required)* - IO rate limit for the device
-
-###### Example
-
-```json
-    "blockIO": {
-        "blkioWeight": 0,
-        "blkioLeafWeight": 0,
-        "blkioWeightDevice": [
-            {
-                "major": 8,
-                "minor": 0,
-                "weight": 500,
-                "leafWeight": 300
-            },
-            {
-                "major": 8,
-                "minor": 16,
-                "weight": 500
-            }
-        ],
-        "blkioThrottleReadBpsDevice": [
-            {
-                "major": 8,
-                "minor": 0,
-                "rate": 600
-            }
-        ],
-        "blkioThrottleWriteIOPSDevice": [
-            {
-                "major": 8,
-                "minor": 16,
-                "rate": 300
-            }
-        ]
-    }
-```
-
-#### Huge page limits
-
-`hugepageLimits` represents the `hugetlb` controller, which allows limiting
-HugeTLB usage per control group and enforces the limit during page faults.
-For more information, see the [kernel cgroups documentation about HugeTLB](https://www.kernel.org/doc/Documentation/cgroups/hugetlb.txt).
-
-`hugepageLimits` is an array of entries, each having the following structure:
-
-* **`pageSize`** *(string, required)* - hugepage size
-
-* **`limit`** *(uint64, required)* - limit in bytes of *hugepagesize* HugeTLB usage
-
-###### Example
-
-```json
-   "hugepageLimits": [
-        {
-            "pageSize": "2MB",
-            "limit": 9223372036854771712
-        }
-   ]
-```
-
-#### Network
-
-`network` represents the cgroup subsystems `net_cls` and `net_prio`.
-For more information, see [the net\_cls cgroup man page](https://www.kernel.org/doc/Documentation/cgroups/net_cls.txt) and [the net\_prio cgroup man page](https://www.kernel.org/doc/Documentation/cgroups/net_prio.txt).
-
-The following parameters can be specified to set up these cgroup controllers:
-
-* **`classID`** *(string, optional)* - the network class identifier with which the cgroup's network packets will be tagged (see the conversion sketch after the example below)
-
-* **`priorities`** *(array, optional)* - specifies a list of priorities assigned to traffic originating from
-processes in the group and egressing the system on various interfaces. The following parameters can be specified per priority:
-    * **`name`** *(string, required)* - interface name
-    * **`priority`** *(uint32, required)* - priority applied to the interface
-
-###### Example
-
-```json
-   "network": {
-        "classID": "0x100001",
-        "priorities": [
-            {
-                "name": "eth0",
-                "priority": 500
-            },
-            {
-                "name": "eth1",
-                "priority": 1000
-            }
-        ]
-   }
-```
-
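-`classID` is a string rather than a number because JSON has no hexadecimal literals (the comment on the vendored `Network` type in `runtime_config_linux.go` notes the same). A consumer converts it back before writing `net_cls.classid`; a small sketch:
-
-```go
-package main
-
-import (
-	"fmt"
-	"strconv"
-)
-
-func main() {
-	// ParseUint with base 0 understands the 0x prefix used for classID.
-	classID, err := strconv.ParseUint("0x100001", 0, 32)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("net_cls.classid = %d (0x%x)\n", classID, classID)
-}
-```
-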
-#### PIDs
-
-`pids` represents the cgroup subsystem `pids`.
-For more information, see [the pids cgroup man page](https://www.kernel.org/doc/Documentation/cgroups/pids.txt).
-
-The following parameters can be specified to set up the controller:
-
-* **`limit`** *(int64, required)* - specifies the maximum number of tasks in the cgroup
-
-###### Example
-
-```json
-   "pids": {
-        "limit": 32771
-   }
-```
-
-## Sysctl
-
-sysctl allows kernel parameters to be modified at runtime for the container.
-For more information, see [the man page](http://man7.org/linux/man-pages/man8/sysctl.8.html).
-
-###### Example
-
-```json
-   "sysctl": {
-        "net.ipv4.ip_forward": "1",
-        "net.core.somaxconn": "256"
-   }
-```
-
-## Rlimits
-
-rlimits allow setting resource limits.
-`type` is a string with a value from those defined in [the man page](http://man7.org/linux/man-pages/man2/setrlimit.2.html).
-The kernel enforces the `soft` limit for a resource, while the `hard` limit acts as a ceiling on the value to which an unprivileged process may raise its soft limit.
-
-###### Example
-
-```json
-   "rlimits": [
-        {
-            "type": "RLIMIT_NPROC",
-            "soft": 1024,
-            "hard": 102400
-        }
-   ]
-```
-
-## SELinux process label
-
-SELinux process label specifies the label with which the processes in a container are run.
-For more information about SELinux, see the [SELinux documentation](http://selinuxproject.org/page/Main_Page).
-
-###### Example
-
-```json
-   "selinuxProcessLabel": "system_u:system_r:svirt_lxc_net_t:s0:c124,c675"
-```
-
-## AppArmor profile
-
-AppArmor profile specifies the name of the AppArmor profile that will be used for the container.
-For more information about AppArmor, see the [AppArmor documentation](https://wiki.ubuntu.com/AppArmor).
-
-###### Example
-
-```json
-   "apparmorProfile": "acme_secure_profile"
-```
-
-## seccomp
-
-Seccomp provides an application sandboxing mechanism in the Linux kernel.
-The seccomp configuration allows one to configure the actions to take for matched syscalls, and additionally allows matching on values passed as arguments to syscalls.
-For more information about seccomp, see the [seccomp kernel documentation](https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt).
-The actions, architectures, and operators are strings that match the definitions in seccomp.h from [libseccomp](https://github.com/seccomp/libseccomp) and are translated to the corresponding values.
-The list of valid constants as of libseccomp v2.2.3 follows.
-
-Architecture Constants:
-* `SCMP_ARCH_X86`
-* `SCMP_ARCH_X86_64`
-* `SCMP_ARCH_X32`
-* `SCMP_ARCH_ARM`
-* `SCMP_ARCH_AARCH64`
-* `SCMP_ARCH_MIPS`
-* `SCMP_ARCH_MIPS64`
-* `SCMP_ARCH_MIPS64N32`
-* `SCMP_ARCH_MIPSEL`
-* `SCMP_ARCH_MIPSEL64`
-* `SCMP_ARCH_MIPSEL64N32`
-
-Action Constants:
-* `SCMP_ACT_KILL`
-* `SCMP_ACT_TRAP`
-* `SCMP_ACT_ERRNO`
-* `SCMP_ACT_TRACE`
-* `SCMP_ACT_ALLOW`
-
-Operator Constants:
-* `SCMP_CMP_NE`
-* `SCMP_CMP_LT`
-* `SCMP_CMP_LE`
-* `SCMP_CMP_EQ`
-* `SCMP_CMP_GE`
-* `SCMP_CMP_GT`
-* `SCMP_CMP_MASKED_EQ`
-
-###### Example
-
-```json
-   "seccomp": {
-       "defaultAction": "SCMP_ACT_ALLOW",
-       "architectures": [
-           "SCMP_ARCH_X86"
-       ],
-       "syscalls": [
-           {
-               "name": "getcwd",
-               "action": "SCMP_ACT_ERRNO"
-           }
-       ]
-   }
-```
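-
-The vendored Go types in `runtime_config_linux.go` later in this diff (`Seccomp`, `Arch`, `Action`) mirror these constants one-to-one. The same policy as a rough Go sketch, assuming the vendored import path:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	// Allow everything by default, but make getcwd fail with an errno.
-	policy := specs.Seccomp{
-		DefaultAction: specs.ActAllow,
-		Architectures: []specs.Arch{specs.ArchX86},
-		Syscalls: []*specs.Syscall{
-			{Name: "getcwd", Action: specs.ActErrno},
-		},
-	}
-	out, err := json.MarshalIndent(policy, "", "    ")
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(string(out))
-}
-```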
-
-## Rootfs Mount Propagation
-
-rootfsPropagation sets the rootfs mount propagation mode.
-Its value is either `slave`, `private`, or `shared`.
-[The kernel doc](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) has more information about mount propagation.
-
-###### Example
-
-```json
-    "rootfsPropagation": "slave",
-```
diff --git a/vendor/src/github.com/opencontainers/specs/runtime-config.md b/vendor/src/github.com/opencontainers/specs/runtime-config.md
deleted file mode 100644
index 3313a0a..0000000
--- a/vendor/src/github.com/opencontainers/specs/runtime-config.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# Runtime Configuration
-
-## Hooks
-
-Lifecycle hooks allow custom code to be run at different points in a container's lifecycle.
-Presently there are `Prestart`, `Poststart` and `Poststop`.
-
-* [`Prestart`](#prestart) is a list of hooks to be run before the container process is executed
-* [`Poststart`](#poststart) is a list of hooks to be run immediately after the container process is started
-* [`Poststop`](#poststop) is a list of hooks to be run after the container process exits
-
-Hooks allow one to run code before/after various lifecycle events of the container.
-Hooks MUST be called in the listed order.
-The state of the container is passed to the hooks over stdin, so that the hooks can get the information they need to do their work.
-
-Hook paths are absolute and are executed from the host's filesystem.
-
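-A hook binary therefore typically begins by decoding the state from stdin; a minimal sketch, using the `State` type vendored in `state.go` later in this diff:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	// The runtime pipes the container state to the hook on stdin.
-	var state specs.State
-	if err := json.NewDecoder(os.Stdin).Decode(&state); err != nil {
-		fmt.Fprintln(os.Stderr, err)
-		os.Exit(1) // a non-zero exit from a prestart hook tears the container down
-	}
-	fmt.Printf("hook invoked for container %s (pid %d, bundle %s)\n",
-		state.ID, state.Pid, state.BundlePath)
-}
-```
-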
-### Prestart
-
-The pre-start hooks are called after the container process is spawned, but before the user-supplied command is executed.
-They are called after the container namespaces are created on Linux, so they provide an opportunity to customize the container.
-On Linux, for example, the network namespace could be configured in this hook.
-
-If a hook returns a non-zero exit code, then an error including the exit code and the stderr is returned to the caller and the container is torn down.
-
-### Poststart
-
-The post-start hooks are called after the user process is started.
-For example, this hook can notify the user that the real process has been spawned.
-
-If a hook returns a non-zero exit code, then an error is logged and the remaining hooks are executed.
-
-### Poststop
-
-The post-stop hooks are called after the container process is stopped.
-Cleanup or debugging could be performed in such a hook.
-If a hook returns a non-zero exit code, then an error is logged and the remaining hooks are executed.
-
-*Example*
-
-```json
-    "hooks" : {
-        "prestart": [
-            {
-                "path": "/usr/bin/fix-mounts",
-                "args": ["arg1", "arg2"],
-                "env":  [ "key1=value1"]
-            },
-            {
-                "path": "/usr/bin/setup-network"
-            }
-        ],
-        "poststart": [
-            {
-                "path": "/usr/bin/notify-start"
-            }
-        ],
-        "poststop": [
-            {
-                "path": "/usr/sbin/cleanup.sh",
-                "args": ["-f"]
-            }
-        ]
-    }
-```
-
-`path` is required for a hook.
-`args` and `env` are optional.
-
-## Mount Configuration
-
-Additional filesystems can be declared as "mounts", specified in the *mounts* object.
-Keys in this object are the names of mount points from the portable config.
-Values are objects containing the configuration of each mount point.
-The parameters are similar to the ones in [the Linux mount system call](http://man7.org/linux/man-pages/man2/mount.2.html).
-Only [mounts from the portable config](config.md#mount-points) will be mounted.
-
-* **`type`** (string, required) Linux: the *filesystemtype* arguments supported by the kernel are listed in */proc/filesystems* (e.g., "minix", "ext2", "ext3", "jfs", "xfs", "reiserfs", "msdos", "proc", "nfs", "iso9660"). Windows: ntfs.
-* **`source`** (string, required) Linux: a device name, but can also be a directory name or a dummy. Windows: the volume name that is the target of the mount point, e.g. \\?\Volume\{GUID}\ (on Windows, source is called target).
-* **`options`** (list of strings, optional) mount options in the fstab format [https://wiki.archlinux.org/index.php/Fstab](https://wiki.archlinux.org/index.php/Fstab).
-
-*Example (Linux)*
-
-```json
-"mounts": {
-    "proc": {
-        "type": "proc",
-        "source": "proc",
-        "options": []
-    },
-    "dev": {
-        "type": "tmpfs",
-        "source": "tmpfs",
-        "options": ["nosuid","strictatime","mode=755","size=65536k"]
-    },
-    "devpts": {
-        "type": "devpts",
-        "source": "devpts",
-        "options": ["nosuid","noexec","newinstance","ptmxmode=0666","mode=0620","gid=5"]
-    },
-    "data": {
-        "type": "bind",
-        "source": "/volumes/testing",
-        "options": ["rbind","rw"]
-    }
-}
-```
-
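-The same mount table can be assembled with the vendored `Mount` type from `runtime_config.go` later in this diff; a sketch, assuming the vendored import path:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	mounts := map[string]specs.Mount{
-		"proc": {Type: "proc", Source: "proc", Options: []string{}},
-		"data": {
-			Type:    "bind",
-			Source:  "/volumes/testing",
-			Options: []string{"rbind", "rw"},
-		},
-	}
-	out, err := json.MarshalIndent(mounts, "", "    ")
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(string(out))
-}
-```
-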
-*Example (Windows)*
-
-```json
-"mounts": {
-    "myfancymountpoint": {
-        "type": "ntfs",
-        "source": "\\\\?\\Volume\\{2eca078d-5cbc-43d3-aff8-7e8511f60d0e}\\",
-        "options": []
-    }
-}
-```
-
-See links for details about [mountvol](http://ss64.com/nt/mountvol.html) and [SetVolumeMountPoint](https://msdn.microsoft.com/en-us/library/windows/desktop/aa365561(v=vs.85).aspx) in Windows.
diff --git a/vendor/src/github.com/opencontainers/specs/runtime-linux.md b/vendor/src/github.com/opencontainers/specs/runtime-linux.md
deleted file mode 100644
index 36277a1..0000000
--- a/vendor/src/github.com/opencontainers/specs/runtime-linux.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Linux Runtime
-
-## File descriptors
-By default, only the `stdin`, `stdout` and `stderr` file descriptors are kept open for the application by the runtime.
-
-The runtime may pass additional file descriptors to the application to support features such as [socket activation](http://0pointer.de/blog/projects/socket-activated-containers.html).
-
-Some of the file descriptors may be redirected to `/dev/null` even though they are open.
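-
-As a rough illustration, a containerized application expecting one inherited socket beyond the standard three descriptors might recover it like this (the use of fd 3 is a socket-activation convention, not something this spec mandates):
-
-```go
-package main
-
-import (
-	"fmt"
-	"net"
-	"os"
-)
-
-func main() {
-	// The first descriptor after stdin (0), stdout (1) and stderr (2).
-	f := os.NewFile(3, "inherited-socket")
-	ln, err := net.FileListener(f)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, "no inherited listener:", err)
-		os.Exit(1)
-	}
-	defer ln.Close()
-	fmt.Println("listening on", ln.Addr())
-}
-```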
diff --git a/vendor/src/github.com/opencontainers/specs/runtime.md b/vendor/src/github.com/opencontainers/specs/runtime.md
deleted file mode 100644
index fdaf965..0000000
--- a/vendor/src/github.com/opencontainers/specs/runtime.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# Runtime and Lifecycle
-
-## State
-
-The runtime MUST store container metadata on disk so that external tools can consume and act on this information.
-It is recommended that this data be stored in a temporary filesystem so that it can be removed on a system reboot.
-On Linux/Unix based systems the metadata MUST be stored under `/run/opencontainer/containers`.
-For non-Linux/Unix based systems the location of the root metadata directory is currently undefined.
-Within that directory there MUST be one directory for each container created, where the name of the directory MUST be the ID of the container.
-For example: for a Linux container with an ID of `173975398351`, there will be a corresponding directory: `/run/opencontainer/containers/173975398351`.
-Within each container's directory, there MUST be a JSON encoded file called `state.json` that contains the runtime state of the container.
-For example: `/run/opencontainer/containers/173975398351/state.json`.
-
-The `state.json` file MUST contain all of the following properties:
-
-* **`version`**: (string) is the OCF specification version used when creating the container.
-* **`id`**: (string) is the container's ID.
-This MUST be unique across all containers on this host.
-There is no requirement that it be unique across hosts.
-The ID is provided in the state because hooks will be executed with the state as the payload.
-This allows the hooks to perform cleanup and teardown logic after the runtime destroys its own state.
-* **`pid`**: (int) is the ID of the main process within the container, as seen by the host.
-* **`bundlePath`**: (string) is the absolute path to the container's bundle directory.
-This is provided so that consumers can find the container's configuration and root filesystem on the host.
-
-*Example*
-
-```json
-{
-    "version": "0.2.0",
-    "id": "oc-container",
-    "pid": 4422,
-    "bundlePath": "/containers/redis"
-}
-```
-
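-A runtime might persist this state using the `State` type and `LinuxStateDirectory` constant vendored later in this diff (`state.go`, `runtime_config_linux.go`); a minimal sketch:
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/opencontainers/specs"
-)
-
-func main() {
-	state := specs.State{
-		Version:    specs.Version, // "0.2.0" for this vendored copy
-		ID:         "173975398351",
-		Pid:        4422,
-		BundlePath: "/containers/redis",
-	}
-	dir := filepath.Join(specs.LinuxStateDirectory, state.ID)
-	if err := os.MkdirAll(dir, 0755); err != nil {
-		panic(err)
-	}
-	data, err := json.Marshal(state)
-	if err != nil {
-		panic(err)
-	}
-	if err := ioutil.WriteFile(filepath.Join(dir, "state.json"), data, 0644); err != nil {
-		panic(err)
-	}
-}
-```
-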
-## Lifecycle
-
-### Create
-
-Creates the container: file system, namespaces, cgroups, capabilities.
-
-### Start (process)
-
-Runs a process in a container.
-Can be invoked several times.
-
-### Stop (process)
-
-It is not yet decided whether this operation is needed in the runc CLI; the process is killed from the outside.
-
-This event needs to be captured by runc to run on-stop event handlers.
-
-## Hooks
-
-See the [runtime configuration for hooks](./runtime-config.md).
diff --git a/vendor/src/github.com/opencontainers/specs/runtime_config.go b/vendor/src/github.com/opencontainers/specs/runtime_config.go
deleted file mode 100644
index 5665ca9..0000000
--- a/vendor/src/github.com/opencontainers/specs/runtime_config.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package specs
-
-// RuntimeSpec contains host-specific configuration information for
-// a container. This information must not be included when the bundle
-// is packaged for distribution.
-type RuntimeSpec struct {
-	// Mounts is a mapping of names to mount configurations.
-	// Which mounts will be mounted and where should be chosen with MountPoints
-	// in Spec.
-	Mounts map[string]Mount `json:"mounts"`
-	// Hooks are the commands run at various lifecycle events of the container.
-	Hooks Hooks `json:"hooks"`
-}
-
-// Hook specifies a command that is run at a particular event in the lifecycle of a container
-type Hook struct {
-	Path string   `json:"path"`
-	Args []string `json:"args"`
-	Env  []string `json:"env"`
-}
-
-// Hooks for container setup and teardown
-type Hooks struct {
-	// Prestart is a list of hooks to be run before the container process is executed.
-	// On Linux, they are run after the container namespaces are created.
-	Prestart []Hook `json:"prestart"`
-	// Poststart is a list of hooks to be run after the container process is started.
-	Poststart []Hook `json:"poststart"`
-	// Poststop is a list of hooks to be run after the container process exits.
-	Poststop []Hook `json:"poststop"`
-}
-
-// Mount specifies a mount for a container
-type Mount struct {
-	// Type specifies the mount kind.
-	Type string `json:"type"`
-	// Source specifies the source path of the mount.  In the case of bind mounts on
-	// linux based systems this would be the file on the host.
-	Source string `json:"source"`
-	// Options are fstab style mount options.
-	Options []string `json:"options"`
-}
diff --git a/vendor/src/github.com/opencontainers/specs/runtime_config_linux.go b/vendor/src/github.com/opencontainers/specs/runtime_config_linux.go
deleted file mode 100644
index bcace7e..0000000
--- a/vendor/src/github.com/opencontainers/specs/runtime_config_linux.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package specs
-
-import "os"
-
-// LinuxStateDirectory holds the container's state information
-const LinuxStateDirectory = "/run/opencontainer/containers"
-
-// LinuxRuntimeSpec is the full specification for linux containers.
-type LinuxRuntimeSpec struct {
-	RuntimeSpec
-	// LinuxRuntime is platform specific configuration for linux based containers.
-	Linux LinuxRuntime `json:"linux"`
-}
-
-// LinuxRuntime hosts the Linux-only runtime information
-type LinuxRuntime struct {
-	// UIDMapping specifies user mappings for supporting user namespaces on linux.
-	UIDMappings []IDMapping `json:"uidMappings"`
-	// GIDMapping specifies group mappings for supporting user namespaces on linux.
-	GIDMappings []IDMapping `json:"gidMappings"`
-	// Rlimits specifies rlimit options to apply to the container's process.
-	Rlimits []Rlimit `json:"rlimits"`
-	// Sysctl are a set of key value pairs that are set for the container on start
-	Sysctl map[string]string `json:"sysctl"`
-	// Resources contain cgroup information for handling resource constraints
-	// for the container
-	Resources *Resources `json:"resources"`
-	// CgroupsPath specifies the path to cgroups that are created and/or joined by the container.
-	// The path is expected to be relative to the cgroups mountpoint.
-	// If resources are specified, the cgroups at CgroupsPath will be updated based on resources.
-	CgroupsPath string `json:"cgroupsPath"`
-	// Namespaces contains the namespaces that are created and/or joined by the container
-	Namespaces []Namespace `json:"namespaces"`
-	// Devices are a list of device nodes that are created and enabled for the container
-	Devices []Device `json:"devices"`
-	// ApparmorProfile specified the apparmor profile for the container.
-	ApparmorProfile string `json:"apparmorProfile"`
-	// SelinuxProcessLabel specifies the selinux context that the container process is run as.
-	SelinuxProcessLabel string `json:"selinuxProcessLabel"`
-	// Seccomp specifies the seccomp security settings for the container.
-	Seccomp Seccomp `json:"seccomp"`
-	// RootfsPropagation is the rootfs mount propagation mode for the container
-	RootfsPropagation string `json:"rootfsPropagation"`
-}
-
-// Namespace is the configuration for a linux namespace
-type Namespace struct {
-	// Type is the type of Linux namespace
-	Type NamespaceType `json:"type"`
-	// Path is a path to an existing namespace persisted on disk that can be joined
-	// and is of the same type
-	Path string `json:"path"`
-}
-
-// NamespaceType is one of the linux namespaces
-type NamespaceType string
-
-const (
-	// PIDNamespace for isolating process IDs
-	PIDNamespace NamespaceType = "pid"
-	// NetworkNamespace for isolating network devices, stacks, ports, etc
-	NetworkNamespace = "network"
-	// MountNamespace for isolating mount points
-	MountNamespace = "mount"
-	// IPCNamespace for isolating System V IPC, POSIX message queues
-	IPCNamespace = "ipc"
-	// UTSNamespace for isolating hostname and NIS domain name
-	UTSNamespace = "uts"
-	// UserNamespace for isolating user and group IDs
-	UserNamespace = "user"
-)
-
-// IDMapping specifies UID/GID mappings
-type IDMapping struct {
-	// HostID is the UID/GID of the host user or group
-	HostID uint32 `json:"hostID"`
-	// ContainerID is the UID/GID of the container's user or group
-	ContainerID uint32 `json:"containerID"`
-	// Size is the length of the range of IDs mapped between the two namespaces
-	Size uint32 `json:"size"`
-}
-
-// Rlimit type and restrictions
-type Rlimit struct {
-	// Type of the rlimit to set
-	Type string `json:"type"`
-	// Hard is the hard limit for the specified type
-	Hard uint64 `json:"hard"`
-	// Soft is the soft limit for the specified type
-	Soft uint64 `json:"soft"`
-}
-
-// HugepageLimit structure corresponds to limiting kernel hugepages
-type HugepageLimit struct {
-	// Pagesize is the hugepage size
-	Pagesize string `json:"pageSize"`
-	// Limit is the limit of "hugepagesize" hugetlb usage
-	Limit uint64 `json:"limit"`
-}
-
-// InterfacePriority for network interfaces
-type InterfacePriority struct {
-	// Name is the name of the network interface
-	Name string `json:"name"`
-	// Priority for the interface
-	Priority uint32 `json:"priority"`
-}
-
-// blockIODevice holds major:minor format supported in blkio cgroup
-type blockIODevice struct {
-	// Major is the device's major number.
-	Major int64 `json:"major"`
-	// Minor is the device's minor number.
-	Minor int64 `json:"minor"`
-}
-
-// WeightDevice struct holds a `major:minor weight` pair for blkioWeightDevice
-type WeightDevice struct {
-	blockIODevice
-	// Weight is the bandwidth rate for the device, range is from 10 to 1000
-	Weight uint16 `json:"weight"`
-	// LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
-	LeafWeight uint16 `json:"leafWeight"`
-}
-
-// ThrottleDevice struct holds a `major:minor rate_per_second` pair
-type ThrottleDevice struct {
-	blockIODevice
-	// Rate is the IO rate limit per cgroup per device
-	Rate uint64 `json:"rate"`
-}
-
-// BlockIO for Linux cgroup 'blkio' resource management
-type BlockIO struct {
-	// Specifies per cgroup weight, range is from 10 to 1000
-	Weight uint16 `json:"blkioWeight"`
-	// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, CFQ scheduler only
-	LeafWeight uint16 `json:"blkioLeafWeight"`
-	// Weight per cgroup per device, can override BlkioWeight
-	WeightDevice []*WeightDevice `json:"blkioWeightDevice"`
-	// IO read rate limit per cgroup per device, bytes per second
-	ThrottleReadBpsDevice []*ThrottleDevice `json:"blkioThrottleReadBpsDevice"`
-	// IO write rate limit per cgroup per device, bytes per second
-	ThrottleWriteBpsDevice []*ThrottleDevice `json:"blkioThrottleWriteBpsDevice"`
-	// IO read rate limit per cgroup per device, IO per second
-	ThrottleReadIOPSDevice []*ThrottleDevice `json:"blkioThrottleReadIOPSDevice"`
-	// IO write rate limit per cgroup per device, IO per second
-	ThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkioThrottleWriteIOPSDevice"`
-}
-
-// Memory for Linux cgroup 'memory' resource management
-type Memory struct {
-	// Memory limit (in bytes)
-	Limit uint64 `json:"limit"`
-	// Memory reservation or soft_limit (in bytes)
-	Reservation uint64 `json:"reservation"`
-	// Total memory usage (memory + swap); set `-1' to disable swap
-	Swap uint64 `json:"swap"`
-	// Kernel memory limit (in bytes)
-	Kernel uint64 `json:"kernel"`
-	// How aggressive the kernel will swap memory pages. Range from 0 to 100. Set -1 to use system default
-	Swappiness uint64 `json:"swappiness"`
-}
-
-// CPU for Linux cgroup 'cpu' resource management
-type CPU struct {
-	// CPU shares (relative weight vs. other cgroups with cpu shares)
-	Shares uint64 `json:"shares"`
-	// CPU hardcap limit (in usecs). Allowed cpu time in a given period
-	Quota uint64 `json:"quota"`
-	// CPU period to be used for hardcapping (in usecs). 0 to use system default
-	Period uint64 `json:"period"`
-	// How much CPU time can be used for realtime scheduling (in usecs)
-	RealtimeRuntime uint64 `json:"realtimeRuntime"`
-	// CPU period to be used for realtime scheduling (in usecs)
-	RealtimePeriod uint64 `json:"realtimePeriod"`
-	// CPU to use within the cpuset
-	Cpus string `json:"cpus"`
-	// MEM to use within the cpuset
-	Mems string `json:"mems"`
-}
-
-// Pids for Linux cgroup 'pids' resource management (Linux 4.3)
-type Pids struct {
-	// Maximum number of PIDs. A value <= 0 indicates "no limit".
-	Limit int64 `json:"limit"`
-}
-
-// Network identification and priority configuration
-type Network struct {
-	// Set class identifier for container's network packets
-	// this is actually a string instead of a uint64 to overcome the json
-	// limitation of specifying hex numbers
-	ClassID string `json:"classID"`
-	// Set priority of network traffic for container
-	Priorities []InterfacePriority `json:"priorities"`
-}
-
-// Resources has container runtime resource constraints
-type Resources struct {
-	// DisableOOMKiller disables the OOM killer for out of memory conditions
-	DisableOOMKiller bool `json:"disableOOMKiller"`
-	// Specify an oom_score_adj for the container. Optional.
-	OOMScoreAdj int `json:"oomScoreAdj"`
-	// Memory restriction configuration
-	Memory Memory `json:"memory"`
-	// CPU resource restriction configuration
-	CPU CPU `json:"cpu"`
-	// Task resource restriction configuration.
-	Pids Pids `json:"pids"`
-	// BlockIO restriction configuration
-	BlockIO BlockIO `json:"blockIO"`
-	// Hugetlb limit (in bytes)
-	HugepageLimits []HugepageLimit `json:"hugepageLimits"`
-	// Network restriction configuration
-	Network Network `json:"network"`
-}
-
-// Device represents the information on a Linux special device file
-type Device struct {
-	// Path to the device.
-	Path string `json:"path"`
-	// Device type, block, char, etc.
-	Type rune `json:"type"`
-	// Major is the device's major number.
-	Major int64 `json:"major"`
-	// Minor is the device's minor number.
-	Minor int64 `json:"minor"`
-	// Cgroup permissions format, rwm.
-	Permissions string `json:"permissions"`
-	// FileMode permission bits for the device.
-	FileMode os.FileMode `json:"fileMode"`
-	// UID of the device.
-	UID uint32 `json:"uid"`
-	// Gid of the device.
-	GID uint32 `json:"gid"`
-}
-
-// Seccomp represents syscall restrictions
-type Seccomp struct {
-	DefaultAction Action     `json:"defaultAction"`
-	Architectures []Arch     `json:"architectures"`
-	Syscalls      []*Syscall `json:"syscalls"`
-}
-
-// Additional architectures permitted to be used for system calls
-// By default only the native architecture of the kernel is permitted
-type Arch string
-
-const (
-	ArchX86         Arch = "SCMP_ARCH_X86"
-	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
-	ArchX32         Arch = "SCMP_ARCH_X32"
-	ArchARM         Arch = "SCMP_ARCH_ARM"
-	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
-	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
-	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
-	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
-	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
-	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
-	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
-)
-
-// Action taken upon Seccomp rule match
-type Action string
-
-const (
-	ActKill  Action = "SCMP_ACT_KILL"
-	ActTrap  Action = "SCMP_ACT_TRAP"
-	ActErrno Action = "SCMP_ACT_ERRNO"
-	ActTrace Action = "SCMP_ACT_TRACE"
-	ActAllow Action = "SCMP_ACT_ALLOW"
-)
-
-// Operator used to match syscall arguments in Seccomp
-type Operator string
-
-const (
-	OpNotEqual     Operator = "SCMP_CMP_NE"
-	OpLessThan     Operator = "SCMP_CMP_LT"
-	OpLessEqual    Operator = "SCMP_CMP_LE"
-	OpEqualTo      Operator = "SCMP_CMP_EQ"
-	OpGreaterEqual Operator = "SCMP_CMP_GE"
-	OpGreaterThan  Operator = "SCMP_CMP_GT"
-	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
-)
-
-// Arg used for matching specific syscall arguments in Seccomp
-type Arg struct {
-	Index    uint     `json:"index"`
-	Value    uint64   `json:"value"`
-	ValueTwo uint64   `json:"valueTwo"`
-	Op       Operator `json:"op"`
-}
-
-// Syscall is used to match a syscall in Seccomp
-type Syscall struct {
-	Name   string `json:"name"`
-	Action Action `json:"action"`
-	Args   []*Arg `json:"args"`
-}
diff --git a/vendor/src/github.com/opencontainers/specs/state.go b/vendor/src/github.com/opencontainers/specs/state.go
deleted file mode 100644
index a663381..0000000
--- a/vendor/src/github.com/opencontainers/specs/state.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package specs
-
-// State holds information about the runtime state of the container.
-// This information will be stored in a file called `state.json`.
-// The location of this file will be operating system specific. On Linux
-// it will be in `/run/opencontainers/runc/<containerID>/state.json`
-type State struct {
-	// Version is the version of the specification that is supported.
-	Version string `json:"version"`
-	// ID is the container ID
-	ID string `json:"id"`
-	// Pid is the process id for the container's main process.
-	Pid int `json:"pid"`
-	// BundlePath is the path to the container's bundle directory.
-	BundlePath string `json:"bundlePath"`
-}
diff --git a/vendor/src/github.com/opencontainers/specs/version.go b/vendor/src/github.com/opencontainers/specs/version.go
deleted file mode 100644
index c2d1e29..0000000
--- a/vendor/src/github.com/opencontainers/specs/version.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package specs
-
-import "fmt"
-
-const (
-	// VersionMajor is for API-incompatible changes
-	VersionMajor = 0
-	// VersionMinor is for functionality added in a backwards-compatible manner
-	VersionMinor = 2
-	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 0
-)
-
-// Version is the specification version that the package types support.
-var Version = fmt.Sprintf("%d.%d.%d", VersionMajor, VersionMinor, VersionPatch)