Merge pull request #35589 from keloyang/close-fd

Close pipe in chrootarchive.invokeUnpack when cmd.Start()/json.NewEncoder failed
diff --git a/.DEREK.yml b/.DEREK.yml
new file mode 100644
index 0000000..3fd6789
--- /dev/null
+++ b/.DEREK.yml
@@ -0,0 +1,17 @@
+curators:
+  - aboch
+  - alexellis
+  - andrewhsu
+  - anonymuse
+  - chanwit
+  - ehazlett
+  - fntlnz
+  - gianarb
+  - mgoelzer
+  - programmerq
+  - rheinwein
+  - ripcurld0
+  - thajeztah
+
+features:
+  - comments
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b7961e1..a38f54d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -311,6 +311,36 @@
 will have time to make yourself available. You don't have to be a
 maintainer to make a difference on the project!
 
+### Manage issues and pull requests using the Derek bot
+
+If you want to help label, assign, close or reopen issues or pull requests
+without commit rights, ask a maintainer to add your GitHub handle to the
+`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
+GitHub's user permissions to help non-committers to manage issues and pull requests simply by commenting.
+
+For example:
+
+* Labels
+
+```
+Derek add label: kind/question
+Derek remove label: status/claimed
+```
+
+* Assign work
+
+```
+Derek assign: username
+Derek unassign: me
+```
+
+* Manage issues and PRs
+
+```
+Derek close
+Derek reopen
+```
+
 ## Moby community guidelines
 
 We want to keep the Moby community awesome, growing and collaborative. We need
diff --git a/Dockerfile b/Dockerfile
index e027d48..5f78eda 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -52,6 +52,7 @@
 	libapparmor-dev \
 	libcap-dev \
 	libdevmapper-dev \
+	libnet-dev \
 	libnl-3-dev \
 	libprotobuf-c0-dev \
 	libprotobuf-dev \
@@ -86,7 +87,7 @@
 #            will need updating, to avoid errors. Ping #docker-maintainers on IRC
 #            with a heads-up.
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
 	| tar -xzC /usr/local
 
@@ -94,11 +95,9 @@
 ENV GOPATH /go
 
 # Install CRIU for checkpoint/restore support
-ENV CRIU_VERSION 2.12.1
-# Install dependancy packages specific to criu
-RUN apt-get install libnet-dev -y && \
-	mkdir -p /usr/src/criu \
-	&& curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
+ENV CRIU_VERSION 3.6
+RUN mkdir -p /usr/src/criu \
+	&& curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
 	&& cd /usr/src/criu \
 	&& make \
 	&& make install-criu
diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64
index 876d809..58ca40d 100644
--- a/Dockerfile.aarch64
+++ b/Dockerfile.aarch64
@@ -73,7 +73,7 @@
 
 # Install Go
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" \
 	| tar -xzC /usr/local
 
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
index 33304b5..bc430e8 100644
--- a/Dockerfile.armhf
+++ b/Dockerfile.armhf
@@ -63,7 +63,7 @@
 
 # Install Go
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \
 	| tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
diff --git a/Dockerfile.e2e b/Dockerfile.e2e
index c57d7d3..7c6bb45 100644
--- a/Dockerfile.e2e
+++ b/Dockerfile.e2e
@@ -1,5 +1,5 @@
 ## Step 1: Build tests
-FROM golang:1.8.5-alpine3.6 as builder
+FROM golang:1.9.2-alpine3.6 as builder
 
 RUN apk add --update \
     bash \
diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
index 4abd889..fa7307b 100644
--- a/Dockerfile.ppc64le
+++ b/Dockerfile.ppc64le
@@ -64,7 +64,7 @@
 # Install Go
 # NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \
 	| tar -xzC /usr/local
 
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
index 33dfc43..e8e7830 100644
--- a/Dockerfile.s390x
+++ b/Dockerfile.s390x
@@ -58,7 +58,7 @@
 	--no-install-recommends
 
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \
 	| tar -xzC /usr/local
 
diff --git a/Dockerfile.simple b/Dockerfile.simple
index c84025e..2a5d30b 100644
--- a/Dockerfile.simple
+++ b/Dockerfile.simple
@@ -40,7 +40,7 @@
 #            will need updating, to avoid errors. Ping #docker-maintainers on IRC
 #            with a heads-up.
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
 	| tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
diff --git a/Dockerfile.windows b/Dockerfile.windows
index 2671aea..519a961 100644
--- a/Dockerfile.windows
+++ b/Dockerfile.windows
@@ -161,7 +161,7 @@
 # Environment variable notes:
 #  - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
 #  - FROM_DOCKERFILE is used for detection of building within a container.
-ENV GO_VERSION=1.8.5 `
+ENV GO_VERSION=1.9.2 `
     GIT_VERSION=2.11.1 `
     GOPATH=C:\go `
     FROM_DOCKERFILE=1
diff --git a/Makefile b/Makefile
index 3298815..6f5145a 100644
--- a/Makefile
+++ b/Makefile
@@ -53,7 +53,8 @@
 	-e http_proxy \
 	-e https_proxy \
 	-e no_proxy \
-	-e VERSION
+	-e VERSION \
+	-e PLATFORM
 # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
 
 # to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go
index 8f6aecd..535956d 100644
--- a/api/server/router/system/system_routes.go
+++ b/api/server/router/system/system_routes.go
@@ -6,7 +6,6 @@
 	"net/http"
 	"time"
 
-	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/events"
@@ -65,7 +64,6 @@
 
 func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	info := s.backend.SystemVersion()
-	info.APIVersion = api.DefaultVersion
 
 	return httputils.WriteJSON(w, http.StatusOK, info)
 }
diff --git a/api/swagger.yaml b/api/swagger.yaml
index a6b5ee5..b1533f6 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -254,6 +254,7 @@
         properties:
           Propagation:
             description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
+            type: "string"
             enum:
               - "private"
               - "rprivate"
@@ -332,6 +333,7 @@
       Memory:
         description: "Memory limit in bytes."
         type: "integer"
+        format: "int64"
         default: 0
       # Applicable to UNIX platforms
       CgroupParent:
@@ -606,17 +608,7 @@
             description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
               as a custom network's name to which this container should connect to."
           PortBindings:
-            type: "object"
-            description: "A map of exposed container ports and the host port they should map to."
-            additionalProperties:
-              type: "object"
-              properties:
-                HostIp:
-                  type: "string"
-                  description: "The host IP address"
-                HostPort:
-                  type: "string"
-                  description: "The host port number, as a string"
+            $ref: "#/definitions/PortMap"
           RestartPolicy:
             $ref: "#/definitions/RestartPolicy"
           AutoRemove:
@@ -832,9 +824,7 @@
           type: "string"
       Cmd:
         description: "Command to run specified as a string or an array of strings."
-        type:
-          - "array"
-          - "string"
+        type: "array"
         items:
           type: "string"
       Healthcheck:
@@ -862,9 +852,7 @@
           The entry point for the container as a string or an array of strings.
 
           If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
-        type:
-          - "array"
-          - "string"
+        type: "array"
         items:
           type: "string"
       NetworkDisabled:
@@ -6773,6 +6761,28 @@
           schema:
             type: "object"
             properties:
+              Platform:
+                type: "object"
+                required: [Name]
+                properties:
+                  Name:
+                    type: "string"
+              Components:
+                type: "array"
+                items:
+                  type: "object"
+                  x-go-name: ComponentVersion
+                  required: [Name, Version]
+                  properties:
+                    Name:
+                      type: "string"
+                    Version:
+                      type: "string"
+                      x-nullable: false
+                    Details:
+                      type: "object"
+                      x-nullable: true
+
               Version:
                 type: "string"
               ApiVersion:
@@ -7268,6 +7278,9 @@
               User:
                 type: "string"
                 description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`."
+              WorkingDir:
+                type: "string"
+                description: "The working directory for the exec process inside the container."
             example:
               AttachStdin: false
               AttachStdout: true
diff --git a/api/types/configs.go b/api/types/configs.go
index 20c19f2..54d3e39 100644
--- a/api/types/configs.go
+++ b/api/types/configs.go
@@ -50,6 +50,7 @@
 	Detach       bool     // Execute in detach mode
 	DetachKeys   string   // Escape keys for detach
 	Env          []string // Environment variables
+	WorkingDir   string   // Working directory
 	Cmd          []string // Execution commands and args
 }
 
diff --git a/api/types/swarm/runtime/plugin.proto b/api/types/swarm/runtime/plugin.proto
index 06eb7ba..6d63b77 100644
--- a/api/types/swarm/runtime/plugin.proto
+++ b/api/types/swarm/runtime/plugin.proto
@@ -1,5 +1,7 @@
 syntax = "proto3";
 
+option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";
+
 // PluginSpec defines the base payload which clients can specify for creating
 // a service with the plugin runtime.
 message PluginSpec {
diff --git a/api/types/types.go b/api/types/types.go
index f7ac772..7814e6b 100644
--- a/api/types/types.go
+++ b/api/types/types.go
@@ -107,9 +107,21 @@
 	Experimental bool
 }
 
+// ComponentVersion describes the version information for a specific component.
+type ComponentVersion struct {
+	Name    string
+	Version string
+	Details map[string]string `json:",omitempty"`
+}
+
 // Version contains response of Engine API:
 // GET "/version"
 type Version struct {
+	Platform   struct{ Name string } `json:",omitempty"`
+	Components []ComponentVersion    `json:",omitempty"`
+
+	// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility
+
 	Version       string
 	APIVersion    string `json:"ApiVersion"`
 	MinAPIVersion string `json:"MinAPIVersion,omitempty"`
diff --git a/builder/dockerfile/parser/testfiles/docker/Dockerfile b/builder/dockerfile/parser/testfiles/docker/Dockerfile
index 5153453..77aecd9 100644
--- a/builder/dockerfile/parser/testfiles/docker/Dockerfile
+++ b/builder/dockerfile/parser/testfiles/docker/Dockerfile
@@ -9,7 +9,7 @@
 # docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
 #
 # # Run the test suite:
-# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
+# docker run --privileged docker hack/make.sh test-unit test-integration test-docker-py
 #
 # # Publish a release:
 # docker run --privileged \
diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go
index b9d586a..c4ae197 100644
--- a/cmd/dockerd/config.go
+++ b/cmd/dockerd/config.go
@@ -59,6 +59,8 @@
 	flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull")
 	flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push")
 	flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout")
+	flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server")
+	flags.MarkHidden("network-diagnostic-port")
 
 	flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address")
 	flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features")
diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go
index 41e6b61..a65d8ed 100644
--- a/cmd/dockerd/daemon_unix.go
+++ b/cmd/dockerd/daemon_unix.go
@@ -14,7 +14,6 @@
 	"github.com/docker/docker/cmd/dockerd/hack"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/libcontainerd"
-	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/libnetwork/portallocator"
 	"golang.org/x/sys/unix"
 )
@@ -38,24 +37,13 @@
 }
 
 func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) {
-	// On older kernel, letting putting the containerd-shim in its own
-	// namespace will effectively prevent operations such as unlink, rename
-	// and remove on mountpoints that were present at the time the shim
-	// namespace was created. This would led to a famous EBUSY will trying to
-	// remove shm mounts.
-	var noNewNS bool
-	if !kernel.CheckKernelVersion(3, 18, 0) {
-		noNewNS = true
-	}
-
 	opts := []libcontainerd.RemoteOption{
 		libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust),
 		libcontainerd.WithPlugin("linux", &linux.Config{
-			Shim:          daemon.DefaultShimBinary,
-			Runtime:       daemon.DefaultRuntimeBinary,
-			RuntimeRoot:   filepath.Join(cli.Config.Root, "runc"),
-			ShimDebug:     cli.Config.Debug,
-			ShimNoMountNS: noNewNS,
+			Shim:        daemon.DefaultShimBinary,
+			Runtime:     daemon.DefaultRuntimeBinary,
+			RuntimeRoot: filepath.Join(cli.Config.Root, "runc"),
+			ShimDebug:   cli.Config.Debug,
 		}),
 	}
 	if cli.Config.Debug {
diff --git a/container/container.go b/container/container.go
index 3e8a370..11814b7 100644
--- a/container/container.go
+++ b/container/container.go
@@ -15,7 +15,7 @@
 	"syscall"
 	"time"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	networktypes "github.com/docker/docker/api/types/network"
@@ -1004,7 +1004,7 @@
 }
 
 // InitializeStdio is called by libcontainerd to connect the stdio.
-func (container *Container) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+func (container *Container) InitializeStdio(iop *libcontainerd.IOPipe) (cio.IO, error) {
 	if err := container.startLogging(); err != nil {
 		container.Reset(false)
 		return nil, err
@@ -1020,7 +1020,7 @@
 		}
 	}
 
-	return &cio{IO: iop, sc: container.StreamConfig}, nil
+	return &rio{IO: iop, sc: container.StreamConfig}, nil
 }
 
 // SecretMountPath returns the path of the secret mount for the container
@@ -1078,19 +1078,19 @@
 	return env
 }
 
-type cio struct {
-	containerd.IO
+type rio struct {
+	cio.IO
 
 	sc *stream.Config
 }
 
-func (i *cio) Close() error {
+func (i *rio) Close() error {
 	i.IO.Close()
 
 	return i.sc.CloseStreams()
 }
 
-func (i *cio) Wait() {
+func (i *rio) Wait() {
 	i.sc.Wait()
 
 	i.IO.Wait()
diff --git a/contrib/builder/deb/aarch64/debian-jessie/Dockerfile b/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
index 865d6aa..a0a6612 100644
--- a/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
@@ -7,7 +7,7 @@
 RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/aarch64/debian-stretch/Dockerfile b/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
index 2b561be..90525db 100644
--- a/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
+++ b/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
index e1b85ec..5d2d58e 100644
--- a/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
index 6f8bc95..cb9b681 100644
--- a/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/contrib/builder/deb/amd64/debian-jessie/Dockerfile
index 668fc3c..5b8b785 100644
--- a/contrib/builder/deb/amd64/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/amd64/debian-jessie/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/contrib/builder/deb/amd64/debian-stretch/Dockerfile
index a23eba3..aa96240 100644
--- a/contrib/builder/deb/amd64/debian-stretch/Dockerfile
+++ b/contrib/builder/deb/amd64/debian-stretch/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
index 2652706..2f4e051 100644
--- a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
+++ b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
@@ -12,7 +12,7 @@
 RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/*
 RUN apt-get update && apt-get install -y apparmor bash-completion  build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
index 4fce6f3..a010566 100644
--- a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
index ed9c4a9..e2768c3 100644
--- a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
index a7dd9b7..419522c 100644
--- a/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
index 5074efe..98314f1 100644
--- a/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/contrib/builder/deb/armhf/debian-jessie/Dockerfile
index 558d353..048e774 100644
--- a/contrib/builder/deb/armhf/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/armhf/debian-jessie/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
index 31a6688..c80a3f6 100644
--- a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
+++ b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 # GOARM is the ARM architecture version which is unrelated to the above Golang version
 ENV GOARM 6
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
diff --git a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
index a9899a0..b6fc393 100644
--- a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
index 2766f33..cc9284f 100644
--- a/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
index 27edd04..57a77ac 100644
--- a/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
index b85a68e..d29ac51 100644
--- a/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
index abb5b23..730bacb 100644
--- a/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
index d725816..27cfd29 100644
--- a/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
index 6d61ed7..2233897 100644
--- a/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
index e30e875..b2a0cf5 100644
--- a/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile b/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
index 8e755cd..1f0a0b7 100644
--- a/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
+++ b/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
@@ -5,9 +5,9 @@
 FROM amazonlinux:latest
 
 RUN yum groupinstall -y "Development Tools"
-RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel  tar git cmake vim-common
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel  tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/centos-7/Dockerfile b/contrib/builder/rpm/amd64/centos-7/Dockerfile
index 8a22064..5767b8c 100644
--- a/contrib/builder/rpm/amd64/centos-7/Dockerfile
+++ b/contrib/builder/rpm/amd64/centos-7/Dockerfile
@@ -6,9 +6,9 @@
 
 RUN yum groupinstall -y "Development Tools"
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
-RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/contrib/builder/rpm/amd64/fedora-24/Dockerfile
index e0b369a..1e07523 100644
--- a/contrib/builder/rpm/amd64/fedora-24/Dockerfile
+++ b/contrib/builder/rpm/amd64/fedora-24/Dockerfile
@@ -6,9 +6,9 @@
 
 RUN dnf -y upgrade
 RUN dnf install -y @development-tools fedora-packager
-RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
+RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/contrib/builder/rpm/amd64/fedora-25/Dockerfile
index f259a5c..23e2463 100644
--- a/contrib/builder/rpm/amd64/fedora-25/Dockerfile
+++ b/contrib/builder/rpm/amd64/fedora-25/Dockerfile
@@ -6,9 +6,9 @@
 
 RUN dnf -y upgrade
 RUN dnf install -y @development-tools fedora-packager
-RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
+RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
index 7f13863..b55bfee 100644
--- a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
+++ b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
@@ -5,9 +5,9 @@
 FROM opensuse:13.2
 
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
-RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim systemd-rpm-macros
+RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim systemd-rpm-macros
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
index b75f2dc..1b395c2 100644
--- a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
+++ b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
@@ -8,9 +8,9 @@
 RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek
 
 RUN yum groupinstall -y "Development Tools"
-RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel  tar git cmake vim-common
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static  libselinux-devel pkgconfig selinux-policy selinux-policy-devel  tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
index f4dc894..ec6db98 100644
--- a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
+++ b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
@@ -5,9 +5,9 @@
 FROM oraclelinux:7
 
 RUN yum groupinstall -y "Development Tools"
-RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
+RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
index 01d5fea..be9463c 100644
--- a/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
+++ b/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
@@ -5,9 +5,9 @@
 FROM photon:1.0
 
 RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils
-RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
+RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/armhf/centos-7/Dockerfile b/contrib/builder/rpm/armhf/centos-7/Dockerfile
index 79c2ef1..8e77e8b 100644
--- a/contrib/builder/rpm/armhf/centos-7/Dockerfile
+++ b/contrib/builder/rpm/armhf/centos-7/Dockerfile
@@ -7,9 +7,9 @@
 RUN yum install -y yum-plugin-ovl
 RUN yum groupinstall --skip-broken -y "Development Tools"
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
-RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/ppc64le/centos-7/Dockerfile b/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
index ebce3c0..8bcc4be 100644
--- a/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
+++ b/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
@@ -6,10 +6,10 @@
 
 RUN yum groupinstall -y "Development Tools"
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
-RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
+RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
-RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
+ENV GO_VERSION 1.9.2
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
 ENV AUTO_GOPATH 1
diff --git a/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile b/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
index 18dd7d4..32321fe 100644
--- a/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
+++ b/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
@@ -6,9 +6,9 @@
 
 RUN dnf -y upgrade
 RUN dnf install -y @development-tools fedora-packager
-RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake
+RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile b/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
index 3343f02..04f158c 100644
--- a/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
+++ b/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
@@ -7,10 +7,10 @@
 RUN zypper addrepo -n ppc64le-oss -f https://download.opensuse.org/ports/ppc/distribution/leap/42.1/repo/oss/ ppc64le-oss
 RUN zypper addrepo -n ppc64le-updates -f https://download.opensuse.org/ports/update/42.1/ ppc64le-updates
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
-RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim
+RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim
 
-ENV GO_VERSION 1.8.5
-RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
+ENV GO_VERSION 1.9.2
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
 ENV AUTO_GOPATH 1
diff --git a/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile b/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
index ef875b8..27195d3 100644
--- a/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
+++ b/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
@@ -6,9 +6,9 @@
 
 
 RUN touch /var/lib/rpm/* && yum groupinstall -y "Development Tools"
-RUN touch /var/lib/rpm/* && yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
+RUN touch /var/lib/rpm/* && yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile b/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
index a05bee5..275fab5 100644
--- a/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
+++ b/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
@@ -7,9 +7,9 @@
 
 RUN zypper ar https://download.opensuse.org/ports/zsystems/tumbleweed/repo/oss/ tumbleweed
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
-RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros
+RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros
 
-ENV GO_VERSION 1.8.5
+ENV GO_VERSION 1.9.2
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/daemon/build.go b/daemon/build.go
index 6a00814..3674ad9 100644
--- a/daemon/build.go
+++ b/daemon/build.go
@@ -71,11 +71,7 @@
 	if err != nil {
 		return nil, err
 	}
-
-	if layer.IsEmpty(newLayer.DiffID()) {
-		_, err := rl.layerStore.Release(newLayer)
-		return &releaseableLayer{layerStore: rl.layerStore}, err
-	}
+	// TODO: An optimization would be to handle empty layers before returning
 	return &releaseableLayer{layerStore: rl.layerStore, roLayer: newLayer}, nil
 }
 
diff --git a/daemon/checkpoint.go b/daemon/checkpoint.go
index 5765af7..482e3b8 100644
--- a/daemon/checkpoint.go
+++ b/daemon/checkpoint.go
@@ -34,9 +34,6 @@
 			err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName)
 		case err != nil && os.IsNotExist(err):
 			err2 = os.MkdirAll(checkpointAbsDir, 0700)
-			if os.IsExist(err2) {
-				err2 = nil
-			}
 		case err != nil:
 			err2 = err
 		case err == nil:
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
index dda1259..df14a7a 100644
--- a/daemon/cluster/executor/container/controller.go
+++ b/daemon/cluster/executor/container/controller.go
@@ -183,7 +183,7 @@
 
 	for {
 		if err := r.adapter.start(ctx); err != nil {
-			if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
+			if _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork); ok {
 				// Retry network creation again if we
 				// failed because some of the networks
 				// were not found.
diff --git a/daemon/config/config.go b/daemon/config/config.go
index 1e22a6f..199fae6 100644
--- a/daemon/config/config.go
+++ b/daemon/config/config.go
@@ -85,26 +85,27 @@
 // It includes json tags to deserialize configuration from a file
 // using the same names that the flags in the command line use.
 type CommonConfig struct {
-	AuthzMiddleware      *authorization.Middleware `json:"-"`
-	AuthorizationPlugins []string                  `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
-	AutoRestart          bool                      `json:"-"`
-	Context              map[string][]string       `json:"-"`
-	DisableBridge        bool                      `json:"-"`
-	DNS                  []string                  `json:"dns,omitempty"`
-	DNSOptions           []string                  `json:"dns-opts,omitempty"`
-	DNSSearch            []string                  `json:"dns-search,omitempty"`
-	ExecOptions          []string                  `json:"exec-opts,omitempty"`
-	GraphDriver          string                    `json:"storage-driver,omitempty"`
-	GraphOptions         []string                  `json:"storage-opts,omitempty"`
-	Labels               []string                  `json:"labels,omitempty"`
-	Mtu                  int                       `json:"mtu,omitempty"`
-	Pidfile              string                    `json:"pidfile,omitempty"`
-	RawLogs              bool                      `json:"raw-logs,omitempty"`
-	RootDeprecated       string                    `json:"graph,omitempty"`
-	Root                 string                    `json:"data-root,omitempty"`
-	ExecRoot             string                    `json:"exec-root,omitempty"`
-	SocketGroup          string                    `json:"group,omitempty"`
-	CorsHeaders          string                    `json:"api-cors-header,omitempty"`
+	AuthzMiddleware       *authorization.Middleware `json:"-"`
+	AuthorizationPlugins  []string                  `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
+	AutoRestart           bool                      `json:"-"`
+	Context               map[string][]string       `json:"-"`
+	DisableBridge         bool                      `json:"-"`
+	DNS                   []string                  `json:"dns,omitempty"`
+	DNSOptions            []string                  `json:"dns-opts,omitempty"`
+	DNSSearch             []string                  `json:"dns-search,omitempty"`
+	ExecOptions           []string                  `json:"exec-opts,omitempty"`
+	GraphDriver           string                    `json:"storage-driver,omitempty"`
+	GraphOptions          []string                  `json:"storage-opts,omitempty"`
+	Labels                []string                  `json:"labels,omitempty"`
+	Mtu                   int                       `json:"mtu,omitempty"`
+	NetworkDiagnosticPort int                       `json:"network-diagnostic-port,omitempty"`
+	Pidfile               string                    `json:"pidfile,omitempty"`
+	RawLogs               bool                      `json:"raw-logs,omitempty"`
+	RootDeprecated        string                    `json:"graph,omitempty"`
+	Root                  string                    `json:"data-root,omitempty"`
+	ExecRoot              string                    `json:"exec-root,omitempty"`
+	SocketGroup           string                    `json:"group,omitempty"`
+	CorsHeaders           string                    `json:"api-cors-header,omitempty"`
 
 	// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
 	// when pushing to a registry which does not support schema 2. This field is marked as
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 96b39e8..e63e209 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -675,14 +675,14 @@
 	}
 
 	daemonRepo := filepath.Join(config.Root, "containers")
-	if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
 		return nil, err
 	}
 
 	// Create the directory where we'll store the runtime scripts (i.e. in
 	// order to support runtimeArgs)
 	daemonRuntimes := filepath.Join(config.Root, "runtimes")
-	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil && !os.IsExist(err) {
+	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
 		return nil, err
 	}
 	if err := d.loadRuntimes(); err != nil {
@@ -690,7 +690,7 @@
 	}
 
 	if runtime.GOOS == "windows" {
-		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil && !os.IsExist(err) {
+		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
 			return nil, err
 		}
 	}
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index 422be1f..b70e00e 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -7,6 +7,7 @@
 	"runtime"
 	"testing"
 
+	"github.com/docker/docker/api/errdefs"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/container"
 	_ "github.com/docker/docker/pkg/discovery/memory"
@@ -17,6 +18,8 @@
 	"github.com/docker/docker/volume/local"
 	"github.com/docker/docker/volume/store"
 	"github.com/docker/go-connections/nat"
+	"github.com/docker/libnetwork"
+	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -311,3 +314,12 @@
 	_, err := d.verifyContainerSettings(runtime.GOOS, &containertypes.HostConfig{Isolation: containertypes.Isolation("invalid")}, nil, false)
 	assert.EqualError(t, err, "invalid isolation 'invalid' on "+runtime.GOOS)
 }
+
+func TestFindNetworkErrorType(t *testing.T) {
+	d := Daemon{}
+	_, err := d.FindNetwork("fakeNet")
+	_, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork)
+	if !errdefs.IsNotFound(err) || !ok {
+		assert.Fail(t, "The FindNetwork method MUST always return an error that implements the NotFound interface and is ErrNoSuchNetwork")
+	}
+}
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index b0b6247..1f9885d 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -1514,7 +1514,7 @@
 
 func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
 	if sysinfoPresent && configValue != 0 {
-		if err := os.MkdirAll(path, 0755); err != nil && !os.IsExist(err) {
+		if err := os.MkdirAll(path, 0755); err != nil {
 			return err
 		}
 		if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil {
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
index 8545a2f..8029bbf 100644
--- a/daemon/daemon_windows.go
+++ b/daemon/daemon_windows.go
@@ -3,7 +3,6 @@
 import (
 	"context"
 	"fmt"
-	"os"
 	"path/filepath"
 	"strings"
 
@@ -485,7 +484,7 @@
 func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
 	config.Root = rootDir
 	// Create the root directory if it doesn't exists
-	if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil && !os.IsExist(err) {
+	if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil {
 		return err
 	}
 	return nil
diff --git a/daemon/errors.go b/daemon/errors.go
index 889261f..8e0756a 100644
--- a/daemon/errors.go
+++ b/daemon/errors.go
@@ -21,10 +21,6 @@
 	return objNotFoundError{"volume", id}
 }
 
-func networkNotFound(id string) error {
-	return objNotFoundError{"network", id}
-}
-
 type objNotFoundError struct {
 	object string
 	id     string
@@ -230,3 +226,20 @@
 	// TODO: it would be nice to get some better errors from containerd so we can return better errors here
 	return retErr
 }
+
+// TODO: cpuguy83 take care of it once the new library is ready
+type errNotFound struct{ error }
+
+func (errNotFound) NotFound() {}
+
+func (e errNotFound) Cause() error {
+	return e.error
+}
+
+// notFound is a helper to create an error of the class with the same name from any error type
+func notFound(err error) error {
+	if err == nil {
+		return nil
+	}
+	return errNotFound{err}
+}
diff --git a/daemon/exec.go b/daemon/exec.go
index afdfc9c..83b7de2 100644
--- a/daemon/exec.go
+++ b/daemon/exec.go
@@ -31,14 +31,6 @@
 	d.execCommands.Add(config.ID, config)
 }
 
-func (d *Daemon) registerExecPidUnlocked(container *container.Container, config *exec.Config) {
-	logrus.Debugf("registering pid %v for exec %v", config.Pid, config.ID)
-	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
-	container.ExecCommands.SetPidUnlocked(config.ID, config.Pid)
-	// Storing execs in daemon for easy access via Engine API.
-	d.execCommands.SetPidUnlocked(config.ID, config.Pid)
-}
-
 // ExecExists looks up the exec instance and returns a bool if it exists or not.
 // It will also return the error produced by `getConfig`
 func (d *Daemon) ExecExists(name string) (bool, error) {
@@ -130,6 +122,7 @@
 	execConfig.Tty = config.Tty
 	execConfig.Privileged = config.Privileged
 	execConfig.User = config.User
+	execConfig.WorkingDir = config.WorkingDir
 
 	linkedEnv, err := d.setupLinkedContainers(cntr)
 	if err != nil {
@@ -139,6 +132,9 @@
 	if len(execConfig.User) == 0 {
 		execConfig.User = cntr.Config.User
 	}
+	if len(execConfig.WorkingDir) == 0 {
+		execConfig.WorkingDir = cntr.Config.WorkingDir
+	}
 
 	d.registerExecCommand(cntr, execConfig)
 
@@ -219,7 +215,7 @@
 		Args:     append([]string{ec.Entrypoint}, ec.Args...),
 		Env:      ec.Env,
 		Terminal: ec.Tty,
-		Cwd:      c.Config.WorkingDir,
+		Cwd:      ec.WorkingDir,
 	}
 	if p.Cwd == "" {
 		p.Cwd = "/"
@@ -253,7 +249,6 @@
 		return translateContainerdStartErr(ec.Entrypoint, ec.SetExitCode, err)
 	}
 	ec.Pid = systemPid
-	d.registerExecPidUnlocked(c, ec)
 	c.ExecCommands.Unlock()
 	ec.Unlock()
 
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
index 7aa2383..370b403 100644
--- a/daemon/exec/exec.go
+++ b/daemon/exec/exec.go
@@ -4,7 +4,7 @@
 	"runtime"
 	"sync"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/stringid"
@@ -31,6 +31,7 @@
 	Tty          bool
 	Privileged   bool
 	User         string
+	WorkingDir   string
 	Env          []string
 	Pid          int
 }
@@ -43,26 +44,26 @@
 	}
 }
 
-type cio struct {
-	containerd.IO
+type rio struct {
+	cio.IO
 
 	sc *stream.Config
 }
 
-func (i *cio) Close() error {
+func (i *rio) Close() error {
 	i.IO.Close()
 
 	return i.sc.CloseStreams()
 }
 
-func (i *cio) Wait() {
+func (i *rio) Wait() {
 	i.sc.Wait()
 
 	i.IO.Wait()
 }
 
 // InitializeStdio is called by libcontainerd to connect the stdio.
-func (c *Config) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+func (c *Config) InitializeStdio(iop *libcontainerd.IOPipe) (cio.IO, error) {
 	c.StreamConfig.CopyToPipe(iop)
 
 	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
@@ -73,7 +74,7 @@
 		}
 	}
 
-	return &cio{IO: iop, sc: c.StreamConfig}, nil
+	return &rio{IO: iop, sc: c.StreamConfig}, nil
 }
 
 // CloseStreams closes the stdio streams for the exec
@@ -88,16 +89,14 @@
 
 // Store keeps track of the exec configurations.
 type Store struct {
-	byID  map[string]*Config
-	byPid map[int]*Config
+	byID map[string]*Config
 	sync.RWMutex
 }
 
 // NewStore initializes a new exec store.
 func NewStore() *Store {
 	return &Store{
-		byID:  make(map[string]*Config),
-		byPid: make(map[int]*Config),
+		byID: make(map[string]*Config),
 	}
 }
 
@@ -119,14 +118,6 @@
 	e.Unlock()
 }
 
-// SetPidUnlocked adds an association between a Pid and a config, it does not
-// synchronized with other operations.
-func (e *Store) SetPidUnlocked(id string, pid int) {
-	if config, ok := e.byID[id]; ok {
-		e.byPid[pid] = config
-	}
-}
-
 // Get returns an exec configuration by its id.
 func (e *Store) Get(id string) *Config {
 	e.RLock()
@@ -135,18 +126,9 @@
 	return res
 }
 
-// ByPid returns an exec configuration by its pid.
-func (e *Store) ByPid(pid int) *Config {
-	e.RLock()
-	res := e.byPid[pid]
-	e.RUnlock()
-	return res
-}
-
 // Delete removes an exec configuration from the store.
 func (e *Store) Delete(id string, pid int) {
 	e.Lock()
-	delete(e.byPid, pid)
 	delete(e.byID, id)
 	e.Unlock()
 }
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index 5a1f3d1..248b8bf 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -89,7 +89,16 @@
 		return nil, graphdriver.ErrNotSupported
 	}
 
-	fsMagic, err := graphdriver.GetFSMagic(root)
+	// Perform feature detection on /var/lib/docker/aufs if it's an existing directory.
+	// This covers situations where /var/lib/docker/aufs is a mount, and on a different
+	// filesystem than /var/lib/docker.
+	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
+	testdir := root
+	if _, err := os.Stat(testdir); os.IsNotExist(err) {
+		testdir = filepath.Dir(testdir)
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(testdir)
 	if err != nil {
 		return nil, err
 	}
@@ -122,13 +131,8 @@
 	if err != nil {
 		return nil, err
 	}
-	// Create the root aufs driver dir and return
-	// if it already exists
-	// If not populate the dir structure
+	// Create the root aufs driver dir
 	if err := idtools.MkdirAllAndChown(root, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil {
-		if os.IsExist(err) {
-			return a, nil
-		}
 		return nil, err
 	}
 
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index 0dabf71..57313c9 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -51,7 +51,16 @@
 // An error is returned if BTRFS is not supported.
 func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
 
-	fsMagic, err := graphdriver.GetFSMagic(home)
+	// Perform feature detection on /var/lib/docker/btrfs if it's an existing directory.
+	// This covers situations where /var/lib/docker/btrfs is a mount, and on a different
+	// filesystem than /var/lib/docker.
+	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
+	testdir := home
+	if _, err := os.Stat(testdir); os.IsNotExist(err) {
+		testdir = filepath.Dir(testdir)
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(testdir)
 	if err != nil {
 		return nil, err
 	}
diff --git a/daemon/graphdriver/copy/copy.go b/daemon/graphdriver/copy/copy.go
index 8ec458d..7a98bec 100644
--- a/daemon/graphdriver/copy/copy.go
+++ b/daemon/graphdriver/copy/copy.go
@@ -11,6 +11,7 @@
 */
 import "C"
 import (
+	"container/list"
 	"fmt"
 	"io"
 	"os"
@@ -65,7 +66,7 @@
 		// as the ioctl may not have been available (therefore EINVAL)
 		if err == unix.EXDEV || err == unix.ENOSYS {
 			*copyWithFileRange = false
-		} else if err != nil {
+		} else {
 			return err
 		}
 	}
@@ -106,11 +107,28 @@
 	return nil
 }
 
+type fileID struct {
+	dev uint64
+	ino uint64
+}
+
+type dirMtimeInfo struct {
+	dstPath *string
+	stat    *syscall.Stat_t
+}
+
 // DirCopy copies or hardlinks the contents of one directory to another,
 // properly handling xattrs, and soft links
-func DirCopy(srcDir, dstDir string, copyMode Mode) error {
+//
+// Copying xattrs can be opted out of by passing false for copyXattrs.
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 	copyWithFileRange := true
 	copyWithFileClone := true
+
+	// This is a map of source file inodes to dst file paths
+	copiedFiles := make(map[fileID]string)
+
+	dirsToSetMtimes := list.New()
 	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
 		if err != nil {
 			return err
@@ -136,15 +154,21 @@
 
 		switch f.Mode() & os.ModeType {
 		case 0: // Regular file
+			id := fileID{dev: stat.Dev, ino: stat.Ino}
 			if copyMode == Hardlink {
 				isHardlink = true
 				if err2 := os.Link(srcPath, dstPath); err2 != nil {
 					return err2
 				}
+			} else if hardLinkDstPath, ok := copiedFiles[id]; ok {
+				if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
+					return err2
+				}
 			} else {
 				if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
 					return err2
 				}
+				copiedFiles[id] = dstPath
 			}
 
 		case os.ModeDir:
@@ -192,16 +216,10 @@
 			return err
 		}
 
-		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
-			return err
-		}
-
-		// We need to copy this attribute if it appears in an overlay upper layer, as
-		// this function is used to copy those. It is set by overlay if a directory
-		// is removed and then re-created and should not inherit anything from the
-		// same dir in the lower dir.
-		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
-			return err
+		if copyXattrs {
+			if err := doCopyXattrs(srcPath, dstPath); err != nil {
+				return err
+			}
 		}
 
 		isSymlink := f.Mode()&os.ModeSymlink != 0
@@ -216,7 +234,9 @@
 
 		// system.Chtimes doesn't support a NOFOLLOW flag atm
 		// nolint: unconvert
-		if !isSymlink {
+		if f.IsDir() {
+			dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
+		} else if !isSymlink {
 			aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
 			mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
 			if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
@@ -230,5 +250,31 @@
 		}
 		return nil
 	})
-	return err
+	if err != nil {
+		return err
+	}
+	for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
+		mtimeInfo := e.Value.(*dirMtimeInfo)
+		ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
+		if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func doCopyXattrs(srcPath, dstPath string) error {
+	if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+		return err
+	}
+
+	// We need to copy this attribute if it appears in an overlay upper layer, as
+	// this function is used to copy those. It is set by overlay if a directory
+	// is removed and then re-created and should not inherit anything from the
+	// same dir in the lower dir.
+	if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+		return err
+	}
+	return nil
 }
diff --git a/daemon/graphdriver/copy/copy_test.go b/daemon/graphdriver/copy/copy_test.go
index 6976503..d216991 100644
--- a/daemon/graphdriver/copy/copy_test.go
+++ b/daemon/graphdriver/copy/copy_test.go
@@ -3,15 +3,20 @@
 package copy
 
 import (
+	"fmt"
 	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"
+	"syscall"
 	"testing"
+	"time"
 
 	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/docker/pkg/system"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
 )
 
 func TestIsCopyFileRangeSyscallAvailable(t *testing.T) {
@@ -45,6 +50,84 @@
 	doCopyTest(t, &copyWithFileRange, &copyWithFileClone)
 }
 
+func TestCopyDir(t *testing.T) {
+	srcDir, err := ioutil.TempDir("", "srcDir")
+	require.NoError(t, err)
+	populateSrcDir(t, srcDir, 3)
+
+	dstDir, err := ioutil.TempDir("", "testdst")
+	require.NoError(t, err)
+	defer os.RemoveAll(dstDir)
+
+	assert.NoError(t, DirCopy(srcDir, dstDir, Content, false))
+	require.NoError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		require.NoError(t, err)
+		if relPath == "." {
+			return nil
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+		require.NoError(t, err)
+
+		// If we add non-regular dirs and files to the test
+		// then we need to add more checks here.
+		dstFileInfo, err := os.Lstat(dstPath)
+		require.NoError(t, err)
+
+		srcFileSys := f.Sys().(*syscall.Stat_t)
+		dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t)
+
+		t.Log(relPath)
+		if srcFileSys.Dev == dstFileSys.Dev {
+			assert.NotEqual(t, srcFileSys.Ino, dstFileSys.Ino)
+		}
+		// TODO: check size, and ctim is not equal
+		// on filesystems that have granular ctimes
+		assert.Equal(t, srcFileSys.Mode, dstFileSys.Mode)
+		assert.Equal(t, srcFileSys.Uid, dstFileSys.Uid)
+		assert.Equal(t, srcFileSys.Gid, dstFileSys.Gid)
+		assert.Equal(t, srcFileSys.Mtim, dstFileSys.Mtim)
+
+		return nil
+	}))
+}
+
+func randomMode(baseMode int) os.FileMode {
+	for i := 0; i < 7; i++ {
+		baseMode = baseMode | (1&rand.Intn(2))<<uint(i)
+	}
+	return os.FileMode(baseMode)
+}
+
+func populateSrcDir(t *testing.T, srcDir string, remainingDepth int) {
+	if remainingDepth == 0 {
+		return
+	}
+	aTime := time.Unix(rand.Int63(), 0)
+	mTime := time.Unix(rand.Int63(), 0)
+
+	for i := 0; i < 10; i++ {
+		dirName := filepath.Join(srcDir, fmt.Sprintf("srcdir-%d", i))
+		// Owner all bits set
+		require.NoError(t, os.Mkdir(dirName, randomMode(0700)))
+		populateSrcDir(t, dirName, remainingDepth-1)
+		require.NoError(t, system.Chtimes(dirName, aTime, mTime))
+	}
+
+	for i := 0; i < 10; i++ {
+		fileName := filepath.Join(srcDir, fmt.Sprintf("srcfile-%d", i))
+		// Owner read bit set
+		require.NoError(t, ioutil.WriteFile(fileName, []byte{}, randomMode(0400)))
+		require.NoError(t, system.Chtimes(fileName, aTime, mTime))
+	}
+}
+
 func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) {
 	dir, err := ioutil.TempDir("", "docker-copy-check")
 	require.NoError(t, err)
@@ -65,3 +148,32 @@
 	require.NoError(t, err)
 	assert.Equal(t, buf, readBuf)
 }
+
+func TestCopyHardlink(t *testing.T) {
+	var srcFile1FileInfo, srcFile2FileInfo, dstFile1FileInfo, dstFile2FileInfo unix.Stat_t
+
+	srcDir, err := ioutil.TempDir("", "srcDir")
+	require.NoError(t, err)
+	defer os.RemoveAll(srcDir)
+
+	dstDir, err := ioutil.TempDir("", "dstDir")
+	require.NoError(t, err)
+	defer os.RemoveAll(dstDir)
+
+	srcFile1 := filepath.Join(srcDir, "file1")
+	srcFile2 := filepath.Join(srcDir, "file2")
+	dstFile1 := filepath.Join(dstDir, "file1")
+	dstFile2 := filepath.Join(dstDir, "file2")
+	require.NoError(t, ioutil.WriteFile(srcFile1, []byte{}, 0777))
+	require.NoError(t, os.Link(srcFile1, srcFile2))
+
+	assert.NoError(t, DirCopy(srcDir, dstDir, Content, false))
+
+	require.NoError(t, unix.Stat(srcFile1, &srcFile1FileInfo))
+	require.NoError(t, unix.Stat(srcFile2, &srcFile2FileInfo))
+	require.Equal(t, srcFile1FileInfo.Ino, srcFile2FileInfo.Ino)
+
+	require.NoError(t, unix.Stat(dstFile1, &dstFile1FileInfo))
+	require.NoError(t, unix.Stat(dstFile2, &dstFile2FileInfo))
+	assert.Equal(t, dstFile1FileInfo.Ino, dstFile2FileInfo.Ino)
+}
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 160ba46..db41f05 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -268,7 +268,7 @@
 	if err != nil {
 		return "", err
 	}
-	if err := idtools.MkdirAllAndChown(dirname, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAllAndChown(dirname, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil {
 		return "", err
 	}
 
@@ -1697,10 +1697,10 @@
 	if err != nil {
 		return err
 	}
-	if err := idtools.MkdirAndChown(devices.root, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAndChown(devices.root, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil {
 		return err
 	}
-	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
+	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil {
 		return err
 	}
 
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index 288f501..707094a 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -189,7 +189,7 @@
 	}
 
 	// Create the target directories if they don't exist
-	if err := idtools.MkdirAllAndChown(path.Join(d.home, "mnt"), 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAllAndChown(path.Join(d.home, "mnt"), 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil {
 		d.ctr.Decrement(mp)
 		return nil, err
 	}
@@ -204,7 +204,7 @@
 		return nil, err
 	}
 
-	if err := idtools.MkdirAllAndChown(rootFs, 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAllAndChown(rootFs, 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil {
 		d.ctr.Decrement(mp)
 		d.DeviceSet.UnmountDevice(id, mp)
 		return nil, err
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index 7a3a0d1..ceb49b9 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -1,7 +1,6 @@
 package graphdriver
 
 import (
-	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -28,13 +27,6 @@
 var (
 	// All registered drivers
 	drivers map[string]InitFunc
-
-	// ErrNotSupported returned when driver is not supported.
-	ErrNotSupported = errors.New("driver not supported")
-	// ErrPrerequisites returned when driver does not meet prerequisites.
-	ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
-	// ErrIncompatibleFS returned when file system is not supported.
-	ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
 )
 
 //CreateOpts contains optional arguments for Create() and CreateReadWrite()
@@ -248,7 +240,7 @@
 	for _, name := range list {
 		driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
 		if err != nil {
-			if isDriverNotSupported(err) {
+			if IsDriverNotSupported(err) {
 				continue
 			}
 			return nil, err
@@ -260,7 +252,7 @@
 	for name, initFunc := range drivers {
 		driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
 		if err != nil {
-			if isDriverNotSupported(err) {
+			if IsDriverNotSupported(err) {
 				continue
 			}
 			return nil, err
@@ -270,12 +262,6 @@
 	return nil, fmt.Errorf("No supported storage backend found")
 }
 
-// isDriverNotSupported returns true if the error initializing
-// the graph driver is a non-supported error.
-func isDriverNotSupported(err error) bool {
-	return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
-}
-
 // scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
 func scanPriorDrivers(root string) map[string]bool {
 	driversMap := make(map[string]bool)
@@ -283,8 +269,39 @@
 	for driver := range drivers {
 		p := filepath.Join(root, driver)
 		if _, err := os.Stat(p); err == nil && driver != "vfs" {
-			driversMap[driver] = true
+			if !isEmptyDir(p) {
+				driversMap[driver] = true
+			}
 		}
 	}
 	return driversMap
 }
+
+// IsInitialized checks if the driver's home-directory exists and is non-empty.
+func IsInitialized(driverHome string) bool {
+	_, err := os.Stat(driverHome)
+	if os.IsNotExist(err) {
+		return false
+	}
+	if err != nil {
+		logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err)
+	}
+	return !isEmptyDir(driverHome)
+}
+
+// isEmptyDir checks if a directory is empty. It is used to check if prior
+// storage-driver directories exist. If an error occurs, it also assumes the
+// directory is not empty (which preserves the behavior _before_ this check
+// was added)
+func isEmptyDir(name string) bool {
+	f, err := os.Open(name)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+
+	if _, err = f.Readdirnames(1); err == io.EOF {
+		return true
+	}
+	return false
+}
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
index aa3cfc9..f59862d 100644
--- a/daemon/graphdriver/driver_linux.go
+++ b/daemon/graphdriver/driver_linux.go
@@ -3,8 +3,6 @@
 package graphdriver
 
 import (
-	"path/filepath"
-
 	"github.com/docker/docker/pkg/mount"
 	"golang.org/x/sys/unix"
 )
@@ -82,7 +80,7 @@
 // GetFSMagic returns the filesystem id given the path.
 func GetFSMagic(rootpath string) (FsMagic, error) {
 	var buf unix.Statfs_t
-	if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+	if err := unix.Statfs(rootpath, &buf); err != nil {
 		return 0, err
 	}
 	return FsMagic(buf.Type), nil
diff --git a/daemon/graphdriver/driver_test.go b/daemon/graphdriver/driver_test.go
new file mode 100644
index 0000000..40084be
--- /dev/null
+++ b/daemon/graphdriver/driver_test.go
@@ -0,0 +1,37 @@
+package graphdriver
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIsEmptyDir(t *testing.T) {
+	tmp, err := ioutil.TempDir("", "test-is-empty-dir")
+	require.NoError(t, err)
+	defer os.RemoveAll(tmp)
+
+	d := filepath.Join(tmp, "empty-dir")
+	err = os.Mkdir(d, 0755)
+	require.NoError(t, err)
+	empty := isEmptyDir(d)
+	assert.True(t, empty)
+
+	d = filepath.Join(tmp, "dir-with-subdir")
+	err = os.MkdirAll(filepath.Join(d, "subdir"), 0755)
+	require.NoError(t, err)
+	empty = isEmptyDir(d)
+	assert.False(t, empty)
+
+	d = filepath.Join(tmp, "dir-with-empty-file")
+	err = os.Mkdir(d, 0755)
+	require.NoError(t, err)
+	_, err = ioutil.TempFile(d, "file")
+	require.NoError(t, err)
+	empty = isEmptyDir(d)
+	assert.False(t, empty)
+}
diff --git a/daemon/graphdriver/errors.go b/daemon/graphdriver/errors.go
new file mode 100644
index 0000000..dd52ee4
--- /dev/null
+++ b/daemon/graphdriver/errors.go
@@ -0,0 +1,36 @@
+package graphdriver
+
+const (
+	// ErrNotSupported returned when driver is not supported.
+	ErrNotSupported NotSupportedError = "driver not supported"
+	// ErrPrerequisites returned when driver does not meet prerequisites.
+	ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)"
+	// ErrIncompatibleFS returned when file system is not supported.
+	ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver"
+)
+
+// ErrUnSupported signals that the graph-driver is not supported on the current configuration
+type ErrUnSupported interface {
+	NotSupported()
+}
+
+// NotSupportedError signals that the graph-driver is not supported on the current configuration
+type NotSupportedError string
+
+func (e NotSupportedError) Error() string {
+	return string(e)
+}
+
+// NotSupported signals that a graph-driver is not supported.
+func (e NotSupportedError) NotSupported() {}
+
+// IsDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func IsDriverNotSupported(err error) bool {
+	switch err.(type) {
+	case ErrUnSupported:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/daemon/graphdriver/graphtest/graphtest_unix.go b/daemon/graphdriver/graphtest/graphtest_unix.go
index c25d482..da9443e 100644
--- a/daemon/graphdriver/graphtest/graphtest_unix.go
+++ b/daemon/graphdriver/graphtest/graphtest_unix.go
@@ -42,7 +42,7 @@
 	d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root})
 	if err != nil {
 		t.Logf("graphdriver: %v\n", err)
-		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
+		if graphdriver.IsDriverNotSupported(err) {
 			t.Skipf("Driver %s not supported", name)
 		}
 		t.Fatal(err)
diff --git a/daemon/graphdriver/lcow/lcow.go b/daemon/graphdriver/lcow/lcow.go
index 5ec8b8b..058c69f 100644
--- a/daemon/graphdriver/lcow/lcow.go
+++ b/daemon/graphdriver/lcow/lcow.go
@@ -824,7 +824,7 @@
 		return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
 	}
 
-	// TODO @jhowardmsft - the retries are temporary to overcome platform reliablity issues.
+	// TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues.
 	// Obviously this will be removed as platform bugs are fixed.
 	retries := 0
 	for {
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index 853318d..bcb21d8 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -10,6 +10,7 @@
 	"os"
 	"os/exec"
 	"path"
+	"path/filepath"
 	"strconv"
 
 	"github.com/docker/docker/daemon/graphdriver"
@@ -109,15 +110,26 @@
 }
 
 // Init returns the NaiveDiffDriver, a native diff driver for overlay filesystem.
-// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
-// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
+// If overlay filesystem is not supported on the host, the error
+// graphdriver.ErrNotSupported is returned.
+// If an overlay filesystem is not supported over an existing filesystem then
+// error graphdriver.ErrIncompatibleFS is returned.
 func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
 
 	if err := supportsOverlay(); err != nil {
 		return nil, graphdriver.ErrNotSupported
 	}
 
-	fsMagic, err := graphdriver.GetFSMagic(home)
+	// Perform feature detection on /var/lib/docker/overlay if it's an existing directory.
+	// This covers situations where /var/lib/docker/overlay is a mount, and on a different
+	// filesystem than /var/lib/docker.
+	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
+	testdir := home
+	if _, err := os.Stat(testdir); os.IsNotExist(err) {
+		testdir = filepath.Dir(testdir)
+	}
+
+	fsMagic, err := graphdriver.GetFSMagic(testdir)
 	if err != nil {
 		return nil, err
 	}
@@ -126,17 +138,29 @@
 	}
 
 	switch fsMagic {
-	case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs:
+	case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs:
 		logrus.Errorf("'overlay' is not supported over %s", backingFs)
 		return nil, graphdriver.ErrIncompatibleFS
 	}
 
+	supportsDType, err := fsutils.SupportsDType(testdir)
+	if err != nil {
+		return nil, err
+	}
+	if !supportsDType {
+		if !graphdriver.IsInitialized(home) {
+			return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
+		}
+		// allow running without d_type only for existing setups (#27443)
+		logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
+	}
+
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
 	if err != nil {
 		return nil, err
 	}
 	// Create the driver home dir
-	if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{rootUID, rootGID}); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{rootUID, rootGID}); err != nil {
 		return nil, err
 	}
 
@@ -144,15 +168,6 @@
 		return nil, err
 	}
 
-	supportsDType, err := fsutils.SupportsDType(home)
-	if err != nil {
-		return nil, err
-	}
-	if !supportsDType {
-		// not a fatal error until v17.12 (#27443)
-		logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
-	}
-
 	d := &Driver{
 		home:          home,
 		uidMaps:       uidMaps,
@@ -199,7 +214,8 @@
 	}
 }
 
-// GetMetadata returns meta data about the overlay driver such as root, LowerDir, UpperDir, WorkDir and MergeDir used to store data.
+// GetMetadata returns metadata about the overlay driver such as root,
+// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
 func (d *Driver) GetMetadata(id string) (map[string]string, error) {
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
@@ -293,9 +309,6 @@
 		if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil {
 			return err
 		}
-		if err := idtools.MkdirAndChown(path.Join(dir, "merged"), 0700, root); err != nil {
-			return err
-		}
 		if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil {
 			return err
 		}
@@ -326,11 +339,8 @@
 	if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil {
 		return err
 	}
-	if err := idtools.MkdirAndChown(path.Join(dir, "merged"), 0700, root); err != nil {
-		return err
-	}
 
-	return copy.DirCopy(parentUpperDir, upperDir, copy.Content)
+	return copy.DirCopy(parentUpperDir, upperDir, copy.Content, true)
 }
 
 func (d *Driver) dir(id string) string {
@@ -357,6 +367,7 @@
 	if _, err := os.Stat(rootDir); err == nil {
 		return containerfs.NewLocalContainerFS(rootDir), nil
 	}
+
 	mergedDir := path.Join(dir, "merged")
 	if count := d.ctr.Increment(mergedDir); count > 1 {
 		return containerfs.NewLocalContainerFS(mergedDir), nil
@@ -364,7 +375,13 @@
 	defer func() {
 		if err != nil {
 			if c := d.ctr.Decrement(mergedDir); c <= 0 {
-				unix.Unmount(mergedDir, 0)
+				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+					logrus.Debugf("Failed to unmount %s: %v: %v", id, mntErr, err)
+				}
+				// Cleanup the created merged directory; see the comment in Put's rmdir
+				if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) {
+					logrus.Warnf("Failed to remove %s: %v: %v", id, rmErr, err)
+				}
 			}
 		}
 	}()
@@ -372,6 +389,13 @@
 	if err != nil {
 		return nil, err
 	}
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.IDPair{rootUID, rootGID}); err != nil {
+		return nil, err
+	}
 	var (
 		lowerDir = path.Join(d.dir(string(lowerID)), "root")
 		upperDir = path.Join(dir, "upper")
@@ -383,10 +407,6 @@
 	}
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
 	// user namespace requires this to move a directory from lower to upper.
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return nil, err
-	}
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
 		return nil, err
 	}
@@ -394,6 +414,8 @@
 }
 
 // Put unmounts the mount path created for the give id.
+// It also removes the 'merged' directory to force the kernel to unmount the
+// overlay mount in other namespaces.
 func (d *Driver) Put(id string) error {
 	d.locker.Lock(id)
 	defer d.locker.Unlock(id)
@@ -408,6 +430,17 @@
 	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
 		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
 	}
+
+	// Remove the mountpoint here. Removing the mountpoint (in newer kernels)
+	// will cause all other instances of this mount in other mount namespaces
+	// to be unmounted. This is necessary to avoid cases where an overlay mount
+	// that is present in another namespace will cause subsequent mount
+	// operations to fail with EBUSY. We ignore any errors here because this may
+	// fail on older kernels which don't have
+	// torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.
+	if err := unix.Rmdir(mountpoint); err != nil {
+		logrus.Debugf("Failed to remove %s overlay: %v", id, err)
+	}
 	return nil
 }
 
@@ -446,7 +479,7 @@
 		}
 	}()
 
-	if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink); err != nil {
+	if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink, true); err != nil {
 		return 0, err
 	}
 
diff --git a/daemon/graphdriver/overlay2/check.go b/daemon/graphdriver/overlay2/check.go
index f29630b..c1c4420 100644
--- a/daemon/graphdriver/overlay2/check.go
+++ b/daemon/graphdriver/overlay2/check.go
@@ -100,3 +100,35 @@
 
 	return nil
 }
+
+// supportsMultipleLowerDir checks if the system supports multiple lowerdirs,
+// which is required for the overlay2 driver. On 4.x kernels, multiple lowerdirs
+// are always available (so this check isn't needed), and are backported to RHEL and
+// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect
+// support on those kernels, without doing a kernel version compare.
+func supportsMultipleLowerDir(d string) error {
+	td, err := ioutil.TempDir(d, "multiple-lowerdir-check")
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err := os.RemoveAll(td); err != nil {
+			logrus.Warnf("Failed to remove check directory %v: %v", td, err)
+		}
+	}()
+
+	for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
+		if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
+			return err
+		}
+	}
+
+	opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "lower2"), path.Join(td, "lower1"), path.Join(td, "upper"), path.Join(td, "work"))
+	if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
+		return errors.Wrap(err, "failed to mount overlay")
+	}
+	if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
+		logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
+	}
+	return nil
+}
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
index c2023c7..f1731ea 100644
--- a/daemon/graphdriver/overlay2/overlay.go
+++ b/daemon/graphdriver/overlay2/overlay.go
@@ -16,8 +16,6 @@
 	"strings"
 	"sync"
 
-	"github.com/sirupsen/logrus"
-
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/overlayutils"
 	"github.com/docker/docker/daemon/graphdriver/quota"
@@ -32,9 +30,9 @@
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/system"
-	units "github.com/docker/go-units"
-
+	"github.com/docker/go-units"
 	"github.com/opencontainers/selinux/go-selinux/label"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -77,7 +75,7 @@
 	maxDepth   = 128
 
 	// idLength represents the number of random characters
-	// which can be used to create the unique link identifer
+	// which can be used to create the unique link identifier
 	// for every layer. If this value is too long then the
 	// page size limit for the mount command may be exceeded.
 	// The idLength should be selected such that following equation
@@ -91,7 +89,8 @@
 	quota               quota.Quota
 }
 
-// Driver contains information about the home directory and the list of active mounts that are created using this driver.
+// Driver contains information about the home directory and the list of active
+// mounts that are created using this driver.
 type Driver struct {
 	home          string
 	uidMaps       []idtools.IDMap
@@ -116,9 +115,11 @@
 	graphdriver.Register(driverName, Init)
 }
 
-// Init returns the a native diff driver for overlay filesystem.
-// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
-// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
+// Init returns the native diff driver for overlay filesystem.
+// If overlay filesystem is not supported on the host, the error
+// graphdriver.ErrNotSupported is returned.
+// If an overlay filesystem is not supported over an existing filesystem then
+// the error graphdriver.ErrIncompatibleFS is returned.
 func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
 	opts, err := parseOptions(options)
 	if err != nil {
@@ -134,14 +135,17 @@
 	if err != nil {
 		return nil, err
 	}
-	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
-		if !opts.overrideKernelCheck {
-			return nil, graphdriver.ErrNotSupported
-		}
-		logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
+
+	// Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory.
+	// This covers situations where /var/lib/docker/overlay2 is a mount, and on a different
+	// filesystem than /var/lib/docker.
+	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
+	testdir := home
+	if _, err := os.Stat(testdir); os.IsNotExist(err) {
+		testdir = filepath.Dir(testdir)
 	}
 
-	fsMagic, err := graphdriver.GetFSMagic(home)
+	fsMagic, err := graphdriver.GetFSMagic(testdir)
 	if err != nil {
 		return nil, err
 	}
@@ -149,9 +153,8 @@
 		backingFs = fsName
 	}
 
-	// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
 	switch fsMagic {
-	case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+	case graphdriver.FsMagicAufs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs:
 		logrus.Errorf("'overlay2' is not supported over %s", backingFs)
 		return nil, graphdriver.ErrIncompatibleFS
 	case graphdriver.FsMagicBtrfs:
@@ -166,12 +169,34 @@
 		}
 	}
 
+	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
+		if opts.overrideKernelCheck {
+			logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
+		} else {
+			if err := supportsMultipleLowerDir(testdir); err != nil {
+				logrus.Debugf("Multiple lower dirs not supported: %v", err)
+				return nil, graphdriver.ErrNotSupported
+			}
+		}
+	}
+	supportsDType, err := fsutils.SupportsDType(testdir)
+	if err != nil {
+		return nil, err
+	}
+	if !supportsDType {
+		if !graphdriver.IsInitialized(home) {
+			return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs)
+		}
+		// allow running without d_type only for existing setups (#27443)
+		logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
+	}
+
 	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
 	if err != nil {
 		return nil, err
 	}
 	// Create the driver home dir
-	if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0700, idtools.IDPair{rootUID, rootGID}); err != nil && !os.IsExist(err) {
+	if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0700, idtools.IDPair{rootUID, rootGID}); err != nil {
 		return nil, err
 	}
 
@@ -179,15 +204,6 @@
 		return nil, err
 	}
 
-	supportsDType, err := fsutils.SupportsDType(home)
-	if err != nil {
-		return nil, err
-	}
-	if !supportsDType {
-		// not a fatal error until v17.12 (#27443)
-		logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
-	}
-
 	d := &Driver{
 		home:          home,
 		uidMaps:       uidMaps,
@@ -289,8 +305,8 @@
 	}
 }
 
-// GetMetadata returns meta data about the overlay driver such as
-// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
+// GetMetadata returns metadata about the overlay driver such as the LowerDir,
+// UpperDir, WorkDir, and MergeDir used to store data.
 func (d *Driver) GetMetadata(id string) (map[string]string, error) {
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
@@ -414,9 +430,6 @@
 	if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil {
 		return err
 	}
-	if err := idtools.MkdirAndChown(path.Join(dir, "merged"), 0700, root); err != nil {
-		return err
-	}
 
 	lower, err := d.getLower(parent)
 	if err != nil {
@@ -545,6 +558,10 @@
 				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
 					logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
 				}
+				// Clean up the created merged directory; see the comment in Put's rmdir
+				if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) {
+					logrus.Debugf("Failed to remove %s: %v: %v", id, rmErr, err)
+				}
 			}
 		}
 	}()
@@ -560,6 +577,14 @@
 	mount := unix.Mount
 	mountTarget := mergedDir
 
+	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
+	if err != nil {
+		return nil, err
+	}
+	if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.IDPair{rootUID, rootGID}); err != nil {
+		return nil, err
+	}
+
 	pageSize := unix.Getpagesize()
 
 	// Go can return a larger page size than supported by the system
@@ -594,11 +619,6 @@
 
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
 	// user namespace requires this to move a directory from lower to upper.
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return nil, err
-	}
-
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
 		return nil, err
 	}
@@ -607,6 +627,8 @@
 }
 
 // Put unmounts the mount path created for the give id.
+// It also removes the 'merged' directory to force the kernel to unmount the
+// overlay mount in other namespaces.
 func (d *Driver) Put(id string) error {
 	d.locker.Lock(id)
 	defer d.locker.Unlock(id)
@@ -627,6 +649,16 @@
 	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
 		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
 	}
+	// Remove the mountpoint here. Removing the mountpoint (in newer kernels)
+	// will cause all other instances of this mount in other mount namespaces
+	// to be unmounted. This is necessary to avoid cases where an overlay mount
+	// that is present in another namespace will cause subsequent mount
+	// operations to fail with EBUSY. We ignore any errors here because this may
+	// fail on older kernels which don't have
+	// torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.
+	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
+		logrus.Debugf("Failed to remove %s overlay: %v", id, err)
+	}
 	return nil
 }
 
@@ -636,7 +668,8 @@
 	return err == nil
 }
 
-// isParent returns if the passed in parent is the direct parent of the passed in layer
+// isParent determines whether the given parent is the direct parent of the
+// given layer id
 func (d *Driver) isParent(id, parent string) bool {
 	lowers, err := d.getLowerDirs(id)
 	if err != nil {
@@ -711,8 +744,8 @@
 	})
 }
 
-// Changes produces a list of changes between the specified layer
-// and its parent layer. If parent is "", then all changes will be ADD changes.
+// Changes produces a list of changes between the specified layer and its
+// parent layer. If parent is "", then all changes will be ADD changes.
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
 	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
 		return d.naiveDiff.Changes(id, parent)
diff --git a/daemon/graphdriver/overlayutils/overlayutils.go b/daemon/graphdriver/overlayutils/overlayutils.go
index 7491c34..9f71c60 100644
--- a/daemon/graphdriver/overlayutils/overlayutils.go
+++ b/daemon/graphdriver/overlayutils/overlayutils.go
@@ -3,8 +3,9 @@
 package overlayutils
 
 import (
-	"errors"
 	"fmt"
+
+	"github.com/docker/docker/daemon/graphdriver"
 )
 
 // ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
@@ -13,6 +14,7 @@
 	if backingFs == "xfs" {
 		msg += " Reformat the filesystem with ftype=1 to enable d_type support."
 	}
-	msg += " Running without d_type support will no longer be supported in Docker 17.12."
-	return errors.New(msg)
+	msg += " Backing filesystems without d_type support are not supported."
+
+	return graphdriver.NotSupportedError(msg)
 }
diff --git a/daemon/graphdriver/vfs/copy_linux.go b/daemon/graphdriver/vfs/copy_linux.go
new file mode 100644
index 0000000..a632d35
--- /dev/null
+++ b/daemon/graphdriver/vfs/copy_linux.go
@@ -0,0 +1,9 @@
+// +build linux
+
+package vfs
+
+import "github.com/docker/docker/daemon/graphdriver/copy"
+
+func dirCopy(srcDir, dstDir string) error {
+	return copy.DirCopy(srcDir, dstDir, copy.Content, false)
+}
diff --git a/daemon/graphdriver/vfs/copy_unsupported.go b/daemon/graphdriver/vfs/copy_unsupported.go
new file mode 100644
index 0000000..fcc4b69
--- /dev/null
+++ b/daemon/graphdriver/vfs/copy_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package vfs
+
+import "github.com/docker/docker/pkg/chrootarchive"
+
+func dirCopy(srcDir, dstDir string) error {
+	return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 610476f..a85d6a7 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -7,7 +7,6 @@
 
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/quota"
-	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/system"
@@ -16,8 +15,8 @@
 )
 
 var (
-	// CopyWithTar defines the copy method to use.
-	CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+	// CopyDir defines the copy method to use.
+	CopyDir = dirCopy
 )
 
 func init() {
@@ -133,7 +132,7 @@
 	if err != nil {
 		return fmt.Errorf("%s: %s", parent, err)
 	}
-	return CopyWithTar(parentDir.Path(), dir)
+	return CopyDir(parentDir.Path(), dir)
 }
 
 func (d *Driver) dir(id string) string {
diff --git a/daemon/health.go b/daemon/health.go
index 26ae20f..f40c0dd 100644
--- a/daemon/health.go
+++ b/daemon/health.go
@@ -254,6 +254,8 @@
 		return &cmdProbe{shell: false}
 	case "CMD-SHELL":
 		return &cmdProbe{shell: true}
+	case "NONE":
+		return nil
 	default:
 		logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID)
 		return nil
diff --git a/daemon/info.go b/daemon/info.go
index b14e7ba..bbb027e 100644
--- a/daemon/info.go
+++ b/daemon/info.go
@@ -154,24 +154,46 @@
 
 // SystemVersion returns version information about the daemon.
 func (daemon *Daemon) SystemVersion() types.Version {
-	v := types.Version{
-		Version:       dockerversion.Version,
-		GitCommit:     dockerversion.GitCommit,
-		MinAPIVersion: api.MinVersion,
-		GoVersion:     runtime.Version(),
-		Os:            runtime.GOOS,
-		Arch:          runtime.GOARCH,
-		BuildTime:     dockerversion.BuildTime,
-		Experimental:  daemon.configStore.Experimental,
-	}
-
 	kernelVersion := "<unknown>"
 	if kv, err := kernel.GetKernelVersion(); err != nil {
 		logrus.Warnf("Could not get kernel version: %v", err)
 	} else {
 		kernelVersion = kv.String()
 	}
-	v.KernelVersion = kernelVersion
+
+	v := types.Version{
+		Components: []types.ComponentVersion{
+			{
+				Name:    "Engine",
+				Version: dockerversion.Version,
+				Details: map[string]string{
+					"GitCommit":     dockerversion.GitCommit,
+					"ApiVersion":    api.DefaultVersion,
+					"MinAPIVersion": api.MinVersion,
+					"GoVersion":     runtime.Version(),
+					"Os":            runtime.GOOS,
+					"Arch":          runtime.GOARCH,
+					"BuildTime":     dockerversion.BuildTime,
+					"KernelVersion": kernelVersion,
+					"Experimental":  fmt.Sprintf("%t", daemon.configStore.Experimental),
+				},
+			},
+		},
+
+		// Populate deprecated fields for older clients
+		Version:       dockerversion.Version,
+		GitCommit:     dockerversion.GitCommit,
+		APIVersion:    api.DefaultVersion,
+		MinAPIVersion: api.MinVersion,
+		GoVersion:     runtime.Version(),
+		Os:            runtime.GOOS,
+		Arch:          runtime.GOARCH,
+		BuildTime:     dockerversion.BuildTime,
+		KernelVersion: kernelVersion,
+		Experimental:  daemon.configStore.Experimental,
+	}
+
+	v.Platform.Name = dockerversion.PlatformName
 
 	return v
 }
diff --git a/daemon/kill.go b/daemon/kill.go
index a230eaa..1292f86 100644
--- a/daemon/kill.go
+++ b/daemon/kill.go
@@ -64,6 +64,8 @@
 	container.Lock()
 	defer container.Unlock()
 
+	daemon.stopHealthchecks(container)
+
 	if !container.Running {
 		return errNotRunning(container.ID)
 	}
diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go
index 6a0653e..cde36d4 100644
--- a/daemon/logger/fluentd/fluentd.go
+++ b/daemon/logger/fluentd/fluentd.go
@@ -48,11 +48,12 @@
 	defaultRetryWait  = 1000
 	defaultMaxRetries = math.MaxInt32
 
-	addressKey      = "fluentd-address"
-	bufferLimitKey  = "fluentd-buffer-limit"
-	retryWaitKey    = "fluentd-retry-wait"
-	maxRetriesKey   = "fluentd-max-retries"
-	asyncConnectKey = "fluentd-async-connect"
+	addressKey            = "fluentd-address"
+	bufferLimitKey        = "fluentd-buffer-limit"
+	retryWaitKey          = "fluentd-retry-wait"
+	maxRetriesKey         = "fluentd-max-retries"
+	asyncConnectKey       = "fluentd-async-connect"
+	subSecondPrecisionKey = "fluentd-sub-second-precision"
 )
 
 func init() {
@@ -117,15 +118,23 @@
 		}
 	}
 
+	subSecondPrecision := false
+	if info.Config[subSecondPrecisionKey] != "" {
+		if subSecondPrecision, err = strconv.ParseBool(info.Config[subSecondPrecisionKey]); err != nil {
+			return nil, err
+		}
+	}
+
 	fluentConfig := fluent.Config{
-		FluentPort:       loc.port,
-		FluentHost:       loc.host,
-		FluentNetwork:    loc.protocol,
-		FluentSocketPath: loc.path,
-		BufferLimit:      bufferLimit,
-		RetryWait:        retryWait,
-		MaxRetry:         maxRetries,
-		AsyncConnect:     asyncConnect,
+		FluentPort:         loc.port,
+		FluentHost:         loc.host,
+		FluentNetwork:      loc.protocol,
+		FluentSocketPath:   loc.path,
+		BufferLimit:        bufferLimit,
+		RetryWait:          retryWait,
+		MaxRetry:           maxRetries,
+		AsyncConnect:       asyncConnect,
+		SubSecondPrecision: subSecondPrecision,
 	}
 
 	logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig).
@@ -183,6 +192,7 @@
 		case retryWaitKey:
 		case maxRetriesKey:
 		case asyncConnectKey:
+		case subSecondPrecisionKey:
 			// Accepted
 		default:
 			return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key)
diff --git a/daemon/logger/logentries/logentries.go b/daemon/logger/logentries/logentries.go
index e28707c..255e87d 100644
--- a/daemon/logger/logentries/logentries.go
+++ b/daemon/logger/logentries/logentries.go
@@ -50,8 +50,10 @@
 		return nil, errors.Wrap(err, "error connecting to logentries")
 	}
 	var lineOnly bool
-	if lineOnly, err = strconv.ParseBool(info.Config[lineonly]); err != nil {
-		return nil, errors.Wrap(err, "error parsing lineonly option")
+	if info.Config[lineonly] != "" {
+		if lineOnly, err = strconv.ParseBool(info.Config[lineonly]); err != nil {
+			return nil, errors.Wrap(err, "error parsing lineonly option")
+		}
 	}
 	return &logentries{
 		containerID:   info.ContainerID,
@@ -76,7 +78,7 @@
 		logger.PutMessage(msg)
 		f.writer.Println(f.tag, ts, data)
 	} else {
-		line := msg.Line
+		line := string(msg.Line)
 		logger.PutMessage(msg)
 		f.writer.Println(line)
 	}
diff --git a/daemon/logger/splunk/splunk.go b/daemon/logger/splunk/splunk.go
index 31a0487..eb80a0b 100644
--- a/daemon/logger/splunk/splunk.go
+++ b/daemon/logger/splunk/splunk.go
@@ -5,6 +5,7 @@
 import (
 	"bytes"
 	"compress/gzip"
+	"context"
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/json"
@@ -63,6 +64,8 @@
 	envVarStreamChannelSize     = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE"
 )
 
+var batchSendTimeout = 30 * time.Second
+
 type splunkLoggerInterface interface {
 	logger.Logger
 	worker()
@@ -416,13 +419,18 @@
 
 func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage {
 	messagesLen := len(messages)
+
+	ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout)
+	defer cancel()
+
 	for i := 0; i < messagesLen; i += l.postMessagesBatchSize {
 		upperBound := i + l.postMessagesBatchSize
 		if upperBound > messagesLen {
 			upperBound = messagesLen
 		}
-		if err := l.tryPostMessages(messages[i:upperBound]); err != nil {
-			logrus.Error(err)
+
+		if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil {
+			logrus.WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs")
 			if messagesLen-i >= l.bufferMaximum || lastChance {
 				// If this is last chance - print them all to the daemon log
 				if lastChance {
@@ -447,7 +455,7 @@
 	return messages[:0]
 }
 
-func (l *splunkLogger) tryPostMessages(messages []*splunkMessage) error {
+func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error {
 	if len(messages) == 0 {
 		return nil
 	}
@@ -486,6 +494,7 @@
 	if err != nil {
 		return err
 	}
+	req = req.WithContext(ctx)
 	req.Header.Set("Authorization", l.auth)
 	// Tell if we are sending gzip compressed body
 	if l.gzipCompression {
diff --git a/daemon/logger/splunk/splunk_test.go b/daemon/logger/splunk/splunk_test.go
index ebf835c..9761377 100644
--- a/daemon/logger/splunk/splunk_test.go
+++ b/daemon/logger/splunk/splunk_test.go
@@ -2,8 +2,10 @@
 
 import (
 	"compress/gzip"
+	"context"
 	"fmt"
 	"os"
+	"runtime"
 	"testing"
 	"time"
 
@@ -1062,7 +1064,7 @@
 		t.Fatal("No messages should be accepted at this point")
 	}
 
-	hec.simulateServerError = false
+	hec.simulateErr(false)
 
 	for i := defaultStreamChannelSize * 2; i < defaultStreamChannelSize*4; i++ {
 		if err := loggerDriver.Log(&logger.Message{Line: []byte(fmt.Sprintf("%d", i)), Source: "stdout", Timestamp: time.Now()}); err != nil {
@@ -1110,7 +1112,7 @@
 	}
 
 	hec := NewHTTPEventCollectorMock(t)
-	hec.simulateServerError = true
+	hec.simulateErr(true)
 	go hec.Serve()
 
 	info := logger.Info{
@@ -1308,3 +1310,48 @@
 		t.Fatal(err)
 	}
 }
+
+func TestDeadlockOnBlockedEndpoint(t *testing.T) {
+	hec := NewHTTPEventCollectorMock(t)
+	go hec.Serve()
+	info := logger.Info{
+		Config: map[string]string{
+			splunkURLKey:   hec.URL(),
+			splunkTokenKey: hec.token,
+		},
+		ContainerID:        "containeriid",
+		ContainerName:      "/container_name",
+		ContainerImageID:   "contaimageid",
+		ContainerImageName: "container_image_name",
+	}
+
+	l, err := New(info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, unblock := context.WithCancel(context.Background())
+	hec.withBlock(ctx)
+	defer unblock()
+
+	batchSendTimeout = 1 * time.Second
+
+	if err := l.Log(&logger.Message{}); err != nil {
+		t.Fatal(err)
+	}
+
+	done := make(chan struct{})
+	go func() {
+		l.Close()
+		close(done)
+	}()
+
+	select {
+	case <-time.After(60 * time.Second):
+		buf := make([]byte, 1e6)
+		buf = buf[:runtime.Stack(buf, true)]
+		t.Logf("STACK DUMP: \n\n%s\n\n", string(buf))
+		t.Fatal("timeout waiting for close to finish")
+	case <-done:
+	}
+}
diff --git a/daemon/logger/splunk/splunkhecmock_test.go b/daemon/logger/splunk/splunkhecmock_test.go
index e508948..0135ac7 100644
--- a/daemon/logger/splunk/splunkhecmock_test.go
+++ b/daemon/logger/splunk/splunkhecmock_test.go
@@ -2,12 +2,14 @@
 
 import (
 	"compress/gzip"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net"
 	"net/http"
+	"sync"
 	"testing"
 )
 
@@ -29,8 +31,10 @@
 	tcpAddr     *net.TCPAddr
 	tcpListener *net.TCPListener
 
+	mu                  sync.Mutex
 	token               string
 	simulateServerError bool
+	blockingCtx         context.Context
 
 	test *testing.T
 
@@ -55,6 +59,18 @@
 		connectionVerified:  false}
 }
 
+func (hec *HTTPEventCollectorMock) simulateErr(b bool) {
+	hec.mu.Lock()
+	hec.simulateServerError = b
+	hec.mu.Unlock()
+}
+
+func (hec *HTTPEventCollectorMock) withBlock(ctx context.Context) {
+	hec.mu.Lock()
+	hec.blockingCtx = ctx
+	hec.mu.Unlock()
+}
+
 func (hec *HTTPEventCollectorMock) URL() string {
 	return "http://" + hec.tcpListener.Addr().String()
 }
@@ -72,7 +88,16 @@
 
 	hec.numOfRequests++
 
-	if hec.simulateServerError {
+	hec.mu.Lock()
+	simErr := hec.simulateServerError
+	ctx := hec.blockingCtx
+	hec.mu.Unlock()
+
+	if ctx != nil {
+		<-hec.blockingCtx.Done()
+	}
+
+	if simErr {
 		if request.Body != nil {
 			defer request.Body.Close()
 		}
diff --git a/daemon/monitor.go b/daemon/monitor.go
index c0a265d..dd60239 100644
--- a/daemon/monitor.go
+++ b/daemon/monitor.go
@@ -39,19 +39,23 @@
 		if runtime.GOOS == "windows" {
 			return errors.New("received StateOOM from libcontainerd on Windows. This should never happen")
 		}
+
+		c.Lock()
+		defer c.Unlock()
 		daemon.updateHealthMonitor(c)
 		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 			return err
 		}
+
 		daemon.LogContainerEvent(c, "oom")
 	case libcontainerd.EventExit:
 		if int(ei.Pid) == c.Pid {
+			c.Lock()
 			_, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
 			if err != nil {
 				logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
 			}
 
-			c.Lock()
 			c.StreamConfig.Wait()
 			c.Reset(false)
 
@@ -68,6 +72,7 @@
 				c.SetStopped(&exitStatus)
 				defer daemon.autoRemove(c)
 			}
+			defer c.Unlock() // needs to be called before autoRemove
 
 			// cancel healthcheck here, they will be automatically
 			// restarted if/when the container is started again
@@ -91,7 +96,9 @@
 						}
 					}
 					if err != nil {
+						c.Lock()
 						c.SetStopped(&exitStatus)
+						c.Unlock()
 						defer daemon.autoRemove(c)
 						if err != restartmanager.ErrRestartCanceled {
 							logrus.Errorf("restartmanger wait error: %+v", err)
@@ -101,14 +108,13 @@
 			}
 
 			daemon.setStateCounter(c)
-			defer c.Unlock()
 			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
 				return err
 			}
 			return daemon.postRunProcessing(c, ei)
 		}
 
-		if execConfig := c.ExecCommands.ByPid(int(ei.Pid)); execConfig != nil {
+		if execConfig := c.ExecCommands.Get(ei.ProcessID); execConfig != nil {
 			ec := int(ei.ExitCode)
 			execConfig.Lock()
 			defer execConfig.Unlock()
@@ -125,6 +131,7 @@
 		} else {
 			logrus.WithFields(logrus.Fields{
 				"container": c.ID,
+				"exec-id":   ei.ProcessID,
 				"exec-pid":  ei.Pid,
 			}).Warnf("Ignoring Exit Event, no such exec command found")
 		}
diff --git a/daemon/network.go b/daemon/network.go
index 61548c5..573901e 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -3,6 +3,7 @@
 import (
 	"fmt"
 	"net"
+	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -45,7 +46,9 @@
 	// 3. match by ID prefix
 	list := daemon.GetNetworksByIDPrefix(idName)
 	if len(list) == 0 {
-		return nil, errors.WithStack(networkNotFound(idName))
+		// Be very careful when changing the error type here; the libnetwork.ErrNoSuchNetwork error is used by the
+		// controller to retry the creation of the network when it is managed through the swarm manager
+		return nil, errors.WithStack(notFound(libnetwork.ErrNoSuchNetwork(idName)))
 	}
 	if len(list) > 1 {
 		return nil, errors.WithStack(invalidIdentifier(idName))
@@ -181,21 +184,14 @@
 		// Otherwise continue down the call to create or recreate sandbox.
 	}
 
-	n, err := daemon.GetNetworkByID(create.ID)
+	_, err := daemon.GetNetworkByID(create.ID)
 	if err != nil {
 		logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
 	}
-
-	if err = daemon.createLoadBalancerSandbox("ingress", create.ID, ip, n, libnetwork.OptionIngress()); err != nil {
-		logrus.Errorf("Failed creating load balancer sandbox for ingress network: %v", err)
-	}
 }
 
 func (daemon *Daemon) releaseIngress(id string) {
 	controller := daemon.netController
-	if err := controller.SandboxDestroy("ingress-sbox"); err != nil {
-		logrus.Errorf("Failed to delete ingress sandbox: %v", err)
-	}
 
 	if id == "" {
 		return
@@ -207,13 +203,6 @@
 		return
 	}
 
-	for _, ep := range n.Endpoints() {
-		if err := ep.Delete(true); err != nil {
-			logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
-			return
-		}
-	}
-
 	if err := n.Delete(); err != nil {
 		logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
 		return
@@ -268,34 +257,6 @@
 	return resp, err
 }
 
-func (daemon *Daemon) createLoadBalancerSandbox(prefix, id string, ip net.IP, n libnetwork.Network, options ...libnetwork.SandboxOption) error {
-	c := daemon.netController
-	sandboxName := prefix + "-sbox"
-	sb, err := c.NewSandbox(sandboxName, options...)
-	if err != nil {
-		if _, ok := err.(networktypes.ForbiddenError); !ok {
-			return errors.Wrapf(err, "Failed creating %s sandbox", sandboxName)
-		}
-		return nil
-	}
-
-	endpointName := prefix + "-endpoint"
-	ep, err := n.CreateEndpoint(endpointName, libnetwork.CreateOptionIpam(ip, nil, nil, nil), libnetwork.CreateOptionLoadBalancer())
-	if err != nil {
-		return errors.Wrapf(err, "Failed creating %s in sandbox %s", endpointName, sandboxName)
-	}
-
-	if err := ep.Join(sb, nil); err != nil {
-		return errors.Wrapf(err, "Failed joining %s to sandbox %s", endpointName, sandboxName)
-	}
-
-	if err := sb.EnableService(); err != nil {
-		return errors.Wrapf(err, "Failed enabling service in %s sandbox", sandboxName)
-	}
-
-	return nil
-}
-
 func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
 	if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
 		err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
@@ -358,6 +319,15 @@
 		nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
 	}
 
+	if agent && driver == "overlay" && (create.Ingress || runtime.GOOS == "windows") {
+		nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
+		if !exists {
+			return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
+		}
+
+		nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
+	}
+
 	n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
 	if err != nil {
 		if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
@@ -373,18 +343,6 @@
 	}
 	daemon.LogNetworkEvent(n, "create")
 
-	if agent && !n.Info().Ingress() && n.Type() == "overlay" {
-		nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
-		if !exists {
-			return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
-		}
-
-		if err := daemon.createLoadBalancerSandbox(create.Name, id, nodeIP, n); err != nil {
-			return nil, err
-		}
-
-	}
-
 	return &types.NetworkCreateResponse{
 		ID:      n.ID(),
 		Warning: warning,
@@ -515,43 +473,16 @@
 	return daemon.deleteNetwork(networkID, false)
 }
 
-func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) {
-	controller := daemon.netController
-
-	//The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint")
-	endpoints := n.Endpoints()
-	if len(endpoints) == 1 {
-		sandboxName := n.Name() + "-sbox"
-
-		info := endpoints[0].Info()
-		if info != nil {
-			sb := info.Sandbox()
-			if sb != nil {
-				if err := sb.DisableService(); err != nil {
-					logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
-					//Ignore error and attempt to delete the load balancer endpoint
-				}
-			}
-		}
-
-		if err := endpoints[0].Delete(true); err != nil {
-			logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err)
-			//Ignore error and attempt to delete the sandbox.
-		}
-
-		if err := controller.SandboxDestroy(sandboxName); err != nil {
-			logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
-			//Ignore error and attempt to delete the network.
-		}
-	}
-}
-
 func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
 	nw, err := daemon.FindNetwork(networkID)
 	if err != nil {
 		return err
 	}
 
+	if nw.Info().Ingress() {
+		return nil
+	}
+
 	if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
 		err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
 		return notAllowedError{err}
@@ -567,10 +498,6 @@
 		return notAllowedError{err}
 	}
 
-	if !nw.Info().Ingress() && nw.Type() == "overlay" {
-		daemon.deleteLoadBalancerSandbox(nw)
-	}
-
 	if err := nw.Delete(); err != nil {
 		return err
 	}
diff --git a/daemon/reload.go b/daemon/reload.go
index 0d16bc8..a20eb68 100644
--- a/daemon/reload.go
+++ b/daemon/reload.go
@@ -61,6 +61,9 @@
 	if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
 		return err
 	}
+	if err := daemon.reloadNetworkDiagnosticPort(conf, attributes); err != nil {
+		return err
+	}
 	return nil
 }
 
@@ -308,3 +311,18 @@
 	attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)
 	return nil
 }
+
+// reloadNetworkDiagnosticPort updates the network controller, starting the diagnostic mode if the config is valid
+func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error {
+	if conf == nil || daemon.netController == nil {
+		return nil
+	}
+	// Enable the network diagnostic server if the flag is set with a valid port within the range
+	if conf.IsValueSet("network-diagnostic-port") && conf.NetworkDiagnosticPort > 0 && conf.NetworkDiagnosticPort < 65536 {
+		logrus.Warnf("Calling the diagnostic start with %d", conf.NetworkDiagnosticPort)
+		daemon.netController.StartDiagnose(conf.NetworkDiagnosticPort)
+	} else {
+		daemon.netController.StopDiagnose()
+	}
+	return nil
+}
diff --git a/daemon/reload_test.go b/daemon/reload_test.go
index 96b1a24..03b249b 100644
--- a/daemon/reload_test.go
+++ b/daemon/reload_test.go
@@ -10,6 +10,7 @@
 	"github.com/docker/docker/pkg/discovery"
 	_ "github.com/docker/docker/pkg/discovery/memory"
 	"github.com/docker/docker/registry"
+	"github.com/docker/libnetwork"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -479,3 +480,71 @@
 		t.Fatal(e)
 	}
 }
+
+func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &config.Config{}
+
+	valuesSet := make(map[string]interface{})
+	valuesSet["network-diagnostic-port"] = 2000
+	enableConfig := &config.Config{
+		CommonConfig: config.CommonConfig{
+			NetworkDiagnosticPort: 2000,
+			ValuesSet:             valuesSet,
+		},
+	}
+	disableConfig := &config.Config{
+		CommonConfig: config.CommonConfig{},
+	}
+
+	netOptions, err := daemon.networkOptions(enableConfig, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	controller, err := libnetwork.New(netOptions...)
+	if err != nil {
+		t.Fatal(err)
+	}
+	daemon.netController = controller
+
+	// Enable/Disable the server for some iterations
+	for i := 0; i < 10; i++ {
+		enableConfig.CommonConfig.NetworkDiagnosticPort++
+		if err := daemon.Reload(enableConfig); err != nil {
+			t.Fatal(err)
+		}
+		// Check that the diagnose is enabled
+		if !daemon.netController.IsDiagnoseEnabled() {
+			t.Fatalf("diagnosed should be enable")
+		}
+
+		// Reload
+		if err := daemon.Reload(disableConfig); err != nil {
+			t.Fatal(err)
+		}
+		// Check that the diagnose is disabled
+		if daemon.netController.IsDiagnoseEnabled() {
+			t.Fatalf("diagnosed should be disable")
+		}
+	}
+
+	enableConfig.CommonConfig.NetworkDiagnosticPort++
+	// 2 times the enable should not create problems
+	if err := daemon.Reload(enableConfig); err != nil {
+		t.Fatal(err)
+	}
+	// Check that the diagnose is enabled
+	if !daemon.netController.IsDiagnoseEnabled() {
+		t.Fatalf("diagnosed should be enable")
+	}
+
+	// Check that another reload does not cause issues
+	if err := daemon.Reload(enableConfig); err != nil {
+		t.Fatal(err)
+	}
+	// Check that the diagnose is enable
+	if !daemon.netController.IsDiagnoseEnabled() {
+		t.Fatalf("diagnosed should be enable")
+	}
+
+}
diff --git a/daemon/start_unix.go b/daemon/start_unix.go
index a8402bb..119b0a9 100644
--- a/daemon/start_unix.go
+++ b/daemon/start_unix.go
@@ -7,7 +7,7 @@
 	"os/exec"
 	"path/filepath"
 
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/docker/docker/container"
 	"github.com/pkg/errors"
 )
@@ -42,7 +42,7 @@
 	if err != nil {
 		return nil, err
 	}
-	opts := &runcopts.RuncOptions{
+	opts := &runctypes.RuncOptions{
 		Runtime: path,
 		RuntimeRoot: filepath.Join(daemon.configStore.ExecRoot,
 			fmt.Sprintf("runtime-%s", container.HostConfig.Runtime)),
diff --git a/daemon/stop.go b/daemon/stop.go
index 7eadba7..71d0b2a 100644
--- a/daemon/stop.go
+++ b/daemon/stop.go
@@ -43,8 +43,6 @@
 		return nil
 	}
 
-	daemon.stopHealthchecks(container)
-
 	stopSignal := container.StopSignal()
 	// 1. Send a stop signal
 	if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil {
diff --git a/distribution/errors.go b/distribution/errors.go
index dd6ff0a..355e9da 100644
--- a/distribution/errors.go
+++ b/distribution/errors.go
@@ -126,21 +126,25 @@
 
 // continueOnError returns true if we should fallback to the next endpoint
 // as a result of this error.
-func continueOnError(err error) bool {
+func continueOnError(err error, mirrorEndpoint bool) bool {
 	switch v := err.(type) {
 	case errcode.Errors:
 		if len(v) == 0 {
 			return true
 		}
-		return continueOnError(v[0])
+		return continueOnError(v[0], mirrorEndpoint)
 	case ErrNoSupport:
-		return continueOnError(v.Err)
+		return continueOnError(v.Err, mirrorEndpoint)
 	case errcode.Error:
-		return shouldV2Fallback(v)
+		return mirrorEndpoint || shouldV2Fallback(v)
 	case *client.UnexpectedHTTPResponseError:
 		return true
 	case ImageConfigPullError:
-		return false
+		// ImageConfigPullError only happens with v2 images; a v1 fallback is
+		// unnecessary.
+		// Failures from a mirror endpoint should result in fallback to the
+		// canonical repo.
+		return mirrorEndpoint
 	case error:
 		return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error()))
 	}
diff --git a/distribution/errors_test.go b/distribution/errors_test.go
new file mode 100644
index 0000000..aa9ef4f
--- /dev/null
+++ b/distribution/errors_test.go
@@ -0,0 +1,85 @@
+package distribution
+
+import (
+	"errors"
+	"strings"
+	"syscall"
+	"testing"
+
+	"github.com/docker/distribution/registry/api/errcode"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client"
+)
+
+var alwaysContinue = []error{
+	&client.UnexpectedHTTPResponseError{},
+
+	// Some errcode.Errors that don't disprove the existence of a V1 image
+	errcode.Error{Code: errcode.ErrorCodeUnauthorized},
+	errcode.Error{Code: v2.ErrorCodeManifestUnknown},
+	errcode.Error{Code: v2.ErrorCodeNameUnknown},
+
+	errors.New("some totally unexpected error"),
+}
+
+var continueFromMirrorEndpoint = []error{
+	ImageConfigPullError{},
+
+	// Some other errcode.Error that doesn't indicate we should search for a V1 image.
+	errcode.Error{Code: errcode.ErrorCodeTooManyRequests},
+}
+
+var neverContinue = []error{
+	errors.New(strings.ToLower(syscall.ESRCH.Error())), // No such process
+}
+
+func TestContinueOnError_NonMirrorEndpoint(t *testing.T) {
+	for _, err := range alwaysContinue {
+		if !continueOnError(err, false) {
+			t.Errorf("Should continue from non-mirror endpoint: %T: '%s'", err, err.Error())
+		}
+	}
+
+	for _, err := range continueFromMirrorEndpoint {
+		if continueOnError(err, false) {
+			t.Errorf("Should only continue from mirror endpoint: %T: '%s'", err, err.Error())
+		}
+	}
+}
+
+func TestContinueOnError_MirrorEndpoint(t *testing.T) {
+	errs := []error{}
+	errs = append(errs, alwaysContinue...)
+	errs = append(errs, continueFromMirrorEndpoint...)
+	for _, err := range errs {
+		if !continueOnError(err, true) {
+			t.Errorf("Should continue from mirror endpoint: %T: '%s'", err, err.Error())
+		}
+	}
+}
+
+func TestContinueOnError_NeverContinue(t *testing.T) {
+	for _, isMirrorEndpoint := range []bool{true, false} {
+		for _, err := range neverContinue {
+			if continueOnError(err, isMirrorEndpoint) {
+				t.Errorf("Should never continue: %T: '%s'", err, err.Error())
+			}
+		}
+	}
+}
+
+func TestContinueOnError_UnnestsErrors(t *testing.T) {
+	// ContinueOnError should evaluate nested errcode.Errors.
+
+	// Assumes that v2.ErrorCodeNameUnknown is a continueable error code.
+	err := errcode.Errors{errcode.Error{Code: v2.ErrorCodeNameUnknown}}
+	if !continueOnError(err, false) {
+		t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors")
+	}
+
+	// Assumes that errcode.ErrorCodeTooManyRequests is not a V1-fallback indication
+	err = errcode.Errors{errcode.Error{Code: errcode.ErrorCodeTooManyRequests}}
+	if continueOnError(err, false) {
+		t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors")
+	}
+}
diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go
index c8d784c..35ff529 100644
--- a/distribution/pull_v2.go
+++ b/distribution/pull_v2.go
@@ -74,7 +74,7 @@
 		if _, ok := err.(fallbackError); ok {
 			return err
 		}
-		if continueOnError(err) {
+		if continueOnError(err, p.endpoint.Mirror) {
 			return fallbackError{
 				err:         err,
 				confirmedV2: p.confirmedV2,
diff --git a/distribution/push_v2.go b/distribution/push_v2.go
index 7ffce5b..2aecc18 100644
--- a/distribution/push_v2.go
+++ b/distribution/push_v2.go
@@ -67,7 +67,7 @@
 	}
 
 	if err = p.pushV2Repository(ctx); err != nil {
-		if continueOnError(err) {
+		if continueOnError(err, p.endpoint.Mirror) {
 			return fallbackError{
 				err:         err,
 				confirmedV2: p.pushState.confirmedV2,
diff --git a/dockerversion/version_lib.go b/dockerversion/version_lib.go
index 33f77d3..72f4893 100644
--- a/dockerversion/version_lib.go
+++ b/dockerversion/version_lib.go
@@ -13,4 +13,5 @@
 	ContainerdCommitID string = "library-import"
 	RuncCommitID       string = "library-import"
 	InitCommitID       string = "library-import"
+	PlatformName       string = ""
 )
diff --git a/docs/contributing/set-up-dev-env.md b/docs/contributing/set-up-dev-env.md
index acd6888..28bea5b 100644
--- a/docs/contributing/set-up-dev-env.md
+++ b/docs/contributing/set-up-dev-env.md
@@ -10,8 +10,7 @@
 You use the `moby/moby` repository and its `Dockerfile` to create a Docker image,
 run a Docker container, and develop code in the container.
 
-If you followed the procedures that <a href="/opensource/project/set-up-git/" target="_blank">
-set up Git for contributing</a>, you should have a fork of the `moby/moby`
+If you followed the procedures that [set up Git for contributing](./set-up-git.md), you should have a fork of the `moby/moby`
 repository. You also created a branch called `dry-run-test`. In this section,
 you continue working with your fork on this branch.
 
@@ -106,8 +105,7 @@
    ```
 
    If you are following along with this guide, you created a `dry-run-test`
-   branch when you <a href="/opensource/project/set-up-git/" target="_blank">
-   set up Git for contributing</a>.
+   branch when you [set up Git for contributing](./set-up-git.md).
 
 3. Ensure you are on your `dry-run-test` branch.
 
@@ -132,14 +130,14 @@
    ```none
    Successfully built 3d872560918e
    docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/moby/moby/bundles" -t "docker-dev:dry-run-test" bash
-   root@f31fa223770f:/go/src/github.com/moby/moby#
+   root@f31fa223770f:/go/src/github.com/docker/docker#
    ```
 
    At this point, your prompt reflects the container's BASH shell.
 
 5. List the contents of the current directory (`/go/src/github.com/moby/moby`).
 
-   You should see the image's source from the  `/go/src/github.com/moby/moby`
+   You should see the image's source from the  `/go/src/github.com/docker/docker`
    directory.
 
    ![List example](images/list_example.png)
@@ -147,7 +145,7 @@
 6. Make a `dockerd` binary.
 
    ```none
-   root@a8b2885ab900:/go/src/github.com/moby/moby# hack/make.sh binary
+   root@a8b2885ab900:/go/src/github.com/docker/docker# hack/make.sh binary
    Removing bundles/
 
    ---> Making bundle: binary (in bundles/binary)
@@ -161,7 +159,7 @@
    `/usr/local/bin/` directory.
 
    ```none
-   root@a8b2885ab900:/go/src/github.com/moby/moby# make install
+   root@a8b2885ab900:/go/src/github.com/docker/docker# make install
    ```
 
 8. Start the Engine daemon running in the background.
@@ -190,7 +188,7 @@
 9. Inside your container, check your Docker version.
 
    ```none
-   root@5f8630b873fe:/go/src/github.com/moby/moby# docker --version
+   root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version
    Docker version 1.12.0-dev, build 6e728fb
    ```
 
@@ -201,13 +199,13 @@
 10. Run the `hello-world` image.
 
     ```none
-    root@5f8630b873fe:/go/src/github.com/moby/moby# docker run hello-world
+    root@5f8630b873fe:/go/src/github.com/docker/docker# docker run hello-world
     ```
 
 11. List the image you just downloaded.
 
     ```none
-    root@5f8630b873fe:/go/src/github.com/moby/moby# docker images
+    root@5f8630b873fe:/go/src/github.com/docker/docker# docker images
 	REPOSITORY   TAG     IMAGE ID      CREATED        SIZE
 	hello-world  latest  c54a2cc56cbb  3 months ago   1.85 kB
     ```
@@ -296,7 +294,7 @@
 10. To view your change, run the `dockerd --help` command in the docker development container shell.
 
    ```bash
-   root@b0cb4f22715d:/go/src/github.com/moby/moby# dockerd --help
+   root@b0cb4f22715d:/go/src/github.com/docker/docker# dockerd --help
 
    Usage:        dockerd COMMAND
 
diff --git a/docs/contributing/test.md b/docs/contributing/test.md
index 9a63a12..6a4c984 100644
--- a/docs/contributing/test.md
+++ b/docs/contributing/test.md
@@ -47,7 +47,7 @@
 | ---------------------- | ---------------------------------------------- |
 | `test`                 | Run the unit, integration, and docker-py tests |
 | `test-unit`            | Run just the unit tests                        |
-| `test-integration-cli` | Run the integration tests for the CLI          |
+| `test-integration`     | Run the integration tests                      |
 | `test-docker-py`       | Run the tests for the Docker API client        |
 
 Running the entire test suite on your current repository can take over half an
@@ -113,7 +113,7 @@
 3.  Run the tests using the `hack/make.sh` script.
 
     ```bash
-    root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py
+    root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py
     ```
 
     The tests run just as they did within your local host.
@@ -164,13 +164,13 @@
 your local host you can run the `TestBuild` test with this command:
 
 ```bash
-$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli
+$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration
 ```
 
 To run the same test inside your Docker development container, you do this:
 
 ```bash
-root@5f8630b873fe:/go/src/github.com/moby/moby# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli
+root@5f8630b873fe:/go/src/github.com/moby/moby# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration
 ```
 
 ## Test the Windows binary against a Linux daemon
@@ -207,14 +207,14 @@
 5.  Make the binary and run the tests:
 
     ```bash
-    $ hack/make.sh binary test-integration-cli
+    $ hack/make.sh binary test-integration
     ```
     Some tests are skipped on Windows for various reasons. You can see which
     tests were skipped by re-running the make and passing in the
    `TESTFLAGS='-test.v'` value. For example
 
     ```bash
-    $ TESTFLAGS='-test.v' hack/make.sh binary test-integration-cli
+    $ TESTFLAGS='-test.v' hack/make.sh binary test-integration
     ```
 
     Should you wish to run a single test such as one with the name
@@ -222,7 +222,7 @@
     example
 
     ```bash
-    $ TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration-cli
+    $ TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration
     ```
 
 You can now choose to make changes to the Moby source or the tests. If you
diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits
index 1a20352..abe8bfe 100644
--- a/hack/dockerfile/binaries-commits
+++ b/hack/dockerfile/binaries-commits
@@ -4,7 +4,7 @@
 
 # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
 RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=v1.0.0-beta.3
+CONTAINERD_COMMIT=v1.0.0
 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
 VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
diff --git a/hack/dockerfile/install-binaries.sh b/hack/dockerfile/install-binaries.sh
index e97385e..160ff1a 100755
--- a/hack/dockerfile/install-binaries.sh
+++ b/hack/dockerfile/install-binaries.sh
@@ -48,7 +48,7 @@
 	git checkout -q "$CONTAINERD_COMMIT"
 	(
 		export GOPATH
-		make EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS="-extldflags \\\"-fno-PIC -static\\\""
+		make BUILDTAGS='static_build' EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"'
 	)
 	cp bin/containerd /usr/local/bin/docker-containerd
 	cp bin/containerd-shim /usr/local/bin/docker-containerd-shim
diff --git a/hack/integration-cli-on-swarm/README.md b/hack/integration-cli-on-swarm/README.md
index 1cea525..4f4f67d 100644
--- a/hack/integration-cli-on-swarm/README.md
+++ b/hack/integration-cli-on-swarm/README.md
@@ -12,7 +12,7 @@
 ### Worker service
 
   - Works as a funker callee
-  - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
+  - Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration` using the bind-mounted API socket (`docker.sock`)
 
 ### Client
 
diff --git a/hack/integration-cli-on-swarm/agent/worker/executor.go b/hack/integration-cli-on-swarm/agent/worker/executor.go
index 3442b09..eef80d4 100644
--- a/hack/integration-cli-on-swarm/agent/worker/executor.go
+++ b/hack/integration-cli-on-swarm/agent/worker/executor.go
@@ -43,7 +43,7 @@
 			}
 			graphdriver = info.Driver
 		}
-		// `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration-cli`)
+		// `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration`)
 		// but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work.
 		//
 		// Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs`
@@ -65,7 +65,7 @@
 				"org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. you can remove this container manually if the corresponding service is already stopped.",
 			},
 			Entrypoint: []string{"hack/dind"},
-			Cmd:        []string{"hack/make.sh", "test-integration-cli"},
+			Cmd:        []string{"hack/make.sh", "test-integration"},
 		}
 		hostConfig := container.HostConfig{
 			AutoRemove: autoRemove,
diff --git a/hack/integration-cli-on-swarm/host/enumerate.go b/hack/integration-cli-on-swarm/host/enumerate.go
index 56c03e3..3354c23 100644
--- a/hack/integration-cli-on-swarm/host/enumerate.go
+++ b/hack/integration-cli-on-swarm/host/enumerate.go
@@ -26,7 +26,7 @@
 
 // enumerateTests enumerates valid `-check.f` strings for all the test functions.
 // Note that we use regexp rather than parsing Go files for performance reason.
-// (Try `TESTFLAGS=-check.list make test-integration-cli` to see the slowness of parsing)
+// (Try `TESTFLAGS=-check.list make test-integration` to see the slowness of parsing)
 // The files needs to be `gofmt`-ed
 //
 // The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`):
diff --git a/hack/make.ps1 b/hack/make.ps1
index 3380a5b..42a2b31 100644
--- a/hack/make.ps1
+++ b/hack/make.ps1
@@ -365,7 +365,7 @@
     # Run autogen if building binaries or running unit tests.
     if ($Client -or $Daemon -or $TestUnit) {
         Write-Host "INFO: Invoking autogen..."
-        Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion }
+        Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion -Platform "$env:PLATFORM" }
         Catch [Exception] { Throw $_ }
     }
 
diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen
index b68e3a7..850c3ec 100644
--- a/hack/make/.go-autogen
+++ b/hack/make/.go-autogen
@@ -18,6 +18,7 @@
 	BuildTime          string = "$BUILDTIME"
 	IAmStatic          string = "${IAMSTATIC:-true}"
 	ContainerdCommitID string = "${CONTAINERD_COMMIT}"
+	PlatformName       string = "${PLATFORM}"
 )
 
 // AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen
diff --git a/hack/make/.go-autogen.ps1 b/hack/make/.go-autogen.ps1
index 768badb..cc14e9e 100644
--- a/hack/make/.go-autogen.ps1
+++ b/hack/make/.go-autogen.ps1
@@ -14,7 +14,8 @@
 
 param(
     [Parameter(Mandatory=$true)][string]$CommitString,
-    [Parameter(Mandatory=$true)][string]$DockerVersion
+    [Parameter(Mandatory=$true)][string]$DockerVersion,
+    [Parameter(Mandatory=$false)][string]$Platform
 )
 
 $ErrorActionPreference = "Stop"
@@ -43,6 +44,7 @@
     GitCommit          string = "'+$CommitString+'"
     Version            string = "'+$DockerVersion+'"
     BuildTime          string = "'+$buildDateTime+'"
+    PlatformName       string = "'+$Platform+'"
 )
 
 // AUTOGENERATED FILE; see hack\make\.go-autogen.ps1
diff --git a/integration-cli/docker_cli_cp_to_container_test.go b/integration-cli/docker_cli_cp_to_container_test.go
index 57a850c..24c1fe2 100644
--- a/integration-cli/docker_cli_cp_to_container_test.go
+++ b/integration-cli/docker_cli_cp_to_container_test.go
@@ -2,6 +2,7 @@
 
 import (
 	"os"
+	"runtime"
 	"strings"
 
 	"github.com/docker/docker/integration-cli/checker"
@@ -35,7 +36,22 @@
 	c.Assert(os.IsNotExist(srcStatErr), checker.True)
 
 	err := runDockerCp(c, srcPath, dstPath, nil)
-	c.Assert(strings.ToLower(err.Error()), checker.Contains, strings.ToLower(srcStatErr.Error()))
+	if runtime.GOOS == "windows" {
+		// Go 1.9+ on Windows returns a different error for `os.Stat()`, see
+		// https://github.com/golang/go/commit/6144c7270e5812d9de8fb97456ee4e5ae657fcbb#diff-f63e1a4b4377b2fe0b05011db3df9599
+		//
+		// Go 1.8: CreateFile C:\not-exist: The system cannot find the file specified.
+		// Go 1.9: GetFileAttributesEx C:\not-exist: The system cannot find the file specified.
+		//
+		// Due to the CLI using a different version than the daemon, comparing the
+		// error message won't work, so just hard-code the common part here.
+		//
+		// TODO this should probably be a test in the CLI repository instead
+		c.Assert(strings.ToLower(err.Error()), checker.Contains, "cannot find the file specified")
+		c.Assert(strings.ToLower(err.Error()), checker.Contains, strings.ToLower(tmpDir))
+	} else {
+		c.Assert(strings.ToLower(err.Error()), checker.Contains, strings.ToLower(srcStatErr.Error()))
+	}
 }
 
 // Test for error when SRC ends in a trailing
diff --git a/integration-cli/docker_cli_netmode_test.go b/integration-cli/docker_cli_netmode_test.go
index abf1ff2..2b134d4 100644
--- a/integration-cli/docker_cli_netmode_test.go
+++ b/integration-cli/docker_cli_netmode_test.go
@@ -49,7 +49,7 @@
 	c.Assert(out, checker.Contains, "Invalid network mode: invalid container format container:<name|id>")
 
 	out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps")
-	c.Assert(strings.ToLower(out), checker.Contains, "no such network")
+	c.Assert(strings.ToLower(out), checker.Contains, "not found")
 }
 
 func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) {
diff --git a/integration-cli/docker_cli_plugins_test.go b/integration-cli/docker_cli_plugins_test.go
index 13ae2b0..310067e 100644
--- a/integration-cli/docker_cli_plugins_test.go
+++ b/integration-cli/docker_cli_plugins_test.go
@@ -168,8 +168,19 @@
 	defer cancel()
 
 	initialValue := "0"
+	mntSrc := "foo"
+	devPath := "/dev/bar"
+
 	err = plugin.Create(ctx, client, name, func(cfg *plugin.Config) {
 		cfg.Env = []types.PluginEnv{{Name: "DEBUG", Value: &initialValue, Settable: []string{"value"}}}
+		cfg.Mounts = []types.PluginMount{
+			{Name: "pmount1", Settable: []string{"source"}, Type: "none", Source: &mntSrc},
+			{Name: "pmount2", Settable: []string{"source"}, Type: "none"}, // Mount without source is invalid.
+		}
+		cfg.Linux.Devices = []types.PluginDevice{
+			{Name: "pdev1", Path: &devPath, Settable: []string{"path"}},
+			{Name: "pdev2", Settable: []string{"path"}}, // Device without Path is invalid.
+		}
 	})
 	c.Assert(err, checker.IsNil, check.Commentf("failed to create test plugin"))
 
@@ -180,6 +191,23 @@
 
 	env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{.Settings.Env}}", name)
 	c.Assert(strings.TrimSpace(env), checker.Equals, "[DEBUG=1]")
+
+	env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}", name)
+	c.Assert(strings.TrimSpace(env), checker.Contains, mntSrc)
+
+	dockerCmd(c, "plugin", "set", name, "pmount1.source=bar")
+
+	env, _ = dockerCmd(c, "plugin", "inspect", "-f", "{{with $mount := index .Settings.Mounts 0}}{{$mount.Source}}{{end}}", name)
+	c.Assert(strings.TrimSpace(env), checker.Contains, "bar")
+
+	out, _, err := dockerCmdWithError("plugin", "set", name, "pmount2.source=bar2")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "Plugin config has no mount source")
+
+	out, _, err = dockerCmdWithError("plugin", "set", name, "pdev2.path=/dev/bar2")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "Plugin config has no device path")
+
 }
 
 func (ps *DockerPluginSuite) TestPluginInstallArgs(c *check.C) {
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index 6dbbb67..e6c1d91 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -4284,6 +4284,17 @@
 func (s *DockerSuite) TestRunServicingContainer(c *check.C) {
 	testRequires(c, DaemonIsWindows, SameHostDaemon)
 
+	// This functionality does not exist in post-RS3 builds.
+	// Note we get the version number from the full build string, as Windows
+	// reports Windows 8 version 6.2 build 9200 from non-manifested binaries.
+	// Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724451(v=vs.85).aspx
+	v, err := kernel.GetKernelVersion()
+	c.Assert(err, checker.IsNil)
+	build, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0])
+	if build > 16299 {
+		c.Skip("Disabled on post-RS3 builds")
+	}
+
 	out := cli.DockerCmd(c, "run", "-d", testEnv.MinimalBaseImage(), "cmd", "/c", "mkdir c:\\programdata\\Microsoft\\Windows\\ContainerUpdates\\000_000_d99f45d0-ffc8-4af7-bd9c-ea6a62e035c9_200 && sc control cexecsvc 255").Combined()
 	containerID := strings.TrimSpace(out)
 	cli.WaitExited(c, containerID, 60*time.Second)
diff --git a/integration/build/build_test.go b/integration/build/build_test.go
index cbaa7dc..b447b62 100644
--- a/integration/build/build_test.go
+++ b/integration/build/build_test.go
@@ -169,3 +169,31 @@
 	assert.Equal(t, "/foo/sub2", image.Config.WorkingDir)
 	assert.Contains(t, image.Config.Env, "WHO=parent")
 }
+
+func TestBuildWithEmptyLayers(t *testing.T) {
+	dockerfile := `
+		FROM    busybox
+		COPY    1/ /target/
+		COPY    2/ /target/
+		COPY    3/ /target/
+	`
+	ctx := context.Background()
+	source := fakecontext.New(t, "",
+		fakecontext.WithDockerfile(dockerfile),
+		fakecontext.WithFile("1/a", "asdf"),
+		fakecontext.WithFile("2/a", "asdf"),
+		fakecontext.WithFile("3/a", "asdf"))
+	defer source.Close()
+
+	apiclient := testEnv.APIClient()
+	resp, err := apiclient.ImageBuild(ctx,
+		source.AsTarReader(t),
+		types.ImageBuildOptions{
+			Remove:      true,
+			ForceRemove: true,
+		})
+	require.NoError(t, err)
+	_, err = io.Copy(ioutil.Discard, resp.Body)
+	resp.Body.Close()
+	require.NoError(t, err)
+}
diff --git a/integration/container/exec_test.go b/integration/container/exec_test.go
new file mode 100644
index 0000000..22d7ec0
--- /dev/null
+++ b/integration/container/exec_test.go
@@ -0,0 +1,60 @@
+package container
+
+import (
+	"context"
+	"io/ioutil"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/docker/integration/util/request"
+	"github.com/stretchr/testify/require"
+)
+
+func TestExec(t *testing.T) {
+	defer setupTest(t)()
+	ctx := context.Background()
+	client := request.NewAPIClient(t)
+
+	container, err := client.ContainerCreate(ctx,
+		&container.Config{
+			Image:      "busybox",
+			Tty:        true,
+			WorkingDir: "/root",
+			Cmd:        strslice.StrSlice([]string{"top"}),
+		},
+		&container.HostConfig{},
+		&network.NetworkingConfig{},
+		"foo",
+	)
+	require.NoError(t, err)
+	err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})
+	require.NoError(t, err)
+
+	id, err := client.ContainerExecCreate(ctx, container.ID,
+		types.ExecConfig{
+			WorkingDir:   "/tmp",
+			Env:          strslice.StrSlice([]string{"FOO=BAR"}),
+			AttachStdout: true,
+			Cmd:          strslice.StrSlice([]string{"sh", "-c", "env"}),
+		},
+	)
+	require.NoError(t, err)
+
+	resp, err := client.ContainerExecAttach(ctx, id.ID,
+		types.ExecStartCheck{
+			Detach: false,
+			Tty:    false,
+		},
+	)
+	require.NoError(t, err)
+	defer resp.Close()
+	r, err := ioutil.ReadAll(resp.Reader)
+	require.NoError(t, err)
+	out := string(r)
+	require.NoError(t, err)
+	require.Contains(t, out, "PWD=/tmp", "exec command not running in expected /tmp working directory")
+	require.Contains(t, out, "FOO=BAR", "exec command not running with expected environment variable FOO")
+}
diff --git a/integration/image/import_test.go b/integration/image/import_test.go
index 955891f..2bb7ffb 100644
--- a/integration/image/import_test.go
+++ b/integration/image/import_test.go
@@ -5,6 +5,7 @@
 	"bytes"
 	"context"
 	"io"
+	"runtime"
 	"testing"
 
 	"github.com/docker/docker/api/types"
@@ -14,12 +15,17 @@
 
 // Ensure we don't regress on CVE-2017-14992.
 func TestImportExtremelyLargeImageWorks(t *testing.T) {
+	if runtime.GOARCH == "arm64" {
+		t.Skip("effective test will be time out")
+	}
+
 	client := request.NewAPIClient(t)
 
 	// Construct an empty tar archive with about 8GB of junk padding at the
 	// end. This should not cause any crashes (the padding should be mostly
 	// ignored).
 	var tarBuffer bytes.Buffer
+
 	tw := tar.NewWriter(&tarBuffer)
 	if err := tw.Close(); err != nil {
 		t.Fatal(err)
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
index e94185a..6cfb27e 100644
--- a/integration/service/create_test.go
+++ b/integration/service/create_test.go
@@ -11,12 +11,11 @@
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/request"
 	"github.com/gotestyourself/gotestyourself/poll"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/net/context"
 )
 
-func TestCreateWithLBSandbox(t *testing.T) {
+func TestCreateServiceMultipleTimes(t *testing.T) {
 	defer setupTest(t)()
 	d := newSwarm(t)
 	defer d.Stop(t)
@@ -33,9 +32,8 @@
 	require.NoError(t, err)
 	overlayID := netResp.ID
 
-	var instances uint64 = 1
+	var instances uint64 = 4
 	serviceSpec := swarmServiceSpec("TestService", instances)
-
 	serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: overlayName})
 
 	serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
@@ -56,14 +54,26 @@
 	_, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
 	require.NoError(t, err)
 
-	network, err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{})
-	require.NoError(t, err)
-	assert.Contains(t, network.Containers, overlayName+"-sbox")
-
 	err = client.ServiceRemove(context.Background(), serviceID)
 	require.NoError(t, err)
 
 	poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings)
+	poll.WaitOn(t, noTasks(client), pollSettings)
+
+	serviceResp, err = client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
+		QueryRegistry: false,
+	})
+	require.NoError(t, err)
+
+	serviceID2 := serviceResp.ID
+	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), pollSettings)
+
+	err = client.ServiceRemove(context.Background(), serviceID2)
+	require.NoError(t, err)
+
+	poll.WaitOn(t, serviceIsRemoved(client, serviceID2), pollSettings)
+	poll.WaitOn(t, noTasks(client), pollSettings)
+
 	err = client.NetworkRemove(context.Background(), overlayID)
 	require.NoError(t, err)
 
@@ -112,6 +122,23 @@
 	}
 }
 
+func noTasks(client client.ServiceAPIClient) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		filter := filters.NewArgs()
+		tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
+			Filters: filter,
+		})
+		switch {
+		case err != nil:
+			return poll.Error(err)
+		case len(tasks) == 0:
+			return poll.Success()
+		default:
+			return poll.Continue("task count at %d waiting for 0", len(tasks))
+		}
+	}
+}
+
 func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result {
 	return func(log poll.LogT) poll.Result {
 		filter := filters.NewArgs()
diff --git a/layer/layer_test.go b/layer/layer_test.go
index 6936fae..f632d44 100644
--- a/layer/layer_test.go
+++ b/layer/layer_test.go
@@ -23,7 +23,7 @@
 func init() {
 	graphdriver.ApplyUncompressedLayer = archive.UnpackLayer
 	defaultArchiver := archive.NewDefaultArchiver()
-	vfs.CopyWithTar = defaultArchiver.CopyWithTar
+	vfs.CopyDir = defaultArchiver.CopyWithTar
 }
 
 func newVFSGraphDriver(td string) (graphdriver.Driver, error) {
diff --git a/libcontainerd/client_daemon.go b/libcontainerd/client_daemon.go
index f1b5f01..0a3502c 100644
--- a/libcontainerd/client_daemon.go
+++ b/libcontainerd/client_daemon.go
@@ -21,12 +21,14 @@
 	"google.golang.org/grpc/status"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/api/events"
 	eventsapi "github.com/containerd/containerd/api/services/events/v1"
 	"github.com/containerd/containerd/api/types"
 	"github.com/containerd/containerd/archive"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/containerd/typeurl"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/opencontainers/image-spec/specs-go/v1"
@@ -70,7 +72,7 @@
 	c.Lock()
 	defer c.Unlock()
 
-	var cio containerd.IO
+	var rio cio.IO
 	defer func() {
 		err = wrapError(err)
 	}()
@@ -81,20 +83,20 @@
 	}
 
 	defer func() {
-		if err != nil && cio != nil {
-			cio.Cancel()
-			cio.Close()
+		if err != nil && rio != nil {
+			rio.Cancel()
+			rio.Close()
 		}
 	}()
 
-	t, err := ctr.Task(ctx, func(fifos *containerd.FIFOSet) (containerd.IO, error) {
+	t, err := ctr.Task(ctx, func(fifos *cio.FIFOSet) (cio.IO, error) {
 		io, err := newIOPipe(fifos)
 		if err != nil {
 			return nil, err
 		}
 
-		cio, err = attachStdio(io)
-		return cio, err
+		rio, err = attachStdio(io)
+		return rio, err
 	})
 	if err != nil && !strings.Contains(err.Error(), "no running task found") {
 		return false, -1, err
@@ -168,7 +170,7 @@
 	var (
 		cp             *types.Descriptor
 		t              containerd.Task
-		cio            containerd.IO
+		rio            cio.IO
 		err            error
 		stdinCloseSync = make(chan struct{})
 	)
@@ -203,14 +205,14 @@
 	}
 	uid, gid := getSpecUser(spec)
 	t, err = ctr.ctr.NewTask(ctx,
-		func(id string) (containerd.IO, error) {
+		func(id string) (cio.IO, error) {
 			fifos := newFIFOSet(ctr.bundleDir, id, InitProcessName, withStdin, spec.Process.Terminal)
-			cio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio)
-			return cio, err
+			rio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio)
+			return rio, err
 		},
 		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
 			info.Checkpoint = cp
-			info.Options = &runcopts.CreateOptions{
+			info.Options = &runctypes.CreateOptions{
 				IoUid: uint32(uid),
 				IoGid: uint32(gid),
 			}
@@ -218,9 +220,9 @@
 		})
 	if err != nil {
 		close(stdinCloseSync)
-		if cio != nil {
-			cio.Cancel()
-			cio.Close()
+		if rio != nil {
+			rio.Cancel()
+			rio.Close()
 		}
 		return -1, err
 	}
@@ -259,7 +261,7 @@
 
 	var (
 		p              containerd.Process
-		cio            containerd.IO
+		rio            cio.IO
 		err            error
 		stdinCloseSync = make(chan struct{})
 	)
@@ -268,23 +270,23 @@
 
 	defer func() {
 		if err != nil {
-			if cio != nil {
-				cio.Cancel()
-				cio.Close()
+			if rio != nil {
+				rio.Cancel()
+				rio.Close()
 			}
 			rmFIFOSet(fifos)
 		}
 	}()
 
-	p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (containerd.IO, error) {
-		cio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
-		return cio, err
+	p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
+		rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
+		return rio, err
 	})
 	if err != nil {
 		close(stdinCloseSync)
-		if cio != nil {
-			cio.Cancel()
-			cio.Close()
+		if rio != nil {
+			rio.Cancel()
+			rio.Close()
 		}
 		return -1, err
 	}
@@ -569,7 +571,7 @@
 
 // createIO creates the io to be used by a process
 // This needs to get a pointer to interface as upon closure the process may not have yet been registered
-func (c *client) createIO(fifos *containerd.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (containerd.IO, error) {
+func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (cio.IO, error) {
 	io, err := newIOPipe(fifos)
 	if err != nil {
 		return nil, err
@@ -601,12 +603,12 @@
 		})
 	}
 
-	cio, err := attachStdio(io)
+	rio, err := attachStdio(io)
 	if err != nil {
 		io.Cancel()
 		io.Close()
 	}
-	return cio, err
+	return rio, err
 }
 
 func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
@@ -679,7 +681,10 @@
 	}()
 
 	eventStream, err = c.remote.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
-		Filters: []string{"namespace==" + c.namespace + ",topic~=/tasks/.+"},
+		Filters: []string{
+			"namespace==" + c.namespace,
+			"topic~=/tasks/",
+		},
 	}, grpc.FailFast(false))
 	if err != nil {
 		return
@@ -710,21 +715,21 @@
 		c.logger.WithField("topic", ev.Topic).Debug("event")
 
 		switch t := v.(type) {
-		case *eventsapi.TaskCreate:
+		case *events.TaskCreate:
 			et = EventCreate
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
 				ProcessID:   t.ContainerID,
 				Pid:         t.Pid,
 			}
-		case *eventsapi.TaskStart:
+		case *events.TaskStart:
 			et = EventStart
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
 				ProcessID:   t.ContainerID,
 				Pid:         t.Pid,
 			}
-		case *eventsapi.TaskExit:
+		case *events.TaskExit:
 			et = EventExit
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
@@ -733,32 +738,32 @@
 				ExitCode:    t.ExitStatus,
 				ExitedAt:    t.ExitedAt,
 			}
-		case *eventsapi.TaskOOM:
+		case *events.TaskOOM:
 			et = EventOOM
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
 				OOMKilled:   true,
 			}
 			oomKilled = true
-		case *eventsapi.TaskExecAdded:
+		case *events.TaskExecAdded:
 			et = EventExecAdded
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
 				ProcessID:   t.ExecID,
 			}
-		case *eventsapi.TaskExecStarted:
+		case *events.TaskExecStarted:
 			et = EventExecStarted
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
 				ProcessID:   t.ExecID,
 				Pid:         t.Pid,
 			}
-		case *eventsapi.TaskPaused:
+		case *events.TaskPaused:
 			et = EventPaused
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
 			}
-		case *eventsapi.TaskResumed:
+		case *events.TaskResumed:
 			et = EventResumed
 			ei = EventInfo{
 				ContainerID: t.ContainerID,
diff --git a/libcontainerd/client_daemon_linux.go b/libcontainerd/client_daemon_linux.go
index 14966f0..9a98fbd 100644
--- a/libcontainerd/client_daemon_linux.go
+++ b/libcontainerd/client_daemon_linux.go
@@ -8,6 +8,7 @@
 	"strings"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/docker/docker/pkg/idtools"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
@@ -79,8 +80,8 @@
 	return p, nil
 }
 
-func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *containerd.FIFOSet {
-	fifos := &containerd.FIFOSet{
+func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *cio.FIFOSet {
+	fifos := &cio.FIFOSet{
 		Terminal: withTerminal,
 		Out:      filepath.Join(bundleDir, processID+"-stdout"),
 	}
@@ -96,7 +97,7 @@
 	return fifos
 }
 
-func rmFIFOSet(fset *containerd.FIFOSet) {
+func rmFIFOSet(fset *cio.FIFOSet) {
 	for _, fn := range []string{fset.Out, fset.In, fset.Err} {
 		if fn != "" {
 			if err := os.RemoveAll(fn); err != nil {
diff --git a/libcontainerd/client_daemon_windows.go b/libcontainerd/client_daemon_windows.go
index 9bb5d86..10309cd 100644
--- a/libcontainerd/client_daemon_windows.go
+++ b/libcontainerd/client_daemon_windows.go
@@ -3,7 +3,7 @@
 import (
 	"fmt"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/windows/hcsshimtypes"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -35,8 +35,8 @@
 	return fmt.Sprintf(`\\.\pipe\containerd-%s-%s-%s`, containerID, processID, name)
 }
 
-func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *containerd.FIFOSet {
-	fifos := &containerd.FIFOSet{
+func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *cio.FIFOSet {
+	fifos := &cio.FIFOSet{
 		Terminal: withTerminal,
 		Out:      pipeName(containerID, processID, "stdout"),
 	}
diff --git a/libcontainerd/client_local_windows.go b/libcontainerd/client_local_windows.go
index 8ce9dfe..0987290 100644
--- a/libcontainerd/client_local_windows.go
+++ b/libcontainerd/client_local_windows.go
@@ -934,7 +934,7 @@
 		"width":     width,
 		"pid":       p.pid,
 	}).Debug("resizing")
-	return p.hcsProcess.ResizeConsole(uint16(height), uint16(width))
+	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
 }
 
 func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
diff --git a/libcontainerd/io.go b/libcontainerd/io.go
index 2c4af58..25a894b 100644
--- a/libcontainerd/io.go
+++ b/libcontainerd/io.go
@@ -1,9 +1,9 @@
 package libcontainerd
 
-import "github.com/containerd/containerd"
+import "github.com/containerd/containerd/cio"
 
 // Config returns the containerd.IOConfig of this pipe set
-func (p *IOPipe) Config() containerd.IOConfig {
+func (p *IOPipe) Config() cio.Config {
 	return p.config
 }
 
diff --git a/libcontainerd/io_unix.go b/libcontainerd/io_unix.go
index 0c08b20..8e59791 100644
--- a/libcontainerd/io_unix.go
+++ b/libcontainerd/io_unix.go
@@ -7,12 +7,12 @@
 	"io"
 	"syscall"
 
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/fifo"
 	"github.com/pkg/errors"
 )
 
-func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
+func newIOPipe(fifos *cio.FIFOSet) (*IOPipe, error) {
 	var (
 		err         error
 		ctx, cancel = context.WithCancel(context.Background())
@@ -20,7 +20,7 @@
 		iop         = &IOPipe{
 			Terminal: fifos.Terminal,
 			cancel:   cancel,
-			config: containerd.IOConfig{
+			config: cio.Config{
 				Terminal: fifos.Terminal,
 				Stdin:    fifos.In,
 				Stdout:   fifos.Out,
diff --git a/libcontainerd/io_windows.go b/libcontainerd/io_windows.go
index 312bdbd..f2e5a93 100644
--- a/libcontainerd/io_windows.go
+++ b/libcontainerd/io_windows.go
@@ -7,7 +7,7 @@
 	"sync"
 
 	winio "github.com/Microsoft/go-winio"
-	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/pkg/errors"
 )
 
@@ -90,7 +90,7 @@
 	}
 }
 
-func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
+func newIOPipe(fifos *cio.FIFOSet) (*IOPipe, error) {
 	var (
 		err         error
 		ctx, cancel = context.WithCancel(context.Background())
@@ -98,7 +98,7 @@
 		iop         = &IOPipe{
 			Terminal: fifos.Terminal,
 			cancel:   cancel,
-			config: containerd.IOConfig{
+			config: cio.Config{
 				Terminal: fifos.Terminal,
 				Stdin:    fifos.In,
 				Stdout:   fifos.Out,
diff --git a/libcontainerd/remote_daemon_options_linux.go b/libcontainerd/remote_daemon_options_linux.go
index 1e5a981..7376c06 100644
--- a/libcontainerd/remote_daemon_options_linux.go
+++ b/libcontainerd/remote_daemon_options_linux.go
@@ -27,7 +27,7 @@
 
 func (s subreaper) Apply(r Remote) error {
 	if remote, ok := r.(*remote); ok {
-		remote.Subreaper = bool(s)
+		remote.NoSubreaper = !bool(s)
 		return nil
 	}
 	return fmt.Errorf("WithSubreaper option not supported for this remote")
diff --git a/libcontainerd/types.go b/libcontainerd/types.go
index 9eede43..346fd24 100644
--- a/libcontainerd/types.go
+++ b/libcontainerd/types.go
@@ -6,6 +6,7 @@
 	"time"
 
 	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/cio"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
@@ -106,7 +107,7 @@
 }
 
 // StdioCallback is called to connect a container or process stdio.
-type StdioCallback func(*IOPipe) (containerd.IO, error)
+type StdioCallback func(*IOPipe) (cio.IO, error)
 
 // IOPipe contains the stdio streams.
 type IOPipe struct {
@@ -116,7 +117,7 @@
 	Terminal bool // Whether stderr is connected on Windows
 
 	cancel context.CancelFunc
-	config containerd.IOConfig
+	config cio.Config
 }
 
 // ServerVersion contains version information as retrieved from the
diff --git a/pkg/idtools/idtools.go b/pkg/idtools/idtools.go
index 49cc97c..6108ae3 100644
--- a/pkg/idtools/idtools.go
+++ b/pkg/idtools/idtools.go
@@ -42,7 +42,9 @@
 }
 
 // MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
-// If the directory already exists, this function still changes ownership
+// If the directory already exists, this function still changes ownership.
+// Note that unlike os.Mkdir(), this function does not return IsExist error
+// in case path already exists.
 func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error {
 	return mkdirAs(path, mode, owner.UID, owner.GID, false, true)
 }
diff --git a/pkg/idtools/idtools_unix.go b/pkg/idtools/idtools_unix.go
index ff7968f..aedf8ad 100644
--- a/pkg/idtools/idtools_unix.go
+++ b/pkg/idtools/idtools_unix.go
@@ -10,6 +10,7 @@
 	"path/filepath"
 	"strings"
 	"sync"
+	"syscall"
 
 	"github.com/docker/docker/pkg/system"
 	"github.com/opencontainers/runc/libcontainer/user"
@@ -29,6 +30,9 @@
 
 	stat, err := system.Stat(path)
 	if err == nil {
+		if !stat.IsDir() {
+			return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+		}
 		if !chownExisting {
 			return nil
 		}
@@ -54,7 +58,7 @@
 				paths = append(paths, dirPath)
 			}
 		}
-		if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+		if err := system.MkdirAll(path, mode, ""); err != nil {
 			return err
 		}
 	} else {
diff --git a/pkg/idtools/idtools_unix_test.go b/pkg/idtools/idtools_unix_test.go
index afefdb3..396ff20 100644
--- a/pkg/idtools/idtools_unix_test.go
+++ b/pkg/idtools/idtools_unix_test.go
@@ -378,6 +378,20 @@
 	assert.Error(t, err)
 }
 
+// TestMkdirIsNotDir checks that the mkdirAs() function (used by MkdirAll...)
+// returns a correct error in case the directory it is about to create
+// already exists but is a file (rather than a directory).
+func TestMkdirIsNotDir(t *testing.T) {
+	file, err := ioutil.TempFile("", t.Name())
+	if err != nil {
+		t.Fatalf("Couldn't create temp dir: %v", err)
+	}
+	defer os.Remove(file.Name())
+
+	err = mkdirAs(file.Name(), 0755, 0, 0, false, false)
+	assert.EqualError(t, err, "mkdir "+file.Name()+": not a directory")
+}
+
 func RequiresRoot(t *testing.T) {
 	skip.IfCondition(t, os.Getuid() != 0, "skipping test that requires root")
 }
diff --git a/pkg/idtools/idtools_windows.go b/pkg/idtools/idtools_windows.go
index 45d2878..94ca33a 100644
--- a/pkg/idtools/idtools_windows.go
+++ b/pkg/idtools/idtools_windows.go
@@ -11,7 +11,7 @@
 // Platforms such as Windows do not support the UID/GID concept. So make this
 // just a wrapper around system.MkdirAll.
 func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
-	if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) {
+	if err := system.MkdirAll(path, mode, ""); err != nil {
 		return err
 	}
 	return nil
diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go
index 6352bc7..8c4851c 100644
--- a/pkg/namesgenerator/names-generator.go
+++ b/pkg/namesgenerator/names-generator.go
@@ -192,6 +192,9 @@
 		// Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar
 		"chandrasekhar",
 
+		// Asima Chatterjee was an Indian organic chemist noted for her research on vinca alkaloids, development of drugs for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee
+		"chatterjee",
+
 		//Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon)
 		"shannon",
 
@@ -557,6 +560,9 @@
 		// Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions
 		"varahamihira",
 
+		// Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan
+		"vaughan",
+
 		// Sir Mokshagundam Visvesvaraya - is a notable Indian engineer.  He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya
 		"visvesvaraya",
 
diff --git a/pkg/system/stat_unix.go b/pkg/system/stat_unix.go
index 91c7d12..9dcec6a 100644
--- a/pkg/system/stat_unix.go
+++ b/pkg/system/stat_unix.go
@@ -47,6 +47,11 @@
 	return s.mtim
 }
 
+// IsDir reports whether s describes a directory.
+func (s StatT) IsDir() bool {
+	return s.mode&syscall.S_IFDIR != 0
+}
+
 // Stat takes a path to a file and returns
 // a system.StatT type pertaining to that file.
 //
diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go
index d93b8b7..9839467 100644
--- a/plugin/executor/containerd/containerd.go
+++ b/plugin/executor/containerd/containerd.go
@@ -6,8 +6,8 @@
 	"path/filepath"
 	"sync"
 
-	"github.com/containerd/containerd"
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/cio"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/docker/docker/api/errdefs"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/opencontainers/runtime-spec/specs-go"
@@ -46,7 +46,7 @@
 
 // Create creates a new container
 func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error {
-	opts := runcopts.RuncOptions{
+	opts := runctypes.RuncOptions{
 		RuntimeRoot: filepath.Join(e.rootDir, "runtime-root"),
 	}
 	ctx := context.Background()
@@ -110,37 +110,37 @@
 	return nil
 }
 
-type cio struct {
-	containerd.IO
+type rio struct {
+	cio.IO
 
 	wg sync.WaitGroup
 }
 
-func (c *cio) Wait() {
+func (c *rio) Wait() {
 	c.wg.Wait()
 	c.IO.Wait()
 }
 
 func attachStreamsFunc(stdout, stderr io.WriteCloser) libcontainerd.StdioCallback {
-	return func(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+	return func(iop *libcontainerd.IOPipe) (cio.IO, error) {
 		if iop.Stdin != nil {
 			iop.Stdin.Close()
 			// closing stdin shouldn't be needed here, it should never be open
 			panic("plugin stdin shouldn't have been created!")
 		}
 
-		cio := &cio{IO: iop}
-		cio.wg.Add(2)
+		rio := &rio{IO: iop}
+		rio.wg.Add(2)
 		go func() {
 			io.Copy(stdout, iop.Stdout)
 			stdout.Close()
-			cio.wg.Done()
+			rio.wg.Done()
 		}()
 		go func() {
 			io.Copy(stderr, iop.Stderr)
 			stderr.Close()
-			cio.wg.Done()
+			rio.wg.Done()
 		}()
-		return cio, nil
+		return rio, nil
 	}
 }
diff --git a/plugin/manager_linux_test.go b/plugin/manager_linux_test.go
new file mode 100644
index 0000000..3259ca8
--- /dev/null
+++ b/plugin/manager_linux_test.go
@@ -0,0 +1,79 @@
+package plugin
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/plugin/v2"
+)
+
+func TestManagerWithPluginMounts(t *testing.T) {
+	root, err := ioutil.TempDir("", "test-store-with-plugin-mounts")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer system.EnsureRemoveAll(root)
+
+	s := NewStore()
+	managerRoot := filepath.Join(root, "manager")
+	p1 := newTestPlugin(t, "test1", "testcap", managerRoot)
+
+	p2 := newTestPlugin(t, "test2", "testcap", managerRoot)
+	p2.PluginObj.Enabled = true
+
+	m, err := NewManager(
+		ManagerConfig{
+			Store:          s,
+			Root:           managerRoot,
+			ExecRoot:       filepath.Join(root, "exec"),
+			CreateExecutor: func(*Manager) (Executor, error) { return nil, nil },
+			LogPluginEvent: func(_, _, _ string) {},
+		})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := s.Add(p1); err != nil {
+		t.Fatal(err)
+	}
+	if err := s.Add(p2); err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a mount to simulate a plugin that has created its own mounts
+	p2Mount := filepath.Join(p2.Rootfs, "testmount")
+	if err := os.MkdirAll(p2Mount, 0755); err != nil {
+		t.Fatal(err)
+	}
+	if err := mount.Mount("tmpfs", p2Mount, "tmpfs", ""); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := m.Remove(p1.Name(), &types.PluginRmConfig{ForceRemove: true}); err != nil {
+		t.Fatal(err)
+	}
+	if mounted, err := mount.Mounted(p2Mount); !mounted || err != nil {
+		t.Fatalf("expected %s to be mounted, err: %v", p2Mount, err)
+	}
+}
+
+func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
+	rootfs := filepath.Join(root, name)
+	if err := os.MkdirAll(rootfs, 0755); err != nil {
+		t.Fatal(err)
+	}
+
+	p := v2.Plugin{PluginObj: types.Plugin{Name: name}}
+	p.Rootfs = rootfs
+	iType := types.PluginInterfaceType{Capability: cap, Prefix: "docker", Version: "1.0"}
+	i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
+	p.PluginObj.Config.Interface = i
+	p.PluginObj.ID = name
+
+	return &p
+}
diff --git a/plugin/v2/plugin.go b/plugin/v2/plugin.go
index b77536c..ce3257c 100644
--- a/plugin/v2/plugin.go
+++ b/plugin/v2/plugin.go
@@ -142,6 +142,9 @@
 				}
 
 				// it is, so lets update the settings in memory
+				if mount.Source == nil {
+					return fmt.Errorf("Plugin config has no mount source")
+				}
 				*mount.Source = s.value
 				continue next
 			}
@@ -159,6 +162,9 @@
 				}
 
 				// it is, so lets update the settings in memory
+				if device.Path == nil {
+					return fmt.Errorf("Plugin config has no device path")
+				}
 				*device.Path = s.value
 				continue next
 			}
diff --git a/vendor.conf b/vendor.conf
index 4487925..8c0b15a 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -30,7 +30,7 @@
 github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
 
 #get libnetwork packages
-github.com/docker/libnetwork 72fd7e5495eba86e28012e39b5ed63ef9ca9a97b
+github.com/docker/libnetwork 9bca9a4a220b158cc94402e0f8c2c7714eb6f503
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -42,7 +42,7 @@
 github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
 github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
@@ -79,10 +79,10 @@
 # gelf logging driver deps
 github.com/Graylog2/go-gelf v2
 
-github.com/fluent/fluent-logger-golang v1.2.1
+github.com/fluent/fluent-logger-golang v1.3.0
 # fluent-logger-golang deps
 github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
-github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad
 
 # fsnotify
 github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1
@@ -103,14 +103,15 @@
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 
 # containerd
-github.com/containerd/containerd v1.0.0-beta.3
+github.com/containerd/containerd v1.0.0
 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
 github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
-github.com/containerd/cgroups f7dd103d3e4e696aa67152f6b4ddd1779a3455a9
+github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
 github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
 github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
-github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
+github.com/dmcgowan/go-tar go1.10
+github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
 
 # cluster
 github.com/docker/swarmkit de950a7ed842c7b7e47e9451cde9bf8f96031894
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
index d9fa75f..d1c36bd 100644
--- a/vendor/github.com/containerd/cgroups/cgroup.go
+++ b/vendor/github.com/containerd/cgroups/cgroup.go
@@ -310,7 +310,8 @@
 }
 
 // OOMEventFD returns the memory cgroup's out of memory event fd that triggers
-// when processes inside the cgroup receive an oom event
+// when processes inside the cgroup receive an oom event. Returns
+// ErrMemoryNotSupported if memory cgroups is not supported.
 func (c *cgroup) OOMEventFD() (uintptr, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md
index 3832446..84d1eec 100644
--- a/vendor/github.com/containerd/containerd/README.md
+++ b/vendor/github.com/containerd/containerd/README.md
@@ -13,7 +13,37 @@
 
 ## Getting Started
 
-If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
+See our documentation on [containerd.io](https://containerd.io):
+* [for ops and admins](docs/ops.md)
+* [namespaces](docs/namespaces.md)
+* [client options](docs/client-opts.md)
+
+See how to build containerd from source at [BUILDING](BUILDING.md).
+
+If you are interested in trying out containerd, see our example at [Getting Started](docs/getting-started.md).
+
+
+## Runtime Requirements
+
+Runtime requirements for containerd are very minimal. Most interactions with
+the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
+OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
+
+There are specific features
+used by containerd core code and snapshotters that will require a minimum kernel
+version on Linux. With the understood caveat of distro kernel versioning, a
+reasonable starting point for Linux is a minimum 4.x kernel version.
+
+The overlay filesystem snapshotter, used by default, uses features that were
+finalized in the 4.x kernel series. If you choose to use btrfs, there may
+be more flexibility in kernel version (minimum recommended is 3.18), but will
+require the btrfs kernel module and btrfs tools to be installed on your Linux
+distribution.
+
+To use Linux checkpoint and restore features, you will need `criu` installed on
+your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
+
+Build requirements for developers are listed in [BUILDING](BUILDING.md).
 
 ## Features
 
@@ -23,7 +53,11 @@
 
 ```go
 
-import "github.com/containerd/containerd"
+import (
+  "github.com/containerd/containerd"
+  "github.com/containerd/containerd/cio"
+)
+
 
 func main() {
 	client, err := containerd.New("/run/containerd/containerd.sock")
@@ -39,7 +73,7 @@
 To set a namespace for requests to the API:
 
 ```go
-context    = context.Background()
+context = context.Background()
 // create a context for docker
 docker = namespaces.WithNamespace(context, "docker")
 
@@ -78,7 +112,7 @@
 You can specify options when creating a container about how to modify the specification.
 
 ```go
-redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(containerd.WithImageConfig(image)))
+redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(oci.WithImageConfig(image)))
 ```
 
 ### Root Filesystems
@@ -92,8 +126,7 @@
 // allocate a new RW root filesystem for a container based on the image
 redis, err := client.NewContainer(context, "redis-master",
 	containerd.WithNewSnapshot("redis-rootfs", image),
-	containerd.WithNewSpec(containerd.WithImageConfig(image)),
-
+	containerd.WithNewSpec(oci.WithImageConfig(image)),
 )
 
 // use a readonly filesystem with multiple containers
@@ -101,7 +134,7 @@
 	id := fmt.Sprintf("id-%s", i)
 	container, err := client.NewContainer(ctx, id,
 		containerd.WithNewSnapshotView(id, image),
-		containerd.WithNewSpec(containerd.WithImageConfig(image)),
+		containerd.WithNewSpec(oci.WithImageConfig(image)),
 	)
 }
 ```
@@ -112,7 +145,7 @@
 
 ```go
 // create a new task
-task, err := redis.NewTask(context, containerd.Stdio)
+task, err := redis.NewTask(context, cio.Stdio)
 defer task.Delete(context)
 
 // the task is now running and has a pid that can be use to setup networking
@@ -144,37 +177,12 @@
 redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
 defer container.Delete(context)
 
-task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
 defer task.Delete(context)
 
 err := task.Start(context)
 ```
 
-## Developer Quick-Start
-
-To build the daemon and `ctr` simple test client, the following build system dependencies are required:
-
-* Go 1.9.x or above
-* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
-* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
-
-For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.1.0 release for a 64-bit Linux host:
-
-```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip
-$ sudo unzip protoc-3.1.0-linux-x86_64.zip -d /usr/local
-```
-
-With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
-
-> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
-> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
-> Makefile target will disable the btrfs driver within the containerd Go build.
-
-Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
-
-Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
-
 ### Releases and API Stability
 
 Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go b/vendor/github.com/containerd/containerd/api/events/container.pb.go
similarity index 79%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go
rename to vendor/github.com/containerd/containerd/api/events/container.pb.go
index 420aba0..b05a402 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go
@@ -1,28 +1,22 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/events/v1/container.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/container.proto
 
 /*
 	Package events is a generated protocol buffer package.
 
 	It is generated from these files:
-		github.com/containerd/containerd/api/services/events/v1/container.proto
-		github.com/containerd/containerd/api/services/events/v1/content.proto
-		github.com/containerd/containerd/api/services/events/v1/events.proto
-		github.com/containerd/containerd/api/services/events/v1/image.proto
-		github.com/containerd/containerd/api/services/events/v1/namespace.proto
-		github.com/containerd/containerd/api/services/events/v1/snapshot.proto
-		github.com/containerd/containerd/api/services/events/v1/task.proto
+		github.com/containerd/containerd/api/events/container.proto
+		github.com/containerd/containerd/api/events/content.proto
+		github.com/containerd/containerd/api/events/image.proto
+		github.com/containerd/containerd/api/events/namespace.proto
+		github.com/containerd/containerd/api/events/snapshot.proto
+		github.com/containerd/containerd/api/events/task.proto
 
 	It has these top-level messages:
 		ContainerCreate
 		ContainerUpdate
 		ContainerDelete
 		ContentDelete
-		PublishRequest
-		ForwardRequest
-		SubscribeRequest
-		Envelope
 		ImageCreate
 		ImageUpdate
 		ImageDelete
@@ -49,9 +43,10 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/gogo/protobuf/types"
-import _ "github.com/containerd/containerd/protobuf/plugin"
+import google_protobuf "github.com/gogo/protobuf/types"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
 
 import github_com_containerd_typeurl "github.com/containerd/typeurl"
 
@@ -83,8 +78,8 @@
 func (*ContainerCreate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{0} }
 
 type ContainerCreate_Runtime struct {
-	Name    string                `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	Name    string               `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Options *google_protobuf.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
 }
 
 func (m *ContainerCreate_Runtime) Reset()      { *m = ContainerCreate_Runtime{} }
@@ -113,10 +108,10 @@
 func (*ContainerDelete) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{2} }
 
 func init() {
-	proto.RegisterType((*ContainerCreate)(nil), "containerd.services.events.v1.ContainerCreate")
-	proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.services.events.v1.ContainerCreate.Runtime")
-	proto.RegisterType((*ContainerUpdate)(nil), "containerd.services.events.v1.ContainerUpdate")
-	proto.RegisterType((*ContainerDelete)(nil), "containerd.services.events.v1.ContainerDelete")
+	proto.RegisterType((*ContainerCreate)(nil), "containerd.events.ContainerCreate")
+	proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.events.ContainerCreate.Runtime")
+	proto.RegisterType((*ContainerUpdate)(nil), "containerd.events.ContainerUpdate")
+	proto.RegisterType((*ContainerDelete)(nil), "containerd.events.ContainerDelete")
 }
 
 // Field returns the value for the given fieldpath as a string, if defined.
@@ -364,24 +359,6 @@
 	return i, nil
 }
 
-func encodeFixed64Container(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Container(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintContainer(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -490,7 +467,7 @@
 	}
 	s := strings.Join([]string{`&ContainerCreate_Runtime{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
-		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -762,7 +739,7 @@
 				return io.ErrUnexpectedEOF
 			}
 			if m.Options == nil {
-				m.Options = &google_protobuf1.Any{}
+				m.Options = &google_protobuf.Any{}
 			}
 			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
@@ -902,51 +879,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainer
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainer
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthContainer
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowContainer
@@ -956,41 +896,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowContainer
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContainer
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthContainer
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContainer
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthContainer
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipContainer(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthContainer
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthContainer
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
@@ -1227,36 +1206,35 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/container.proto", fileDescriptorContainer)
+	proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptorContainer)
 }
 
 var fileDescriptorContainer = []byte{
-	// 427 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x5d, 0x8b, 0xd4, 0x30,
-	0x14, 0xdd, 0x74, 0xd6, 0x19, 0x4c, 0x05, 0x25, 0x0c, 0x52, 0x07, 0xac, 0xe3, 0x3c, 0x8d, 0x2f,
-	0x09, 0x3b, 0x82, 0xe8, 0x0a, 0xa2, 0xbb, 0x2b, 0x22, 0x2a, 0x48, 0xc0, 0x17, 0x11, 0x24, 0x9d,
-	0xde, 0xed, 0x04, 0xdb, 0xa4, 0xb4, 0x69, 0xa1, 0x6f, 0xfe, 0x14, 0x7f, 0xce, 0x3e, 0xfa, 0xe8,
-	0x93, 0xec, 0xf6, 0x1f, 0x08, 0xfe, 0x00, 0x69, 0x32, 0xdd, 0x2d, 0x82, 0x9f, 0x6f, 0xe7, 0xe6,
-	0x9e, 0x73, 0xef, 0x39, 0xb7, 0xc5, 0xcf, 0x12, 0x69, 0x36, 0x55, 0x44, 0xd7, 0x3a, 0x63, 0x6b,
-	0xad, 0x8c, 0x90, 0x0a, 0x8a, 0x78, 0x08, 0x45, 0x2e, 0x59, 0x09, 0x45, 0x2d, 0xd7, 0x50, 0x32,
-	0xa8, 0x41, 0x99, 0x92, 0xd5, 0x7b, 0x17, 0x0c, 0x9a, 0x17, 0xda, 0x68, 0x72, 0xf3, 0x42, 0x42,
-	0x7b, 0x3a, 0x75, 0x74, 0x5a, 0xef, 0xcd, 0xa6, 0x89, 0x4e, 0xb4, 0x65, 0xb2, 0x0e, 0x39, 0xd1,
-	0xec, 0x46, 0xa2, 0x75, 0x92, 0x02, 0xb3, 0x55, 0x54, 0x1d, 0x33, 0xa1, 0x9a, 0x6d, 0xeb, 0xf1,
-	0x1f, 0x8d, 0x9d, 0x8b, 0xf2, 0xb4, 0x4a, 0xa4, 0x62, 0xc7, 0x12, 0xd2, 0x38, 0x17, 0x66, 0xe3,
-	0x26, 0x2c, 0x4e, 0x11, 0xbe, 0x7a, 0xd8, 0xd3, 0x0f, 0x0b, 0x10, 0x06, 0xc8, 0x75, 0xec, 0xc9,
-	0x38, 0x40, 0x73, 0xb4, 0xbc, 0x7c, 0x30, 0x6e, 0xbf, 0xde, 0xf2, 0x9e, 0x1f, 0x71, 0x4f, 0xc6,
-	0x64, 0x8a, 0x2f, 0xc9, 0x4c, 0x24, 0x10, 0x78, 0x5d, 0x8b, 0xbb, 0x82, 0xbc, 0xc6, 0x93, 0xa2,
-	0x52, 0x46, 0x66, 0x10, 0x8c, 0xe6, 0x68, 0xe9, 0xaf, 0xee, 0xd1, 0xdf, 0xa6, 0xa4, 0x3f, 0xad,
-	0xa3, 0xdc, 0xa9, 0x79, 0x3f, 0x66, 0xf6, 0x0a, 0x4f, 0xb6, 0x6f, 0x84, 0xe0, 0x5d, 0x25, 0x32,
-	0x70, 0x66, 0xb8, 0xc5, 0x84, 0xe2, 0x89, 0xce, 0x8d, 0xd4, 0xaa, 0xb4, 0x46, 0xfc, 0xd5, 0x94,
-	0xba, 0x0b, 0xd1, 0x3e, 0x2c, 0x7d, 0xa2, 0x1a, 0xde, 0x93, 0x16, 0xdf, 0x86, 0x11, 0xdf, 0xe4,
-	0xf1, 0xbf, 0x47, 0xe4, 0x78, 0x9c, 0x8a, 0x08, 0xd2, 0x32, 0x18, 0xcd, 0x47, 0x4b, 0x7f, 0xb5,
-	0xff, 0xb7, 0x09, 0xdd, 0x36, 0xfa, 0xd2, 0x8a, 0x9f, 0x2a, 0x53, 0x34, 0x7c, 0x3b, 0x89, 0xdc,
-	0xc6, 0x57, 0x4a, 0x25, 0xf2, 0x72, 0xa3, 0xcd, 0xfb, 0x0f, 0xd0, 0x04, 0xbb, 0x76, 0xa1, 0xdf,
-	0xbf, 0xbd, 0x80, 0x66, 0xf6, 0x00, 0xfb, 0x03, 0x25, 0xb9, 0x86, 0x47, 0x1d, 0xd1, 0x9d, 0xa2,
-	0x83, 0x9d, 0xdb, 0x5a, 0xa4, 0xd5, 0xb9, 0x5b, 0x5b, 0xec, 0x7b, 0xf7, 0xd1, 0xe2, 0xce, 0x20,
-	0xf2, 0x11, 0xa4, 0xf0, 0xeb, 0xc8, 0x07, 0xef, 0x4e, 0xce, 0xc2, 0x9d, 0x2f, 0x67, 0xe1, 0xce,
-	0xc7, 0x36, 0x44, 0x27, 0x6d, 0x88, 0x3e, 0xb7, 0x21, 0x3a, 0x6d, 0x43, 0xf4, 0xe9, 0x7b, 0x88,
-	0xde, 0x3e, 0xfa, 0xcf, 0x5f, 0xff, 0xa1, 0x43, 0xd1, 0xd8, 0x7e, 0x93, 0xbb, 0x3f, 0x02, 0x00,
-	0x00, 0xff, 0xff, 0x26, 0x0d, 0x55, 0x1c, 0x43, 0x03, 0x00, 0x00,
+	// 413 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30,
+	0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21,
+	0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb,
+	0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71,
+	0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f,
+	0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97,
+	0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96,
+	0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c,
+	0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56,
+	0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23,
+	0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7,
+	0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06,
+	0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b,
+	0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86,
+	0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f,
+	0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b,
+	0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58,
+	0x6f, 0xb3, 0xe8, 0x72, 0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9,
+	0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a,
+	0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 0xb4, 0xb2, 0xfb, 0xaa, 0x89,
+	0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8,
+	0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf,
+	0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde,
+	0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b,
+	0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e,
+	0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto b/vendor/github.com/containerd/containerd/api/events/container.proto
similarity index 65%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/container.proto
rename to vendor/github.com/containerd/containerd/api/events/container.proto
index ca8acb8..13aa584 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto
+++ b/vendor/github.com/containerd/containerd/api/events/container.proto
@@ -1,12 +1,12 @@
 syntax = "proto3";
 
-package containerd.services.events.v1;
+package containerd.events;
 
-import "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import weak "gogoproto/gogo.proto";
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
 
-option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option go_package = "github.com/containerd/containerd/api/events;events";
 option (containerd.plugin.fieldpath_all) = true;
 
 message ContainerCreate {
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go
similarity index 73%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go
rename to vendor/github.com/containerd/containerd/api/events/content.pb.go
index 6fc5d61..87648d1 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go
@@ -1,14 +1,14 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/events/v1/content.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/content.proto
 
 package events
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/containerd/containerd/protobuf/plugin"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
 
 import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
 
@@ -31,7 +31,7 @@
 func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
 
 func init() {
-	proto.RegisterType((*ContentDelete)(nil), "containerd.services.events.v1.ContentDelete")
+	proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete")
 }
 
 // Field returns the value for the given fieldpath as a string, if defined.
@@ -71,24 +71,6 @@
 	return i, nil
 }
 
-func encodeFixed64Content(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Content(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -324,25 +306,24 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/content.proto", fileDescriptorContent)
+	proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptorContent)
 }
 
 var fileDescriptorContent = []byte{
-	// 242 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9,
+	// 228 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
-	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0x82, 0x55, 0xa4, 0xe6, 0x95, 0xe8, 0x15,
-	0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x34, 0xe8, 0xc1, 0x14, 0xeb, 0x41, 0x14, 0xeb, 0x95,
-	0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x52, 0x0e,
-	0x04, 0xed, 0x06, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x4f,
-	0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0, 0x14, 0xcd, 0xc5, 0xeb, 0x0c,
-	0x71, 0x87, 0x4b, 0x6a, 0x4e, 0x6a, 0x49, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a,
-	0x71, 0x89, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee,
-	0xc9, 0x6b, 0x21, 0x59, 0x95, 0x5f, 0x90, 0x9a, 0x07, 0xb7, 0xa3, 0x58, 0x3f, 0x3d, 0x5f, 0x17,
-	0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41, 0x4d, 0x70, 0x8a, 0x39, 0xf1, 0x50, 0x8e, 0xe1, 0xc6,
-	0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8,
-	0xe0, 0x91, 0x1c, 0xe3, 0x82, 0x2f, 0x72, 0x8c, 0x51, 0x76, 0x64, 0x06, 0x9c, 0x35, 0x84, 0x95,
-	0xc4, 0x06, 0xf6, 0x81, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x51, 0xce, 0xec, 0x89, 0x81, 0x01,
-	0x00, 0x00,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53,
+	0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4,
+	0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b,
+	0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53,
+	0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76,
+	0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91,
+	0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d,
+	0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8,
+	0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7,
+	0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92,
+	0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11,
+	0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee,
+	0x61, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/events/content.proto b/vendor/github.com/containerd/containerd/api/events/content.proto
new file mode 100644
index 0000000..aba5071
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/content.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+
+package containerd.events;
+
+import weak "gogoproto/gogo.proto";
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/events;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message ContentDelete {
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/doc.go b/vendor/github.com/containerd/containerd/api/events/doc.go
new file mode 100644
index 0000000..ac1e83f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/doc.go
@@ -0,0 +1,3 @@
+// Package events has protobuf types for various events that are used in
+// containerd.
+package events
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go
similarity index 74%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go
rename to vendor/github.com/containerd/containerd/api/events/image.pb.go
index 0349e7c..ff3c6f6 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go
@@ -1,13 +1,13 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/events/v1/image.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/image.proto
 
 package events
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/containerd/containerd/protobuf/plugin"
+
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
 
 import strings "strings"
 import reflect "reflect"
@@ -215,24 +215,6 @@
 	return i, nil
 }
 
-func encodeFixed64Image(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Image(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintImage(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -445,51 +427,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowImage
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowImage
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthImage
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowImage
@@ -499,41 +444,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowImage
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowImage
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthImage
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowImage
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthImage
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipImage(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthImage
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthImage
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -640,51 +624,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowImage
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowImage
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthImage
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowImage
@@ -694,41 +641,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowImage
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowImage
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthImage
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowImage
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthImage
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipImage(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthImage
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthImage
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -936,28 +922,28 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/image.proto", fileDescriptorImage)
+	proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptorImage)
 }
 
 var fileDescriptorImage = []byte{
-	// 296 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
+	// 292 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
-	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x67, 0xe6, 0x26, 0xa6, 0xa7, 0xea,
-	0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xeb, 0xc1, 0x94, 0xea, 0x81, 0x15, 0x14,
-	0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x03, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e,
-	0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02,
-	0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84,
-	0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21,
-	0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23,
-	0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8,
-	0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a,
-	0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09,
-	0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42,
-	0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x98,
-	0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c,
-	0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x47,
-	0x66, 0x22, 0xb2, 0x86, 0xb0, 0x92, 0xd8, 0xc0, 0xb1, 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
-	0x44, 0x99, 0x59, 0x31, 0x8d, 0x02, 0x00, 0x00,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6,
+	0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7,
+	0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10,
+	0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a,
+	0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e,
+	0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53,
+	0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4,
+	0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43,
+	0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25,
+	0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48,
+	0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59,
+	0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1,
+	0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c,
+	0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f,
+	0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11,
+	0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17,
+	0x77, 0x02, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto b/vendor/github.com/containerd/containerd/api/events/image.proto
similarity index 65%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/image.proto
rename to vendor/github.com/containerd/containerd/api/events/image.proto
index cbab0bb..470c3a2 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto
+++ b/vendor/github.com/containerd/containerd/api/events/image.proto
@@ -2,9 +2,9 @@
 
 package containerd.services.images.v1;
 
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
 
-option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option go_package = "github.com/containerd/containerd/api/events;events";
 option (containerd.plugin.fieldpath_all) = true;
 
 message ImageCreate {
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
similarity index 73%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go
rename to vendor/github.com/containerd/containerd/api/events/namespace.pb.go
index c2aabe4..f9a3e27 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go
@@ -1,14 +1,14 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/events/v1/namespace.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/namespace.proto
 
 package events
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/containerd/containerd/protobuf/plugin"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
 
 import strings "strings"
 import reflect "reflect"
@@ -48,9 +48,9 @@
 func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
 
 func init() {
-	proto.RegisterType((*NamespaceCreate)(nil), "containerd.services.events.v1.NamespaceCreate")
-	proto.RegisterType((*NamespaceUpdate)(nil), "containerd.services.events.v1.NamespaceUpdate")
-	proto.RegisterType((*NamespaceDelete)(nil), "containerd.services.events.v1.NamespaceDelete")
+	proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate")
+	proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate")
+	proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete")
 }
 
 // Field returns the value for the given fieldpath as a string, if defined.
@@ -216,24 +216,6 @@
 	return i, nil
 }
 
-func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -446,51 +428,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowNamespace
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowNamespace
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthNamespace
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowNamespace
@@ -500,41 +445,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowNamespace
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowNamespace
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowNamespace
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipNamespace(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthNamespace
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -641,51 +625,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowNamespace
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowNamespace
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthNamespace
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowNamespace
@@ -695,41 +642,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowNamespace
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowNamespace
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowNamespace
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipNamespace(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthNamespace
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -937,29 +923,28 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/namespace.proto", fileDescriptorNamespace)
+	proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptorNamespace)
 }
 
 var fileDescriptorNamespace = []byte{
-	// 310 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4f, 0xcf, 0x2c, 0xc9,
+	// 296 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
-	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0xe7, 0x25, 0xe6, 0xa6, 0x16, 0x17,
-	0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0xb4, 0xe8, 0xc1, 0x94,
-	0xeb, 0x41, 0x94, 0xeb, 0x95, 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83,
-	0x58, 0x10, 0x4d, 0x52, 0x0e, 0x04, 0x6d, 0x07, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29,
-	0x4d, 0xcf, 0xcc, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0,
-	0xb4, 0x85, 0x91, 0x8b, 0xdf, 0x0f, 0xe6, 0x14, 0xe7, 0xa2, 0xd4, 0xc4, 0x92, 0x54, 0x21, 0x21,
-	0x2e, 0x16, 0x90, 0xeb, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0xa1, 0x20, 0x2e,
-	0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x2b, 0x3d,
-	0xbc, 0xee, 0xd5, 0x43, 0x33, 0x53, 0xcf, 0x07, 0xac, 0xd9, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08,
-	0x6a, 0x92, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4,
-	0x56, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06,
-	0xe1, 0x58, 0x31, 0x59, 0x30, 0xa2, 0x3a, 0x3b, 0xb4, 0x20, 0x85, 0xea, 0xce, 0x86, 0x98, 0x49,
-	0x6d, 0x67, 0xab, 0x22, 0xb9, 0xda, 0x25, 0x35, 0x27, 0x15, 0xbb, 0xab, 0x9d, 0x62, 0x4e, 0x3c,
-	0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17,
-	0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xb8, 0xe0, 0x8b, 0x1c, 0x63, 0x94, 0x1d, 0x99, 0x49,
-	0xce, 0x1a, 0xc2, 0x4a, 0x62, 0x03, 0xc7, 0xbc, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x58, 0x7e,
-	0x6c, 0xc6, 0xbb, 0x02, 0x00, 0x00,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25,
+	0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22,
+	0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88,
+	0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67,
+	0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1,
+	0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05,
+	0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27,
+	0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d,
+	0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b,
+	0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31,
+	0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15,
+	0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa,
+	0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86,
+	0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
+	0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21,
+	0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
+	0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.proto b/vendor/github.com/containerd/containerd/api/events/namespace.proto
new file mode 100644
index 0000000..45deae7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/namespace.proto
@@ -0,0 +1,23 @@
+syntax = "proto3";
+
+package containerd.events;
+
+import weak "gogoproto/gogo.proto";
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/events;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message NamespaceCreate {
+	string name = 1;
+	map<string, string> labels  = 2;
+}
+
+message NamespaceUpdate {
+	string name = 1;
+	map<string, string> labels  = 2;
+}
+
+message NamespaceDelete {
+	string name = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
similarity index 85%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go
rename to vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
index 265d47a..e1f8f5c 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go
@@ -1,13 +1,13 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/events/v1/snapshot.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/snapshot.proto
 
 package events
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/containerd/containerd/protobuf/plugin"
+
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
 
 import strings "strings"
 import reflect "reflect"
@@ -46,9 +46,9 @@
 func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }
 
 func init() {
-	proto.RegisterType((*SnapshotPrepare)(nil), "containerd.services.events.v1.SnapshotPrepare")
-	proto.RegisterType((*SnapshotCommit)(nil), "containerd.services.events.v1.SnapshotCommit")
-	proto.RegisterType((*SnapshotRemove)(nil), "containerd.services.events.v1.SnapshotRemove")
+	proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare")
+	proto.RegisterType((*SnapshotCommit)(nil), "containerd.events.SnapshotCommit")
+	proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove")
 }
 
 // Field returns the value for the given fieldpath as a string, if defined.
@@ -180,24 +180,6 @@
 	return i, nil
 }
 
-func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -699,25 +681,24 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/snapshot.proto", fileDescriptorSnapshot)
+	proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptorSnapshot)
 }
 
 var fileDescriptorSnapshot = []byte{
-	// 252 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
+	// 235 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
-	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x17, 0xe7, 0x25, 0x16, 0x14, 0x67,
-	0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x74, 0xe8, 0xc1, 0x54, 0xeb,
-	0x41, 0x54, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x06, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e,
-	0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06,
-	0xc4, 0x02, 0x25, 0x6b, 0x2e, 0xfe, 0x60, 0xa8, 0x95, 0x01, 0x45, 0xa9, 0x05, 0x89, 0x45, 0xa9,
-	0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6,
-	0x90, 0x18, 0x17, 0x1b, 0x48, 0x26, 0xaf, 0x44, 0x82, 0x09, 0x2c, 0x08, 0xe5, 0x29, 0x99, 0x71,
-	0xf1, 0xc1, 0x34, 0x3b, 0xe7, 0xe7, 0xe6, 0x66, 0x96, 0x60, 0xd1, 0x2b, 0xc4, 0xc5, 0x92, 0x97,
-	0x98, 0x9b, 0x0a, 0xd5, 0x09, 0x66, 0x2b, 0x29, 0x21, 0xf4, 0x05, 0xa5, 0xe6, 0xe6, 0x97, 0x61,
-	0xb1, 0xd3, 0x29, 0xe6, 0xc4, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31,
-	0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8,
-	0x31, 0x46, 0xd9, 0x91, 0x19, 0xbe, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0xd8, 0xf7, 0xc6, 0x80, 0x00,
-	0x00, 0x00, 0xff, 0xff, 0x3a, 0x82, 0x7a, 0xa7, 0xa8, 0x01, 0x00, 0x00,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7,
+	0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54,
+	0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7,
+	0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8,
+	0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01,
+	0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c,
+	0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60,
+	0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d,
+	0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9,
+	0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c,
+	0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3,
+	0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00,
+	0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.proto b/vendor/github.com/containerd/containerd/api/events/snapshot.proto
new file mode 100644
index 0000000..425eeec
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/snapshot.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package containerd.events;
+
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/events;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message SnapshotPrepare {
+	string key = 1;
+	string parent = 2;
+}
+
+message SnapshotCommit {
+	string key = 1;
+	string name = 2;
+}
+
+message SnapshotRemove {
+	string key = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go
similarity index 90%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go
rename to vendor/github.com/containerd/containerd/api/events/task.pb.go
index 97faa38..e91e799 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go
@@ -1,16 +1,17 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/events/v1/task.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/task.proto
 
 package events
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/gogo/protobuf/types"
 import containerd_types "github.com/containerd/containerd/api/types"
-import _ "github.com/containerd/containerd/protobuf/plugin"
+
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
 
 import time "time"
 
@@ -136,17 +137,17 @@
 func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{10} }
 
 func init() {
-	proto.RegisterType((*TaskCreate)(nil), "containerd.services.events.v1.TaskCreate")
-	proto.RegisterType((*TaskStart)(nil), "containerd.services.events.v1.TaskStart")
-	proto.RegisterType((*TaskDelete)(nil), "containerd.services.events.v1.TaskDelete")
-	proto.RegisterType((*TaskIO)(nil), "containerd.services.events.v1.TaskIO")
-	proto.RegisterType((*TaskExit)(nil), "containerd.services.events.v1.TaskExit")
-	proto.RegisterType((*TaskOOM)(nil), "containerd.services.events.v1.TaskOOM")
-	proto.RegisterType((*TaskExecAdded)(nil), "containerd.services.events.v1.TaskExecAdded")
-	proto.RegisterType((*TaskExecStarted)(nil), "containerd.services.events.v1.TaskExecStarted")
-	proto.RegisterType((*TaskPaused)(nil), "containerd.services.events.v1.TaskPaused")
-	proto.RegisterType((*TaskResumed)(nil), "containerd.services.events.v1.TaskResumed")
-	proto.RegisterType((*TaskCheckpointed)(nil), "containerd.services.events.v1.TaskCheckpointed")
+	proto.RegisterType((*TaskCreate)(nil), "containerd.events.TaskCreate")
+	proto.RegisterType((*TaskStart)(nil), "containerd.events.TaskStart")
+	proto.RegisterType((*TaskDelete)(nil), "containerd.events.TaskDelete")
+	proto.RegisterType((*TaskIO)(nil), "containerd.events.TaskIO")
+	proto.RegisterType((*TaskExit)(nil), "containerd.events.TaskExit")
+	proto.RegisterType((*TaskOOM)(nil), "containerd.events.TaskOOM")
+	proto.RegisterType((*TaskExecAdded)(nil), "containerd.events.TaskExecAdded")
+	proto.RegisterType((*TaskExecStarted)(nil), "containerd.events.TaskExecStarted")
+	proto.RegisterType((*TaskPaused)(nil), "containerd.events.TaskPaused")
+	proto.RegisterType((*TaskResumed)(nil), "containerd.events.TaskResumed")
+	proto.RegisterType((*TaskCheckpointed)(nil), "containerd.events.TaskCheckpointed")
 }
 
 // Field returns the value for the given fieldpath as a string, if defined.
@@ -737,24 +738,6 @@
 	return i, nil
 }
 
-func encodeFixed64Task(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Task(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -991,7 +974,7 @@
 		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1018,7 +1001,7 @@
 		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
 		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
 		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
-		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2579,50 +2562,49 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/task.proto", fileDescriptorTask)
+	proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptorTask)
 }
 
 var fileDescriptorTask = []byte{
-	// 648 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xc1, 0x6e, 0xd3, 0x40,
-	0x10, 0x86, 0x6b, 0xa7, 0x75, 0x93, 0x0d, 0x55, 0x2b, 0xab, 0x82, 0x28, 0x12, 0x76, 0x64, 0x84,
-	0x94, 0x93, 0xad, 0x16, 0x89, 0x0b, 0x2a, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x0a, 0xb8, 0x3d, 0x21,
-	0xa4, 0xc8, 0xb1, 0x27, 0xc9, 0xd2, 0xc4, 0x6b, 0x79, 0xc7, 0x51, 0x91, 0x38, 0xf0, 0x08, 0x3c,
-	0x02, 0x4f, 0xc1, 0x33, 0xf4, 0xc0, 0x81, 0x23, 0xa7, 0x40, 0xfd, 0x0c, 0x9c, 0x38, 0xa1, 0xf5,
-	0x3a, 0x6e, 0xa1, 0xa2, 0x20, 0x4b, 0xdc, 0x76, 0xc6, 0x33, 0xff, 0xcc, 0x7c, 0x3b, 0xd9, 0x90,
-	0xee, 0x84, 0xe2, 0x34, 0x19, 0xd9, 0x3e, 0x9b, 0x3b, 0x3e, 0x0b, 0xd1, 0xa3, 0x21, 0xc4, 0xc1,
-	0xf5, 0xa3, 0x17, 0x51, 0x87, 0x43, 0xbc, 0xa0, 0x3e, 0x70, 0x07, 0x16, 0x10, 0x22, 0x77, 0x16,
-	0x7b, 0x0e, 0x7a, 0xfc, 0xcc, 0x8e, 0x62, 0x86, 0x4c, 0xbf, 0x7f, 0x15, 0x6d, 0xaf, 0x22, 0x6d,
-	0x19, 0x69, 0x2f, 0xf6, 0x9a, 0xbb, 0x13, 0x36, 0x61, 0x59, 0xa4, 0x23, 0x4e, 0x32, 0xa9, 0x69,
-	0x4e, 0x18, 0x9b, 0xcc, 0xc0, 0xc9, 0xac, 0x51, 0x32, 0x76, 0x90, 0xce, 0x81, 0xa3, 0x37, 0x8f,
-	0xf2, 0x80, 0xc7, 0xff, 0xd4, 0x19, 0xbe, 0x89, 0x80, 0x3b, 0x73, 0x96, 0x84, 0x98, 0xe7, 0x1d,
-	0xfe, 0x35, 0xaf, 0x28, 0x19, 0xcd, 0x92, 0x09, 0x0d, 0x9d, 0x31, 0x85, 0x59, 0x10, 0x79, 0x38,
-	0x95, 0x0a, 0xd6, 0x0f, 0x85, 0x90, 0x53, 0x8f, 0x9f, 0x1d, 0xc5, 0xe0, 0x21, 0xe8, 0xfb, 0xe4,
-	0x4e, 0x91, 0x3c, 0xa4, 0x41, 0x43, 0x69, 0x29, 0xed, 0x5a, 0x77, 0x3b, 0x5d, 0x9a, 0xf5, 0xa3,
-	0x95, 0xbf, 0xdf, 0x73, 0xeb, 0x45, 0x50, 0x3f, 0xd0, 0xef, 0x12, 0x6d, 0x94, 0x84, 0xc1, 0x0c,
-	0x1a, 0xaa, 0x88, 0x76, 0x73, 0x4b, 0x77, 0x88, 0x16, 0x33, 0x86, 0x63, 0xde, 0xa8, 0xb4, 0x2a,
-	0xed, 0xfa, 0xfe, 0x3d, 0xfb, 0x1a, 0xbb, 0x6c, 0x16, 0xfb, 0x58, 0xcc, 0xe2, 0xe6, 0x61, 0xfa,
-	0x01, 0x51, 0x29, 0x6b, 0xac, 0xb7, 0x94, 0x76, 0x7d, 0xff, 0xa1, 0x7d, 0x2b, 0x68, 0x5b, 0xf4,
-	0xdc, 0x1f, 0x74, 0xb5, 0x74, 0x69, 0xaa, 0xfd, 0x81, 0xab, 0x52, 0xa6, 0x1b, 0x84, 0xf8, 0x53,
-	0xf0, 0xcf, 0x22, 0x46, 0x43, 0x6c, 0x6c, 0x64, 0xbd, 0x5c, 0xf3, 0xe8, 0x3b, 0xa4, 0x12, 0xd1,
-	0xa0, 0xa1, 0xb5, 0x94, 0xf6, 0x96, 0x2b, 0x8e, 0xd6, 0x0b, 0x52, 0x13, 0x3a, 0x27, 0xe8, 0xc5,
-	0x58, 0x6a, 0xf4, 0x5c, 0x52, 0xbd, 0x92, 0xfc, 0x98, 0xf3, 0xec, 0xc1, 0x0c, 0x4a, 0xf2, 0xbc,
-	0x21, 0xaa, 0x9b, 0xa4, 0x0e, 0xe7, 0x14, 0x87, 0x1c, 0x3d, 0x4c, 0x04, 0x4e, 0xf1, 0x85, 0x08,
-	0xd7, 0x49, 0xe6, 0xd1, 0x3b, 0xa4, 0x26, 0x2c, 0x08, 0x86, 0x1e, 0xe6, 0x00, 0x9b, 0xb6, 0x5c,
-	0x3a, 0x7b, 0xb5, 0x01, 0xf6, 0xe9, 0x6a, 0xe9, 0xba, 0xd5, 0x8b, 0xa5, 0xb9, 0xf6, 0xfe, 0xab,
-	0xa9, 0xb8, 0x55, 0x99, 0xd6, 0x41, 0xeb, 0x35, 0xd1, 0x24, 0x53, 0x7d, 0x97, 0x6c, 0x70, 0x0c,
-	0x68, 0x28, 0x9b, 0x75, 0xa5, 0x21, 0x6e, 0x99, 0x63, 0xc0, 0x12, 0x5c, 0xdd, 0xb2, 0xb4, 0x72,
-	0x3f, 0xc4, 0x71, 0xd6, 0x96, 0xf4, 0x43, 0x1c, 0xeb, 0x4d, 0x52, 0x45, 0x88, 0xe7, 0x34, 0xf4,
-	0x66, 0x59, 0x47, 0x55, 0xb7, 0xb0, 0xad, 0x4f, 0x0a, 0xa9, 0x8a, 0x62, 0xcf, 0xce, 0x29, 0x96,
-	0x5c, 0x39, 0x35, 0x27, 0x54, 0xcb, 0x57, 0xa0, 0xe7, 0xaa, 0xb4, 0x40, 0x57, 0xf9, 0x23, 0xba,
-	0xf5, 0xdb, 0xd1, 0x6d, 0x94, 0x42, 0x77, 0x40, 0x36, 0xc5, 0x34, 0x83, 0xc1, 0x71, 0x99, 0x61,
-	0xac, 0x29, 0xd9, 0x92, 0x30, 0xc0, 0xef, 0x04, 0x01, 0x04, 0xa5, 0x88, 0x3c, 0x20, 0x9b, 0x70,
-	0x0e, 0xfe, 0xb0, 0xc0, 0x42, 0xd2, 0xa5, 0xa9, 0x09, 0xcd, 0x7e, 0xcf, 0xd5, 0xc4, 0xa7, 0x7e,
-	0x60, 0xbd, 0x25, 0xdb, 0xab, 0x4a, 0xd9, 0xce, 0xff, 0xc7, 0x5a, 0x37, 0xaf, 0xc2, 0x3a, 0x94,
-	0xbf, 0x8c, 0xe7, 0x5e, 0xc2, 0xcb, 0x15, 0xb6, 0x3a, 0xa4, 0x2e, 0x14, 0x5c, 0xe0, 0xc9, 0xbc,
-	0xa4, 0xc4, 0x98, 0xec, 0x64, 0xcf, 0x5d, 0xf1, 0x2c, 0x94, 0x64, 0xf0, 0xeb, 0x63, 0xa3, 0xfe,
-	0xfe, 0xd8, 0x74, 0x5f, 0x5d, 0x5c, 0x1a, 0x6b, 0x5f, 0x2e, 0x8d, 0xb5, 0x77, 0xa9, 0xa1, 0x5c,
-	0xa4, 0x86, 0xf2, 0x39, 0x35, 0x94, 0x6f, 0xa9, 0xa1, 0x7c, 0xf8, 0x6e, 0x28, 0x2f, 0x9f, 0x96,
-	0xfc, 0x27, 0x7a, 0x22, 0x4f, 0x23, 0x2d, 0xdb, 0xcc, 0x47, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff,
-	0x76, 0xdf, 0xe7, 0xaa, 0xd2, 0x06, 0x00, 0x00,
+	// 637 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40,
+	0x10, 0xc7, 0x63, 0xa7, 0x75, 0x93, 0x09, 0x55, 0x8b, 0x55, 0x41, 0xc8, 0xc1, 0x8e, 0xcc, 0x25,
+	0x27, 0x5b, 0x04, 0x89, 0x0b, 0x42, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x4a, 0x71, 0x7b, 0xa8, 0xb8,
+	0x44, 0x4e, 0x76, 0x93, 0x2c, 0x8d, 0xbd, 0x96, 0x3d, 0x46, 0x45, 0xe2, 0xc0, 0x23, 0xf0, 0x08,
+	0x3c, 0x05, 0xcf, 0xd0, 0x03, 0x07, 0x8e, 0x9c, 0x02, 0xf5, 0x03, 0x70, 0xe2, 0x01, 0xd0, 0x7a,
+	0x1d, 0xb7, 0x50, 0xf1, 0x65, 0x89, 0x53, 0x76, 0x66, 0x67, 0xff, 0x33, 0xf3, 0xdb, 0xc9, 0x1a,
+	0x1e, 0xcd, 0x19, 0x2e, 0x92, 0x89, 0x3d, 0xe5, 0xbe, 0x33, 0xe5, 0x01, 0x7a, 0x2c, 0xa0, 0x11,
+	0xb9, 0xbe, 0xf4, 0x42, 0xe6, 0xd0, 0x97, 0x34, 0xc0, 0xd8, 0x41, 0x2f, 0x3e, 0xb3, 0xc3, 0x88,
+	0x23, 0xd7, 0x6f, 0x5f, 0x45, 0xd8, 0x72, 0xb7, 0xb5, 0x37, 0xe7, 0x73, 0x9e, 0xed, 0x3a, 0x62,
+	0x25, 0x03, 0x5b, 0xe6, 0x9c, 0xf3, 0xf9, 0x92, 0x3a, 0x99, 0x35, 0x49, 0x66, 0x0e, 0x32, 0x9f,
+	0xc6, 0xe8, 0xf9, 0x61, 0x1e, 0xf0, 0x77, 0x15, 0xe0, 0xab, 0x90, 0xc6, 0x8e, 0xcf, 0x93, 0x00,
+	0xf3, 0x73, 0xfb, 0x7f, 0x3c, 0x57, 0xa4, 0x0c, 0x97, 0xc9, 0x9c, 0x05, 0xce, 0x8c, 0xd1, 0x25,
+	0x09, 0x3d, 0x5c, 0x48, 0x05, 0xeb, 0xab, 0x02, 0x70, 0xe2, 0xc5, 0x67, 0x07, 0x11, 0xf5, 0x90,
+	0xea, 0x5d, 0xb8, 0x55, 0x1c, 0x1e, 0x33, 0xd2, 0x54, 0xda, 0x4a, 0xa7, 0xde, 0xdf, 0x49, 0x57,
+	0x66, 0xe3, 0x60, 0xed, 0x1f, 0x0e, 0xdc, 0x46, 0x11, 0x34, 0x24, 0xfa, 0x1d, 0xd0, 0x26, 0x49,
+	0x40, 0x96, 0xb4, 0xa9, 0x8a, 0x68, 0x37, 0xb7, 0x74, 0x07, 0xb4, 0x88, 0x73, 0x9c, 0xc5, 0xcd,
+	0x6a, 0xbb, 0xda, 0x69, 0x74, 0xef, 0xda, 0xd7, 0x78, 0x65, 0xbd, 0xd8, 0x87, 0xa2, 0x17, 0x37,
+	0x0f, 0xd3, 0x1f, 0x80, 0xca, 0x78, 0x73, 0xa3, 0xad, 0x74, 0x1a, 0xdd, 0x7b, 0xf6, 0x0d, 0xb8,
+	0xb6, 0xa8, 0x73, 0x38, 0xea, 0x6b, 0xe9, 0xca, 0x54, 0x87, 0x23, 0x57, 0x65, 0x5c, 0x37, 0x00,
+	0xa6, 0x0b, 0x3a, 0x3d, 0x0b, 0x39, 0x0b, 0xb0, 0xb9, 0x99, 0xe5, 0xbf, 0xe6, 0xd1, 0x77, 0xa1,
+	0x1a, 0x32, 0xd2, 0xd4, 0xda, 0x4a, 0x67, 0xdb, 0x15, 0x4b, 0xeb, 0x19, 0xd4, 0x85, 0xce, 0x31,
+	0x7a, 0x11, 0x96, 0x6a, 0x37, 0x97, 0x54, 0xaf, 0x24, 0xdf, 0xe7, 0x0c, 0x07, 0x74, 0x49, 0x4b,
+	0x32, 0xbc, 0x21, 0xaa, 0x9b, 0xd0, 0xa0, 0xe7, 0x0c, 0xc7, 0x31, 0x7a, 0x98, 0x08, 0x84, 0x62,
+	0x07, 0x84, 0xeb, 0x38, 0xf3, 0xe8, 0x3d, 0xa8, 0x0b, 0x8b, 0x92, 0xb1, 0x87, 0x39, 0xb4, 0x96,
+	0x2d, 0x07, 0xcd, 0x5e, 0xdf, 0xba, 0x7d, 0xb2, 0x1e, 0xb4, 0x7e, 0xed, 0x62, 0x65, 0x56, 0xde,
+	0x7e, 0x36, 0x15, 0xb7, 0x26, 0x8f, 0xf5, 0xd0, 0x7a, 0x01, 0x9a, 0x64, 0xaa, 0xef, 0xc1, 0x66,
+	0x8c, 0x84, 0x05, 0xb2, 0x58, 0x57, 0x1a, 0xe2, 0x66, 0x63, 0x24, 0x3c, 0xc1, 0xf5, 0xcd, 0x4a,
+	0x2b, 0xf7, 0xd3, 0x28, 0xca, 0xca, 0x92, 0x7e, 0x1a, 0x45, 0x7a, 0x0b, 0x6a, 0x48, 0x23, 0x9f,
+	0x05, 0xde, 0x32, 0xab, 0xa8, 0xe6, 0x16, 0xb6, 0xf5, 0x41, 0x81, 0x9a, 0x48, 0xf6, 0xf4, 0x9c,
+	0x61, 0xc9, 0x31, 0x53, 0x73, 0x42, 0xf5, 0x7c, 0x04, 0x06, 0xae, 0xca, 0x0a, 0x74, 0xd5, 0x5f,
+	0xa2, 0xdb, 0xf8, 0x3d, 0xba, 0xcd, 0x52, 0xe8, 0x9e, 0xc0, 0x96, 0xe8, 0x66, 0x34, 0x3a, 0x2c,
+	0xd3, 0x8c, 0xb5, 0x80, 0x6d, 0x09, 0x83, 0x4e, 0x7b, 0x84, 0x50, 0x52, 0x8a, 0xc8, 0x7d, 0xd8,
+	0xa2, 0xe7, 0x74, 0x3a, 0x2e, 0xb0, 0x40, 0xba, 0x32, 0x35, 0xa1, 0x39, 0x1c, 0xb8, 0x9a, 0xd8,
+	0x1a, 0x12, 0xeb, 0x35, 0xec, 0xac, 0x33, 0x65, 0x33, 0xff, 0x1f, 0x73, 0xdd, 0xbc, 0x0a, 0x6b,
+	0x5f, 0xfe, 0x33, 0x8e, 0xbc, 0x24, 0x2e, 0x97, 0xd8, 0xea, 0x41, 0x43, 0x28, 0xb8, 0x34, 0x4e,
+	0xfc, 0x92, 0x12, 0x33, 0xd8, 0xcd, 0x9e, 0xb8, 0xe2, 0x59, 0x28, 0xc9, 0xe0, 0xc7, 0xc7, 0x46,
+	0xfd, 0xf9, 0xb1, 0xe9, 0x1f, 0x5d, 0x5c, 0x1a, 0x95, 0x4f, 0x97, 0x46, 0xe5, 0x4d, 0x6a, 0x28,
+	0x17, 0xa9, 0xa1, 0x7c, 0x4c, 0x0d, 0xe5, 0x4b, 0x6a, 0x28, 0xef, 0xbe, 0x19, 0xca, 0xf3, 0xee,
+	0x3f, 0x7c, 0x65, 0x1e, 0xcb, 0x9f, 0xd3, 0xca, 0x69, 0x75, 0xa2, 0x65, 0x13, 0xf9, 0xf0, 0x7b,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x07, 0x69, 0x62, 0x9d, 0xa6, 0x06, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto b/vendor/github.com/containerd/containerd/api/events/task.proto
similarity index 85%
rename from vendor/github.com/containerd/containerd/api/services/events/v1/task.proto
rename to vendor/github.com/containerd/containerd/api/events/task.proto
index 79f87a6..d699213 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto
+++ b/vendor/github.com/containerd/containerd/api/events/task.proto
@@ -1,13 +1,13 @@
 syntax = "proto3";
 
-package containerd.services.events.v1;
+package containerd.events;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/timestamp.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
 
-option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option go_package = "github.com/containerd/containerd/api/events;events";
 option (containerd.plugin.fieldpath_all) = true;
 
 message TaskCreate {
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
index 90d3099..17cd36d 100644
--- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
-// DO NOT EDIT!
 
 /*
 	Package containers is a generated protocol buffer package.
@@ -25,9 +24,10 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf2 "github.com/gogo/protobuf/types"
 import google_protobuf3 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 
@@ -844,24 +844,6 @@
 	return i, nil
 }
 
-func encodeFixed64Containers(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Containers(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintContainers(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1270,51 +1252,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainers
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainers
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthContainers
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowContainers
@@ -1324,41 +1269,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowContainers
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContainers
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthContainers
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContainers
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthContainers
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipContainers(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthContainers
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthContainers
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
@@ -1599,51 +1583,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainers
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContainers
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthContainers
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Extensions == nil {
 				m.Extensions = make(map[string]google_protobuf1.Any)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			mapvalue := &google_protobuf1.Any{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowContainers
@@ -1653,46 +1600,85 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var mapmsglen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowContainers
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContainers
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthContainers
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					mapmsglen |= (int(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContainers
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= (int(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthContainers
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if mapmsglen < 0 {
+						return ErrInvalidLengthContainers
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &google_protobuf1.Any{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipContainers(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthContainers
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				if mapmsglen < 0 {
-					return ErrInvalidLengthContainers
-				}
-				postmsgIndex := iNdEx + mapmsglen
-				if mapmsglen < 0 {
-					return ErrInvalidLengthContainers
-				}
-				if postmsgIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := &google_protobuf1.Any{}
-				if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
-					return err
-				}
-				iNdEx = postmsgIndex
-				m.Extensions[mapkey] = *mapvalue
-			} else {
-				var mapvalue google_protobuf1.Any
-				m.Extensions[mapkey] = mapvalue
 			}
+			m.Extensions[mapkey] = *mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -2689,53 +2675,53 @@
 
 var fileDescriptorContainers = []byte{
 	// 776 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0x12, 0x5b,
-	0x10, 0xce, 0x00, 0x81, 0xd0, 0xdc, 0xaa, 0x7b, 0xeb, 0x5c, 0x2e, 0x77, 0x1c, 0xab, 0x80, 0xb0,
-	0xa2, 0x2c, 0x1d, 0x0c, 0x5a, 0x9a, 0x1f, 0x37, 0x21, 0x7f, 0x65, 0x99, 0x58, 0xa9, 0x51, 0x37,
-	0xba, 0x88, 0x03, 0x74, 0xc8, 0xc8, 0xfc, 0x39, 0xe7, 0x40, 0x49, 0xb9, 0xd0, 0x47, 0x70, 0xe7,
-	0x23, 0xf8, 0x2a, 0x59, 0xba, 0x74, 0x15, 0x13, 0x9e, 0xc4, 0x9a, 0x33, 0x33, 0xcc, 0x04, 0x06,
-	0x85, 0x68, 0x76, 0xa7, 0x39, 0xfd, 0x7d, 0xfd, 0xf1, 0x75, 0xf7, 0x01, 0xd8, 0xef, 0x68, 0xec,
-	0xa4, 0xd7, 0x94, 0x5b, 0x96, 0x51, 0x6b, 0x59, 0x26, 0x53, 0x35, 0x13, 0x9d, 0x76, 0xf4, 0xa8,
-	0xda, 0x5a, 0x8d, 0xa2, 0xd3, 0xd7, 0x5a, 0x48, 0xc3, 0xcf, 0x69, 0xad, 0xbf, 0x12, 0x89, 0x64,
-	0xdb, 0xb1, 0x98, 0x45, 0x96, 0x43, 0x9c, 0x1c, 0x60, 0xe4, 0x48, 0x56, 0x7f, 0x45, 0xca, 0x77,
-	0xac, 0x8e, 0xc5, 0xb3, 0x6b, 0xee, 0xc9, 0x03, 0x4a, 0x37, 0x3a, 0x96, 0xd5, 0xd1, 0xb1, 0xc6,
-	0xa3, 0x66, 0xef, 0xb8, 0xa6, 0x9a, 0x03, 0xff, 0xea, 0xe6, 0xf8, 0x15, 0x1a, 0x36, 0x0b, 0x2e,
-	0xcb, 0xe3, 0x97, 0xc7, 0x1a, 0xea, 0xed, 0x23, 0x43, 0xa5, 0x5d, 0x3f, 0xa3, 0x34, 0x9e, 0xc1,
-	0x34, 0x03, 0x29, 0x53, 0x0d, 0xdb, 0x4b, 0xa8, 0x7c, 0x4e, 0x43, 0x76, 0x2b, 0x90, 0x48, 0x0a,
-	0x90, 0xd0, 0xda, 0xa2, 0x50, 0x16, 0xaa, 0xd9, 0x46, 0x7a, 0x78, 0x56, 0x4a, 0x3c, 0xde, 0x56,
-	0x12, 0x5a, 0x9b, 0x1c, 0x42, 0x5a, 0x57, 0x9b, 0xa8, 0x53, 0x31, 0x51, 0x4e, 0x56, 0x73, 0xf5,
-	0x55, 0xf9, 0x97, 0x5f, 0x55, 0x1e, 0xb1, 0xca, 0xfb, 0x1c, 0xba, 0x63, 0x32, 0x67, 0xa0, 0xf8,
-	0x3c, 0x24, 0x0f, 0x8b, 0x9a, 0xa1, 0x76, 0x50, 0x4c, 0xba, 0xc5, 0x14, 0x2f, 0x20, 0x4f, 0x21,
-	0xe3, 0xf4, 0x4c, 0x57, 0xa3, 0x98, 0x2a, 0x0b, 0xd5, 0x5c, 0xfd, 0xfe, 0x5c, 0x85, 0x14, 0x0f,
-	0xab, 0x04, 0x24, 0xa4, 0x0a, 0x29, 0x6a, 0x63, 0x4b, 0x5c, 0xe4, 0x64, 0x79, 0xd9, 0x73, 0x43,
-	0x0e, 0xdc, 0x90, 0x37, 0xcd, 0x81, 0xc2, 0x33, 0x48, 0x19, 0x72, 0xd4, 0x54, 0x6d, 0x7a, 0x62,
-	0x31, 0x86, 0x8e, 0x98, 0xe6, 0xaa, 0xa2, 0x1f, 0x91, 0x65, 0xf8, 0x2b, 0x08, 0x8f, 0xba, 0x38,
-	0x10, 0x33, 0x97, 0x53, 0x9e, 0xe0, 0x80, 0x6c, 0x01, 0xb4, 0x1c, 0x54, 0x19, 0xb6, 0x8f, 0x54,
-	0x26, 0x2e, 0xf1, 0xa2, 0xd2, 0x44, 0xd1, 0xe7, 0x41, 0x0b, 0x1a, 0x4b, 0xa7, 0x67, 0xa5, 0x85,
-	0x4f, 0xdf, 0x4b, 0x82, 0x92, 0xf5, 0x71, 0x9b, 0xcc, 0x25, 0xe9, 0xd9, 0xed, 0x80, 0x24, 0x3b,
-	0x0f, 0x89, 0x8f, 0xdb, 0x64, 0xa4, 0x09, 0x80, 0xef, 0x18, 0x9a, 0x54, 0xb3, 0x4c, 0x2a, 0x02,
-	0x6f, 0xda, 0xa3, 0xb9, 0xbc, 0xdc, 0x19, 0xc1, 0x79, 0xe3, 0x1a, 0x29, 0xb7, 0x8c, 0x12, 0x61,
-	0x95, 0xd6, 0x20, 0x17, 0xe9, 0x2c, 0xf9, 0x07, 0x92, 0xae, 0x2d, 0x7c, 0x78, 0x14, 0xf7, 0xe8,
-	0xf6, 0xb8, 0xaf, 0xea, 0x3d, 0x14, 0x13, 0x5e, 0x8f, 0x79, 0xb0, 0x9e, 0x58, 0x15, 0xa4, 0x03,
-	0xc8, 0xf8, 0xbd, 0x22, 0x04, 0x52, 0xa6, 0x6a, 0xa0, 0x8f, 0xe3, 0x67, 0x22, 0x43, 0xc6, 0xb2,
-	0x19, 0x97, 0x9e, 0xf8, 0x49, 0xe7, 0x82, 0x24, 0xe9, 0x19, 0xfc, 0x3d, 0x26, 0x37, 0x46, 0xcd,
-	0xad, 0xa8, 0x9a, 0x69, 0x94, 0xa1, 0xc6, 0xca, 0x1d, 0xf8, 0x77, 0x0f, 0xd9, 0xc8, 0x10, 0x05,
-	0xdf, 0xf6, 0x90, 0xb2, 0x69, 0x2b, 0x52, 0x39, 0x81, 0xfc, 0xe5, 0x74, 0x6a, 0x5b, 0x26, 0x45,
-	0x72, 0x08, 0xd9, 0x91, 0xc5, 0x1c, 0x96, 0xab, 0xdf, 0x9e, 0xa7, 0x11, 0xbe, 0xf1, 0x21, 0x49,
-	0x65, 0x05, 0xfe, 0xdb, 0xd7, 0x68, 0x58, 0x8a, 0x06, 0xd2, 0x44, 0xc8, 0x1c, 0x6b, 0x3a, 0x43,
-	0x87, 0x8a, 0x42, 0x39, 0x59, 0xcd, 0x2a, 0x41, 0x58, 0xd1, 0xa1, 0x30, 0x0e, 0xf1, 0xe5, 0x29,
-	0x00, 0x61, 0x61, 0x0e, 0xbb, 0x9a, 0xbe, 0x08, 0x4b, 0xe5, 0x0d, 0x14, 0xb6, 0xf8, 0x38, 0x4f,
-	0x98, 0xf7, 0xe7, 0xcd, 0xe8, 0xc2, 0xff, 0x13, 0xb5, 0xae, 0xcd, 0xf9, 0x2f, 0x02, 0x14, 0x5e,
-	0xf0, 0x1d, 0xbb, 0xfe, 0x6f, 0x46, 0x36, 0x20, 0xe7, 0xed, 0x33, 0x7f, 0xcf, 0xfd, 0xa9, 0x9d,
-	0x7c, 0x08, 0x76, 0xdd, 0x27, 0xff, 0x40, 0xa5, 0x5d, 0xc5, 0x7f, 0x36, 0xdc, 0xb3, 0x6b, 0xcb,
-	0x84, 0xd0, 0x6b, 0xb3, 0xe5, 0x2e, 0x14, 0xb6, 0x51, 0xc7, 0x18, 0x57, 0xa6, 0x2c, 0x4b, 0xfd,
-	0x3c, 0x05, 0x10, 0x0e, 0x23, 0xe9, 0x43, 0x72, 0x0f, 0x19, 0x79, 0x30, 0x83, 0x8c, 0x98, 0x95,
-	0x94, 0x1e, 0xce, 0x8d, 0xf3, 0xad, 0x78, 0x0f, 0x29, 0x77, 0x2d, 0xc8, 0x2c, 0x3f, 0x67, 0xb1,
-	0x2b, 0x27, 0xad, 0x5d, 0x01, 0xe9, 0x17, 0xff, 0x00, 0x69, 0x6f, 0x72, 0xc9, 0x2c, 0x24, 0xf1,
-	0x0b, 0x25, 0xad, 0x5f, 0x05, 0x1a, 0x0a, 0xf0, 0x66, 0x64, 0x26, 0x01, 0xf1, 0x73, 0x3f, 0x93,
-	0x80, 0x69, 0x93, 0xf8, 0x0a, 0xd2, 0xde, 0xdc, 0xcc, 0x24, 0x20, 0x7e, 0xc4, 0xa4, 0xc2, 0xc4,
-	0x46, 0xec, 0xb8, 0xff, 0x90, 0x1a, 0xaf, 0x4f, 0x2f, 0x8a, 0x0b, 0xdf, 0x2e, 0x8a, 0x0b, 0x1f,
-	0x87, 0x45, 0xe1, 0x74, 0x58, 0x14, 0xbe, 0x0e, 0x8b, 0xc2, 0xf9, 0xb0, 0x28, 0xbc, 0xdc, 0xfd,
-	0x8d, 0x3f, 0x7d, 0x1b, 0x61, 0xd4, 0x4c, 0xf3, 0x8a, 0xf7, 0x7e, 0x04, 0x00, 0x00, 0xff, 0xff,
-	0x17, 0x73, 0xba, 0x43, 0x45, 0x0a, 0x00, 0x00,
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0xd2, 0x50,
+	0x14, 0x26, 0x81, 0x86, 0x72, 0x70, 0x46, 0xe7, 0x8a, 0x18, 0xe3, 0x0c, 0x50, 0x56, 0x8c, 0xa3,
+	0xc1, 0xa2, 0xa3, 0xfd, 0x71, 0x53, 0xfa, 0x37, 0x8e, 0xad, 0xd3, 0x89, 0x3a, 0xe3, 0xe8, 0xa2,
+	0x06, 0xb8, 0xa5, 0x91, 0xfc, 0x99, 0x7b, 0x61, 0x64, 0x5c, 0xe8, 0x23, 0xb8, 0xf3, 0x11, 0x7c,
+	0x95, 0x2e, 0x5d, 0xba, 0xaa, 0x2d, 0x4f, 0xe2, 0xe4, 0x26, 0x21, 0x29, 0x04, 0x85, 0x2a, 0xbb,
+	0x7b, 0xb8, 0xe7, 0xfb, 0xce, 0xc7, 0x77, 0xce, 0xb9, 0x00, 0x7b, 0x6d, 0x8d, 0x1e, 0x77, 0x1b,
+	0x72, 0xd3, 0x32, 0xaa, 0x4d, 0xcb, 0xa4, 0xaa, 0x66, 0x62, 0xa7, 0x15, 0x3d, 0xaa, 0xb6, 0x56,
+	0x25, 0xd8, 0xe9, 0x69, 0x4d, 0x4c, 0xc2, 0xcf, 0x49, 0xb5, 0xb7, 0x1c, 0x89, 0x64, 0xdb, 0xb1,
+	0xa8, 0x85, 0x96, 0x42, 0x9c, 0x1c, 0x60, 0xe4, 0x48, 0x56, 0x6f, 0x59, 0xca, 0xb5, 0xad, 0xb6,
+	0xc5, 0xb2, 0xab, 0xee, 0xc9, 0x03, 0x4a, 0xb7, 0xda, 0x96, 0xd5, 0xd6, 0x71, 0x95, 0x45, 0x8d,
+	0xee, 0x51, 0x55, 0x35, 0xfb, 0xfe, 0xd5, 0xed, 0xd1, 0x2b, 0x6c, 0xd8, 0x34, 0xb8, 0x2c, 0x8d,
+	0x5e, 0x1e, 0x69, 0x58, 0x6f, 0x1d, 0x1a, 0x2a, 0xe9, 0xf8, 0x19, 0xc5, 0xd1, 0x0c, 0xaa, 0x19,
+	0x98, 0x50, 0xd5, 0xb0, 0xbd, 0x84, 0xf2, 0x37, 0x01, 0x32, 0x9b, 0x81, 0x44, 0x94, 0x07, 0x5e,
+	0x6b, 0x89, 0x5c, 0x89, 0xab, 0x64, 0xea, 0xc2, 0xe0, 0xb4, 0xc8, 0x3f, 0xdd, 0x52, 0x78, 0xad,
+	0x85, 0x0e, 0x40, 0xd0, 0xd5, 0x06, 0xd6, 0x89, 0xc8, 0x97, 0x92, 0x95, 0x6c, 0x6d, 0x45, 0xfe,
+	0xeb, 0x57, 0x95, 0x87, 0xac, 0xf2, 0x1e, 0x83, 0x6e, 0x9b, 0xd4, 0xe9, 0x2b, 0x3e, 0x0f, 0xca,
+	0xc1, 0x82, 0x66, 0xa8, 0x6d, 0x2c, 0x26, 0xdd, 0x62, 0x8a, 0x17, 0xa0, 0xe7, 0x90, 0x76, 0xba,
+	0xa6, 0xab, 0x51, 0x4c, 0x95, 0xb8, 0x4a, 0xb6, 0xf6, 0x70, 0xa6, 0x42, 0x8a, 0x87, 0x55, 0x02,
+	0x12, 0x54, 0x81, 0x14, 0xb1, 0x71, 0x53, 0x5c, 0x60, 0x64, 0x39, 0xd9, 0x73, 0x43, 0x0e, 0xdc,
+	0x90, 0x37, 0xcc, 0xbe, 0xc2, 0x32, 0x50, 0x09, 0xb2, 0xc4, 0x54, 0x6d, 0x72, 0x6c, 0x51, 0x8a,
+	0x1d, 0x51, 0x60, 0xaa, 0xa2, 0x1f, 0xa1, 0x25, 0xb8, 0x12, 0x84, 0x87, 0x1d, 0xdc, 0x17, 0xd3,
+	0x17, 0x53, 0x9e, 0xe1, 0x3e, 0xda, 0x04, 0x68, 0x3a, 0x58, 0xa5, 0xb8, 0x75, 0xa8, 0x52, 0x71,
+	0x91, 0x15, 0x95, 0xc6, 0x8a, 0xbe, 0x0c, 0x5a, 0x50, 0x5f, 0x3c, 0x39, 0x2d, 0x26, 0xbe, 0xfe,
+	0x2a, 0x72, 0x4a, 0xc6, 0xc7, 0x6d, 0x50, 0x97, 0xa4, 0x6b, 0xb7, 0x02, 0x92, 0xcc, 0x2c, 0x24,
+	0x3e, 0x6e, 0x83, 0xa2, 0x06, 0x00, 0xfe, 0x48, 0xb1, 0x49, 0x34, 0xcb, 0x24, 0x22, 0xb0, 0xa6,
+	0x3d, 0x99, 0xc9, 0xcb, 0xed, 0x21, 0x9c, 0x35, 0xae, 0x9e, 0x72, 0xcb, 0x28, 0x11, 0x56, 0x69,
+	0x15, 0xb2, 0x91, 0xce, 0xa2, 0x6b, 0x90, 0x74, 0x6d, 0x61, 0xc3, 0xa3, 0xb8, 0x47, 0xb7, 0xc7,
+	0x3d, 0x55, 0xef, 0x62, 0x91, 0xf7, 0x7a, 0xcc, 0x82, 0x35, 0x7e, 0x85, 0x93, 0xf6, 0x21, 0xed,
+	0xf7, 0x0a, 0x21, 0x48, 0x99, 0xaa, 0x81, 0x7d, 0x1c, 0x3b, 0x23, 0x19, 0xd2, 0x96, 0x4d, 0x99,
+	0x74, 0xfe, 0x0f, 0x9d, 0x0b, 0x92, 0xa4, 0x17, 0x70, 0x75, 0x44, 0x6e, 0x8c, 0x9a, 0x3b, 0x51,
+	0x35, 0x93, 0x28, 0x43, 0x8d, 0xe5, 0x7b, 0x70, 0x7d, 0x17, 0xd3, 0xa1, 0x21, 0x0a, 0xfe, 0xd0,
+	0xc5, 0x84, 0x4e, 0x5a, 0x91, 0xf2, 0x31, 0xe4, 0x2e, 0xa6, 0x13, 0xdb, 0x32, 0x09, 0x46, 0x07,
+	0x90, 0x19, 0x5a, 0xcc, 0x60, 0xd9, 0xda, 0xdd, 0x59, 0x1a, 0xe1, 0x1b, 0x1f, 0x92, 0x94, 0x97,
+	0xe1, 0xc6, 0x9e, 0x46, 0xc2, 0x52, 0x24, 0x90, 0x26, 0x42, 0xfa, 0x48, 0xd3, 0x29, 0x76, 0x88,
+	0xc8, 0x95, 0x92, 0x95, 0x8c, 0x12, 0x84, 0x65, 0x1d, 0xf2, 0xa3, 0x10, 0x5f, 0x9e, 0x02, 0x10,
+	0x16, 0x66, 0xb0, 0xcb, 0xe9, 0x8b, 0xb0, 0x94, 0xdf, 0x43, 0x7e, 0x93, 0x8d, 0xf3, 0x98, 0x79,
+	0xff, 0xdf, 0x8c, 0x0e, 0xdc, 0x1c, 0xab, 0x35, 0x37, 0xe7, 0xbf, 0x73, 0x90, 0x7f, 0xc5, 0x76,
+	0x6c, 0xfe, 0xdf, 0x0c, 0xad, 0x43, 0xd6, 0xdb, 0x67, 0xf6, 0x9e, 0xfb, 0x53, 0x3b, 0xfe, 0x10,
+	0xec, 0xb8, 0x4f, 0xfe, 0xbe, 0x4a, 0x3a, 0x8a, 0xff, 0x6c, 0xb8, 0x67, 0xd7, 0x96, 0x31, 0xa1,
+	0x73, 0xb3, 0xe5, 0x3e, 0xe4, 0xb7, 0xb0, 0x8e, 0x63, 0x5c, 0x99, 0xb0, 0x2c, 0xb5, 0xb3, 0x14,
+	0x40, 0x38, 0x8c, 0xa8, 0x07, 0xc9, 0x5d, 0x4c, 0xd1, 0xa3, 0x29, 0x64, 0xc4, 0xac, 0xa4, 0xf4,
+	0x78, 0x66, 0x9c, 0x6f, 0xc5, 0x27, 0x48, 0xb9, 0x6b, 0x81, 0xa6, 0xf9, 0x39, 0x8b, 0x5d, 0x39,
+	0x69, 0xf5, 0x12, 0x48, 0xbf, 0xf8, 0x67, 0x10, 0xbc, 0xc9, 0x45, 0xd3, 0x90, 0xc4, 0x2f, 0x94,
+	0xb4, 0x76, 0x19, 0x68, 0x28, 0xc0, 0x9b, 0x91, 0xa9, 0x04, 0xc4, 0xcf, 0xfd, 0x54, 0x02, 0x26,
+	0x4d, 0xe2, 0x5b, 0x10, 0xbc, 0xb9, 0x99, 0x4a, 0x40, 0xfc, 0x88, 0x49, 0xf9, 0xb1, 0x8d, 0xd8,
+	0x76, 0xff, 0x21, 0xd5, 0xdf, 0x9d, 0x9c, 0x17, 0x12, 0x3f, 0xcf, 0x0b, 0x89, 0x2f, 0x83, 0x02,
+	0x77, 0x32, 0x28, 0x70, 0x3f, 0x06, 0x05, 0xee, 0x6c, 0x50, 0xe0, 0xde, 0xec, 0xfc, 0xc3, 0x9f,
+	0xbe, 0xf5, 0x30, 0x7a, 0x9d, 0x68, 0x08, 0xac, 0xe6, 0x83, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff,
+	0x95, 0x94, 0x84, 0xf2, 0x47, 0x0a, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
index 0a2311c..b7b32d9 100644
--- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
@@ -2,7 +2,7 @@
 
 package containerd.services.containers.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
index c9b76b5..4e4f723 100644
--- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/content/v1/content.proto
-// DO NOT EDIT!
 
 /*
 	Package content is a generated protocol buffer package.
@@ -33,10 +32,11 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import google_protobuf1 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
-import google_protobuf3 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf3 "github.com/gogo/protobuf/types"
 
 import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
 import time "time"
@@ -1550,24 +1550,6 @@
 	return i, nil
 }
 
-func encodeFixed64Content(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Content(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -2247,51 +2229,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthContent
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowContent
@@ -2301,41 +2246,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowContent
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContent
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthContent
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContent
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthContent
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipContent(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthContent
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthContent
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3895,51 +3879,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowContent
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthContent
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowContent
@@ -3949,41 +3896,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowContent
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContent
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthContent
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowContent
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthContent
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipContent(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthContent
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthContent
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -4391,73 +4377,73 @@
 }
 
 var fileDescriptorContent = []byte{
-	// 1079 bytes of a gzipped FileDescriptorProto
+	// 1081 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45,
-	0x14, 0xcf, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x7b, 0xbb, 0x42,
+	0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42,
 	0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b,
-	0x97, 0x88, 0x5e, 0xca, 0xda, 0x1e, 0x9b, 0x55, 0x6c, 0xaf, 0xbb, 0x33, 0xb6, 0x08, 0x27, 0x2e,
-	0x48, 0x28, 0xea, 0x01, 0x71, 0xcf, 0x05, 0xf8, 0x2b, 0x38, 0x70, 0xce, 0x91, 0x23, 0xe2, 0xd0,
-	0xd2, 0xfc, 0x0f, 0xdc, 0xd1, 0xcc, 0xce, 0xda, 0xeb, 0x8f, 0xb0, 0xb6, 0xe3, 0x9e, 0xfc, 0x66,
-	0xf6, 0xfd, 0xde, 0xf7, 0xc7, 0x18, 0xee, 0x35, 0x1d, 0xf6, 0x4d, 0xaf, 0x6a, 0xd6, 0xdc, 0x76,
-	0xa1, 0xe6, 0x76, 0x98, 0xed, 0x74, 0x88, 0x57, 0x0f, 0x93, 0x76, 0xd7, 0x29, 0x50, 0xe2, 0xf5,
-	0x9d, 0x1a, 0xa1, 0xe2, 0x9e, 0x74, 0x58, 0xa1, 0x7f, 0x2b, 0x20, 0xcd, 0xae, 0xe7, 0x32, 0x17,
-	0x67, 0x87, 0x08, 0x33, 0xe0, 0x36, 0x03, 0x96, 0xfe, 0x2d, 0x2d, 0xdd, 0x74, 0x9b, 0xae, 0x60,
-	0x2d, 0x70, 0xca, 0x47, 0x69, 0x7a, 0xd3, 0x75, 0x9b, 0x2d, 0x52, 0x10, 0xa7, 0x6a, 0xaf, 0x51,
-	0x68, 0x38, 0xa4, 0x55, 0x7f, 0xda, 0xb6, 0xe9, 0x91, 0xe4, 0xc8, 0x8d, 0x73, 0x30, 0xa7, 0x4d,
-	0x28, 0xb3, 0xdb, 0x5d, 0xc9, 0xf0, 0xf6, 0x38, 0x03, 0x69, 0x77, 0xd9, 0xb1, 0xff, 0xd1, 0xf8,
-	0x37, 0x06, 0xf1, 0xfd, 0x4e, 0xc3, 0xc5, 0x9f, 0x81, 0x5a, 0x77, 0x9a, 0x84, 0xb2, 0x0c, 0xd2,
-	0x51, 0x7e, 0x7d, 0xb7, 0x78, 0xf6, 0x22, 0xb7, 0xf2, 0xf7, 0x8b, 0xdc, 0x8d, 0x90, 0xfb, 0x6e,
+	0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17,
+	0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68,
+	0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99,
+	0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d,
+	0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d,
+	0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5,
+	0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58,
+	0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14,
+	0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26,
+	0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc,
+	0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9,
+	0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e,
 	0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09,
-	0x18, 0x43, 0x9c, 0x3a, 0xdf, 0x91, 0x4c, 0x4c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4,
-	0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0x8c, 0xa2, 0xa3, 0x7c, 0xb2, 0xa8, 0x99, 0xbe, 0x69,
-	0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe3, 0xfa, 0x7f, 0x7a, 0x99, 0x43, 0xd6, 0xba,
-	0xc4, 0x95, 0x18, 0x17, 0xd2, 0xeb, 0xd6, 0x03, 0x21, 0xf1, 0x79, 0x84, 0x48, 0x5c, 0x89, 0xe1,
-	0xfb, 0xa0, 0xb6, 0xec, 0x2a, 0x69, 0xd1, 0x4c, 0x42, 0x57, 0xf2, 0xc9, 0xe2, 0xb6, 0xf9, 0xff,
-	0x99, 0x31, 0x79, 0x7c, 0xcc, 0x07, 0x02, 0xb2, 0xd7, 0x61, 0xde, 0xb1, 0x25, 0xf1, 0xda, 0x87,
-	0x90, 0x0c, 0x5d, 0xe3, 0x14, 0x28, 0x47, 0xe4, 0xd8, 0x8f, 0x9f, 0xc5, 0x49, 0x9c, 0x86, 0x44,
-	0xdf, 0x6e, 0xf5, 0xfc, 0x48, 0xac, 0x5b, 0xfe, 0xe1, 0xa3, 0xd8, 0x07, 0xc8, 0xf8, 0x0a, 0x92,
-	0x5c, 0xac, 0x45, 0x9e, 0xf5, 0x78, 0xc4, 0x96, 0x18, 0x7d, 0xe3, 0x21, 0x6c, 0xf8, 0xa2, 0x69,
-	0xd7, 0xed, 0x50, 0x82, 0x3f, 0x86, 0xb8, 0xd3, 0x69, 0xb8, 0x42, 0x72, 0xb2, 0xf8, 0xee, 0x2c,
-	0xde, 0xee, 0xc6, 0xb9, 0x7e, 0x4b, 0xe0, 0x8c, 0xe7, 0x08, 0xae, 0x3c, 0x16, 0xd1, 0x0b, 0xac,
-	0xbd, 0xa4, 0x44, 0x7c, 0x07, 0x92, 0x7e, 0x3a, 0x44, 0x1d, 0x8b, 0xe0, 0x4c, 0xcb, 0xe3, 0x3d,
-	0x5e, 0xea, 0x07, 0x36, 0x3d, 0xb2, 0x64, 0xd6, 0x39, 0x6d, 0x7c, 0x01, 0x57, 0x03, 0x6b, 0x96,
-	0xe4, 0xa0, 0x09, 0xf8, 0x81, 0x43, 0x59, 0xd9, 0x67, 0x09, 0x9c, 0xcc, 0xc0, 0x6a, 0xc3, 0x69,
-	0x31, 0xe2, 0xd1, 0x0c, 0xd2, 0x95, 0xfc, 0xba, 0x15, 0x1c, 0x8d, 0xc7, 0xb0, 0x39, 0xc2, 0x3f,
-	0x61, 0x86, 0xb2, 0x90, 0x19, 0x55, 0x48, 0xdf, 0x25, 0x2d, 0xc2, 0xc8, 0x98, 0x21, 0xcb, 0xac,
-	0x8d, 0xe7, 0x08, 0xb0, 0x45, 0xec, 0xfa, 0xeb, 0x53, 0x81, 0xaf, 0x81, 0xea, 0x36, 0x1a, 0x94,
-	0x30, 0xd9, 0xfe, 0xf2, 0x34, 0x18, 0x0a, 0xca, 0x70, 0x28, 0x18, 0x25, 0xd8, 0x1c, 0xb1, 0x46,
-	0x46, 0x72, 0x28, 0x02, 0x8d, 0x8b, 0xa8, 0xdb, 0xcc, 0x16, 0x82, 0x37, 0x2c, 0x41, 0x1b, 0xbf,
-	0xc4, 0x40, 0x7d, 0xc4, 0x6c, 0xd6, 0xa3, 0x7c, 0x3a, 0x50, 0x66, 0x7b, 0x72, 0x3a, 0xa0, 0x79,
-	0xa6, 0x83, 0xc4, 0x4d, 0x8c, 0x98, 0xd8, 0x62, 0x23, 0x26, 0x05, 0x8a, 0x47, 0x1a, 0xc2, 0xd5,
-	0x75, 0x8b, 0x93, 0x21, 0x97, 0xe2, 0x23, 0x2e, 0xa5, 0x21, 0xc1, 0x5c, 0x66, 0xb7, 0x32, 0x09,
-	0x71, 0xed, 0x1f, 0xf0, 0x43, 0x58, 0x23, 0xdf, 0x76, 0x49, 0x8d, 0x91, 0x7a, 0x46, 0x5d, 0x38,
-	0x23, 0x03, 0x19, 0xc6, 0x75, 0xb8, 0xe2, 0xc7, 0x28, 0x48, 0xb8, 0x34, 0x10, 0x0d, 0x0c, 0xe4,
-	0x6d, 0x15, 0xb0, 0x0c, 0xea, 0x59, 0xa5, 0xe2, 0x46, 0x86, 0xf2, 0xbd, 0xa8, 0x8a, 0x96, 0x78,
-	0x89, 0x32, 0x0a, 0x7e, 0x9b, 0xf8, 0xb7, 0x84, 0x46, 0xf7, 0xd5, 0xd7, 0x90, 0x1e, 0x05, 0x48,
-	0x43, 0xee, 0xc3, 0x1a, 0x95, 0x77, 0xb2, 0xb9, 0x66, 0x34, 0x45, 0xb6, 0xd7, 0x00, 0x6d, 0xfc,
-	0xac, 0xc0, 0xe6, 0xa1, 0xe7, 0x4c, 0xb4, 0x58, 0x19, 0x54, 0xbb, 0xc6, 0x1c, 0xb7, 0x23, 0x5c,
-	0xbd, 0x5a, 0xbc, 0x19, 0x25, 0x5f, 0x08, 0x29, 0x09, 0x88, 0x25, 0xa1, 0x41, 0x4c, 0x63, 0xc3,
-	0xa4, 0x0f, 0x92, 0xab, 0x5c, 0x94, 0xdc, 0xf8, 0xe5, 0x93, 0x1b, 0x2a, 0xad, 0xc4, 0xd4, 0x6e,
-	0x51, 0x87, 0xdd, 0x82, 0x0f, 0x07, 0xbb, 0x6f, 0x55, 0x04, 0xf2, 0x93, 0x99, 0x1c, 0x1d, 0x8d,
-	0xd6, 0xb2, 0x57, 0xe1, 0xcb, 0x18, 0xa4, 0x47, 0xd5, 0xc8, 0xbc, 0x2f, 0x25, 0x2b, 0xa3, 0x43,
-	0x21, 0xb6, 0x8c, 0xa1, 0xa0, 0x2c, 0x36, 0x14, 0xe6, 0x1b, 0x01, 0xc3, 0x91, 0xac, 0x5e, 0x7a,
-	0xea, 0xeb, 0xb0, 0x51, 0xaa, 0xba, 0x1e, 0xbb, 0xb0, 0xfb, 0x6f, 0xfc, 0x80, 0x20, 0x19, 0x8a,
-	0x1e, 0x7e, 0x07, 0xe2, 0x8f, 0x2a, 0xa5, 0x4a, 0x6a, 0x45, 0xdb, 0x3c, 0x39, 0xd5, 0xdf, 0x08,
-	0x7d, 0xe2, 0x9d, 0x85, 0x73, 0x90, 0x38, 0xb4, 0xf6, 0x2b, 0x7b, 0x29, 0xa4, 0xa5, 0x4f, 0x4e,
-	0xf5, 0x54, 0xe8, 0xbb, 0x20, 0xf1, 0x75, 0x50, 0xcb, 0x9f, 0x1f, 0x1c, 0xec, 0x57, 0x52, 0x31,
-	0xed, 0xad, 0x93, 0x53, 0xfd, 0xcd, 0x10, 0x47, 0xd9, 0x6d, 0xb7, 0x1d, 0xa6, 0x6d, 0xfe, 0xf8,
-	0x6b, 0x76, 0xe5, 0xf7, 0xdf, 0xb2, 0x61, 0xbd, 0xc5, 0x3f, 0x56, 0x61, 0x55, 0x96, 0x01, 0xb6,
-	0xe5, 0xcb, 0xf4, 0xe6, 0x2c, 0x9b, 0x54, 0xba, 0xa6, 0xbd, 0x3f, 0x1b, 0xb3, 0xac, 0xb0, 0x26,
-	0xa8, 0xfe, 0x5b, 0x02, 0x6f, 0x45, 0xe1, 0x46, 0x5e, 0x40, 0x9a, 0x39, 0x2b, 0xbb, 0x54, 0xf4,
-	0x0c, 0xe2, 0x7c, 0xb4, 0xe1, 0x62, 0x14, 0x6e, 0xf2, 0x21, 0xa2, 0xed, 0xcc, 0x85, 0xf1, 0x15,
-	0x6e, 0x23, 0xfc, 0x25, 0xa8, 0xfe, 0x73, 0x02, 0xdf, 0x8e, 0x12, 0x30, 0xed, 0xd9, 0xa1, 0x5d,
-	0x9b, 0xa8, 0xef, 0x3d, 0xfe, 0xbf, 0x81, 0xbb, 0xc2, 0x77, 0x76, 0xb4, 0x2b, 0x93, 0xef, 0x8c,
-	0x68, 0x57, 0xa6, 0xbc, 0x06, 0xb6, 0x11, 0x4f, 0x93, 0x5c, 0xf1, 0x5b, 0x33, 0xee, 0xa0, 0x59,
-	0xd3, 0x34, 0xb6, 0xf2, 0x8e, 0x61, 0x23, 0xbc, 0x81, 0xf0, 0x4c, 0xa1, 0x1f, 0x5b, 0x70, 0xda,
-	0xed, 0xf9, 0x40, 0x52, 0x75, 0x1f, 0x12, 0x7e, 0xeb, 0xec, 0x2c, 0x30, 0x92, 0xa3, 0x75, 0x4e,
-	0x1b, 0xb0, 0x79, 0xb4, 0x8d, 0xf0, 0x01, 0x24, 0xc4, 0x6c, 0xc0, 0x91, 0x9d, 0x13, 0x1e, 0x21,
-	0x17, 0x55, 0xc7, 0xee, 0x93, 0xb3, 0x57, 0xd9, 0x95, 0xbf, 0x5e, 0x65, 0x57, 0xbe, 0x3f, 0xcf,
-	0xa2, 0xb3, 0xf3, 0x2c, 0xfa, 0xf3, 0x3c, 0x8b, 0xfe, 0x39, 0xcf, 0xa2, 0x27, 0x9f, 0x2e, 0xfa,
-	0x3f, 0xfa, 0x8e, 0x24, 0xab, 0xaa, 0xd0, 0xb5, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbf,
-	0xc1, 0xae, 0xf1, 0x92, 0x0f, 0x00, 0x00,
+	0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4,
+	0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69,
+	0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35,
+	0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3,
+	0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf,
+	0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7,
+	0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9,
+	0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4,
+	0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3,
+	0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59,
+	0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58,
+	0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b,
+	0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c,
+	0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3,
+	0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f,
+	0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59,
+	0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50,
+	0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a,
+	0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc,
+	0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6,
+	0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57,
+	0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26,
+	0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1,
+	0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90,
+	0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2,
+	0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20,
+	0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1,
+	0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70,
+	0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f,
+	0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb,
+	0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34,
+	0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e,
+	0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9,
+	0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28,
+	0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16,
+	0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39,
+	0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7,
+	0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3,
+	0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c,
+	0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d,
+	0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8,
+	0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b,
+	0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb,
+	0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19,
+	0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3,
+	0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4,
+	0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c,
+	0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42,
+	0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7,
+	0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd,
+	0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff,
+	0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
index a0a41c4..4f11871 100644
--- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
@@ -2,7 +2,7 @@
 
 package containerd.services.content.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/empty.proto";
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
index b19a377..f7b3529 100644
--- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
-// DO NOT EDIT!
 
 /*
 	Package diff is a generated protocol buffer package.
@@ -19,7 +18,8 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import containerd_types "github.com/containerd/containerd/api/types"
 import containerd_types1 "github.com/containerd/containerd/api/types"
 
@@ -386,24 +386,6 @@
 	return i, nil
 }
 
-func encodeFixed64Diff(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Diff(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintDiff(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -931,51 +913,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowDiff
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowDiff
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthDiff
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowDiff
@@ -985,41 +930,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowDiff
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDiff
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthDiff
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowDiff
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthDiff
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipDiff(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthDiff
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthDiff
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -1235,34 +1219,34 @@
 }
 
 var fileDescriptorDiff = []byte{
-	// 454 bytes of a gzipped FileDescriptorProto
+	// 457 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
-	0x14, 0x9f, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
-	0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x09, 0x24, 0x50, 0xda,
-	0xbc, 0x74, 0x16, 0x69, 0xec, 0xd9, 0x6e, 0xa5, 0xdc, 0xf8, 0x2e, 0x7c, 0x14, 0x2e, 0x3b, 0x72,
-	0xe4, 0x48, 0xfb, 0x49, 0x90, 0x9d, 0x14, 0x22, 0x21, 0x95, 0xc0, 0x29, 0x2f, 0xcf, 0xbf, 0x7f,
-	0xf6, 0xb3, 0xe1, 0x6c, 0xce, 0xcd, 0xd5, 0x72, 0xca, 0x66, 0x62, 0x11, 0xce, 0x44, 0x6e, 0x62,
-	0x9e, 0xa3, 0x4a, 0xea, 0x65, 0x2c, 0x79, 0xa8, 0x51, 0xad, 0xf8, 0x0c, 0x75, 0x98, 0xf0, 0x34,
-	0x0d, 0x57, 0x47, 0xee, 0xcb, 0xa4, 0x12, 0x46, 0xd0, 0x7b, 0xbf, 0xb1, 0x6c, 0x8b, 0x63, 0x6e,
-	0x7d, 0x75, 0xe4, 0x1f, 0xcc, 0xc5, 0x5c, 0x38, 0x5c, 0x68, 0xab, 0x92, 0xe2, 0x1f, 0x37, 0x32,
-	0x35, 0x85, 0x44, 0x1d, 0x2e, 0xc4, 0x32, 0x37, 0x15, 0xef, 0xf4, 0x1f, 0x78, 0x09, 0xea, 0x99,
-	0xe2, 0xd2, 0x08, 0x55, 0x92, 0x47, 0xd7, 0xb0, 0xff, 0x52, 0xca, 0xac, 0x88, 0xf0, 0x7a, 0x89,
-	0xda, 0xd0, 0x27, 0xd0, 0xb1, 0x29, 0x3d, 0x32, 0x24, 0xe3, 0xc1, 0xe4, 0x3e, 0xab, 0x6d, 0xc3,
-	0x29, 0xb0, 0xf3, 0x5f, 0x0a, 0x91, 0x43, 0xd2, 0x10, 0x7a, 0x2e, 0x8d, 0xf6, 0x5a, 0xc3, 0xf6,
-	0x78, 0x30, 0xb9, 0xfb, 0x27, 0xe7, 0xad, 0x5d, 0x8f, 0x2a, 0xd8, 0xe8, 0x0d, 0xdc, 0xae, 0x2c,
-	0xb5, 0x14, 0xb9, 0x46, 0x7a, 0x0c, 0xb7, 0x62, 0x29, 0x33, 0x8e, 0x49, 0x23, 0xdb, 0x2d, 0x78,
-	0xf4, 0xa5, 0x05, 0x83, 0x73, 0x9e, 0xa6, 0xdb, 0xec, 0x8f, 0xa0, 0x93, 0x61, 0x6a, 0x3c, 0xb2,
-	0x3b, 0x87, 0x03, 0xd1, 0xc7, 0xd0, 0x55, 0x7c, 0x7e, 0x65, 0xfe, 0x96, 0xba, 0x44, 0xd1, 0x07,
-	0x00, 0x0b, 0x4c, 0x78, 0xfc, 0xd1, 0xae, 0x79, 0xed, 0x21, 0x19, 0xf7, 0xa3, 0xbe, 0xeb, 0x5c,
-	0x16, 0x12, 0xe9, 0x1d, 0x68, 0x2b, 0x4c, 0xbd, 0x8e, 0xeb, 0xdb, 0x92, 0x5e, 0x40, 0x2f, 0x8b,
-	0xa7, 0x98, 0x69, 0xaf, 0xeb, 0x0c, 0x9e, 0xb1, 0x1d, 0x37, 0x82, 0xd5, 0xb6, 0xc1, 0x2e, 0x1c,
-	0xed, 0x75, 0x6e, 0x54, 0x11, 0x55, 0x1a, 0xfe, 0x0b, 0x18, 0xd4, 0xda, 0xd6, 0xee, 0x13, 0x16,
-	0xee, 0xb4, 0xfa, 0x91, 0x2d, 0xe9, 0x01, 0x74, 0x57, 0x71, 0xb6, 0x44, 0xaf, 0xe5, 0x7a, 0xe5,
-	0xcf, 0x49, 0xeb, 0x39, 0x19, 0x9d, 0xc1, 0x7e, 0xa9, 0x5e, 0x9d, 0xf6, 0x76, 0xc2, 0xed, 0xa6,
-	0x13, 0x9e, 0x7c, 0x25, 0xd0, 0xb1, 0x12, 0xf4, 0x03, 0x74, 0xdd, 0xe4, 0xe8, 0xe1, 0xce, 0xcd,
-	0xd4, 0x2f, 0x94, 0xff, 0xb0, 0x09, 0xb4, 0x8a, 0xf6, 0xbe, 0xf2, 0x19, 0x37, 0x3d, 0x2b, 0xff,
-	0xb0, 0x01, 0xb2, 0x14, 0x7f, 0x75, 0x79, 0xb3, 0x0e, 0xf6, 0xbe, 0xaf, 0x83, 0xbd, 0xcf, 0x9b,
-	0x80, 0xdc, 0x6c, 0x02, 0xf2, 0x6d, 0x13, 0x90, 0x1f, 0x9b, 0x80, 0xbc, 0x3b, 0xf9, 0xaf, 0xd7,
-	0x7e, 0x6a, 0xbf, 0xd3, 0x9e, 0x7b, 0x46, 0x4f, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x01,
-	0x51, 0xf0, 0x32, 0x04, 0x00, 0x00,
+	0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
+	0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a,
+	0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47,
+	0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef,
+	0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea,
+	0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1,
+	0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63,
+	0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35,
+	0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa,
+	0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab,
+	0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6,
+	0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f,
+	0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb,
+	0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b,
+	0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d,
+	0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2,
+	0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb,
+	0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd,
+	0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b,
+	0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f,
+	0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb,
+	0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77,
+	0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b,
+	0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac,
+	0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f,
+	0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1,
+	0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff,
+	0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
index d1d2a82..66d7ecb 100644
--- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
@@ -2,7 +2,7 @@
 
 package containerd.services.diff.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
 import "github.com/containerd/containerd/api/types/descriptor.proto";
 
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto
deleted file mode 100644
index 95358f5..0000000
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto
+++ /dev/null
@@ -1,13 +0,0 @@
-syntax = "proto3";
-
-package containerd.services.events.v1;
-
-import "gogoproto/gogo.proto";
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
-
-option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
-option (containerd.plugin.fieldpath_all) = true;
-
-message ContentDelete {
-	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go
new file mode 100644
index 0000000..070604b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go
@@ -0,0 +1,2 @@
+// Package events defines the event pushing and subscription service.
+package events
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
index e894064..e2ad455 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
@@ -1,16 +1,28 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/events/v1/events.proto
-// DO NOT EDIT!
 
+/*
+	Package events is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/events/v1/events.proto
+
+	It has these top-level messages:
+		PublishRequest
+		ForwardRequest
+		SubscribeRequest
+		Envelope
+*/
 package events
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/containerd/containerd/protobuf/plugin"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import google_protobuf1 "github.com/gogo/protobuf/types"
-import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf2 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 
 import time "time"
@@ -35,6 +47,12 @@
 var _ = math.Inf
 var _ = time.Kitchen
 
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
 type PublishRequest struct {
 	Topic string                `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
 	Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
@@ -449,24 +467,6 @@
 	return i, nil
 }
 
-func encodeFixed64Events(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Events(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1152,34 +1152,35 @@
 }
 
 var fileDescriptorEvents = []byte{
-	// 462 bytes of a gzipped FileDescriptorProto
+	// 466 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
 	0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
-	0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x84, 0xd8, 0x25, 0xe9, 0x6d,
-	0x6a, 0x29, 0xb1, 0x4d, 0xec, 0x04, 0xcd, 0x6e, 0x1e, 0x81, 0x0d, 0x6f, 0xc2, 0x86, 0x37, 0xe8,
-	0x92, 0x25, 0x2b, 0x60, 0xfa, 0x24, 0xa8, 0x89, 0xdd, 0x30, 0x2d, 0x10, 0x34, 0xbb, 0x6b, 0xdf,
-	0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x3c, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
-	0x4c, 0x05, 0x94, 0x41, 0x36, 0xfd, 0xbd, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
+	0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb,
+	0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0,
+	0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf,
+	0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
+	0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
 	0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
-	0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0x67, 0x9e, 0x48, 0xf2,
-	0x98, 0x32, 0x6f, 0x46, 0x21, 0x99, 0x8a, 0x40, 0xcd, 0xab, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
-	0x96, 0xde, 0xaa, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
-	0x9b, 0x9b, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x66, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
-	0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0xb9, 0x0f, 0xef, 0x73, 0x90, 0xca, 0xee,
+	0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2,
+	0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
+	0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
+	0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
+	0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee,
 	0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
 	0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
-	0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0x0f, 0x41, 0x36, 0x35, 0xcc, 0xe7, 0xb8,
-	0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0x9f, 0x46, 0x92, 0x89, 0x96,
-	0xfb, 0xeb, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
-	0x3e, 0x9c, 0xd1, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x46,
-	0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0xeb, 0xc1, 0xf5, 0x07, 0x38, 0x5b, 0x13, 0xbc, 0x36, 0x8a,
-	0x71, 0x7b, 0xf1, 0xbd, 0xd7, 0xfa, 0xf8, 0xa3, 0x87, 0xfc, 0xfa, 0x98, 0x7d, 0x0b, 0x5b, 0x2c,
-	0x48, 0x41, 0x8a, 0x20, 0x82, 0xd2, 0x05, 0xcb, 0xaf, 0x37, 0x6a, 0xd7, 0x76, 0xff, 0xe8, 0xda,
-	0x5e, 0xa3, 0x6b, 0x8f, 0xf6, 0xce, 0xbf, 0xf4, 0xd0, 0xe8, 0xd3, 0x0e, 0x3e, 0x98, 0x94, 0x2e,
-	0xd8, 0xa7, 0xf8, 0x50, 0x47, 0x63, 0xdf, 0x6f, 0x70, 0xeb, 0x72, 0x84, 0xce, 0xf5, 0xad, 0x7b,
-	0x26, 0xab, 0x37, 0xb1, 0x22, 0xea, 0x60, 0x1a, 0x89, 0x97, 0x03, 0xfc, 0x2b, 0x31, 0xc6, 0xd6,
-	0x3a, 0x13, 0xdb, 0x6b, 0x60, 0x6e, 0xa6, 0xe7, 0xfc, 0xef, 0x23, 0x78, 0x80, 0xc6, 0x6f, 0x17,
-	0x17, 0x6e, 0xeb, 0xdb, 0x85, 0xdb, 0x3a, 0x5f, 0xba, 0x68, 0xb1, 0x74, 0xd1, 0xd7, 0xa5, 0x8b,
-	0x7e, 0x2e, 0x5d, 0xf4, 0xee, 0xc9, 0x15, 0xff, 0xeb, 0xc7, 0x55, 0x15, 0x1e, 0x94, 0x23, 0x3d,
-	0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x38, 0x37, 0x72, 0x20, 0x04, 0x00, 0x00,
+	0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8,
+	0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96,
+	0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
+	0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42,
+	0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14,
+	0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58,
+	0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5,
+	0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d,
+	0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee,
+	0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b,
+	0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97,
+	0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b,
+	0x7e, 0xac, 0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0,
+	0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04,
+	0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
index a20a1e8..58f2dad 100644
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
@@ -2,8 +2,8 @@
 
 package containerd.services.events.v1;
 
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
-import "gogoproto/gogo.proto";
+import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/timestamp.proto";
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto
deleted file mode 100644
index 2d6be2b..0000000
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto
+++ /dev/null
@@ -1,23 +0,0 @@
-syntax = "proto3";
-
-package containerd.services.events.v1;
-
-import "gogoproto/gogo.proto";
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
-
-option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
-option (containerd.plugin.fieldpath_all) = true;
-
-message NamespaceCreate {
-	string name = 1;
-	map<string, string> labels  = 2;
-}
-
-message NamespaceUpdate {
-	string name = 1;
-	map<string, string> labels  = 2;
-}
-
-message NamespaceDelete {
-	string name = 1;
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto
deleted file mode 100644
index b6af0ea..0000000
--- a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-syntax = "proto3";
-
-package containerd.services.events.v1;
-
-import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
-
-option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
-option (containerd.plugin.fieldpath_all) = true;
-
-message SnapshotPrepare {
-	string key = 1;
-	string parent = 2;
-}
-
-message SnapshotCommit {
-	string key = 1;
-	string name = 2;
-}
-
-message SnapshotRemove {
-	string key = 1;
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
index 41b97df..4577eb0 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/images/v1/images.proto
-// DO NOT EDIT!
 
 /*
 	Package images is a generated protocol buffer package.
@@ -25,8 +24,9 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
 import google_protobuf2 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 import containerd_types "github.com/containerd/containerd/api/types"
@@ -163,6 +163,11 @@
 
 type DeleteImageRequest struct {
 	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Sync indicates that the delete and cleanup should be done
+	// synchronously before returning to the caller
+	//
+	// Default is false
+	Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"`
 }
 
 func (m *DeleteImageRequest) Reset()                    { *m = DeleteImageRequest{} }
@@ -717,27 +722,19 @@
 		i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
 		i += copy(dAtA[i:], m.Name)
 	}
+	if m.Sync {
+		dAtA[i] = 0x10
+		i++
+		if m.Sync {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
 	return i, nil
 }
 
-func encodeFixed64Images(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Images(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintImages(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -858,6 +855,9 @@
 	if l > 0 {
 		n += 1 + l + sovImages(uint64(l))
 	}
+	if m.Sync {
+		n += 2
+	}
 	return n
 }
 
@@ -985,6 +985,7 @@
 	}
 	s := strings.Join([]string{`&DeleteImageRequest{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Sync:` + fmt.Sprintf("%v", this.Sync) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1081,51 +1082,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowImages
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowImages
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthImages
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowImages
@@ -1135,41 +1099,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowImages
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowImages
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthImages
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowImages
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthImages
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipImages(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthImages
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthImages
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
@@ -2015,6 +2018,26 @@
 			}
 			m.Name = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Sync = bool(v != 0)
 		default:
 			iNdEx = preIndex
 			skippy, err := skipImages(dAtA[iNdEx:])
@@ -2146,46 +2169,47 @@
 }
 
 var fileDescriptorImages = []byte{
-	// 648 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0xd3, 0x4e,
-	0x10, 0xad, 0x93, 0xd4, 0x6d, 0x27, 0x87, 0x5f, 0x7f, 0x4b, 0x85, 0x2c, 0x03, 0x69, 0x14, 0x81,
-	0x94, 0x0b, 0x6b, 0x1a, 0x2e, 0xd0, 0x4a, 0x88, 0xa6, 0x2d, 0x05, 0xa9, 0x70, 0x30, 0xff, 0x2a,
-	0x2e, 0xd5, 0x26, 0x99, 0x18, 0x2b, 0x76, 0x6c, 0xbc, 0x9b, 0x48, 0xb9, 0xf1, 0x11, 0x90, 0xe0,
-	0x43, 0xf5, 0xc8, 0x91, 0x13, 0xd0, 0x1c, 0xf8, 0x1c, 0xc8, 0xbb, 0x1b, 0x9a, 0x26, 0x11, 0x4e,
-	0x4a, 0x6f, 0xe3, 0xf8, 0xbd, 0x79, 0x33, 0x6f, 0x66, 0x62, 0xd8, 0xf7, 0x7c, 0xf1, 0xbe, 0xd7,
-	0xa0, 0xcd, 0x28, 0x74, 0x9a, 0x51, 0x57, 0x30, 0xbf, 0x8b, 0x49, 0x6b, 0x3c, 0x64, 0xb1, 0xef,
-	0x70, 0x4c, 0xfa, 0x7e, 0x13, 0xb9, 0xe3, 0x87, 0xcc, 0x43, 0xee, 0xf4, 0xb7, 0x74, 0x44, 0xe3,
-	0x24, 0x12, 0x11, 0xb9, 0x75, 0x8e, 0xa7, 0x23, 0x2c, 0xd5, 0x88, 0xfe, 0x96, 0xbd, 0xe1, 0x45,
-	0x5e, 0x24, 0x91, 0x4e, 0x1a, 0x29, 0x92, 0x7d, 0xc3, 0x8b, 0x22, 0x2f, 0x40, 0x47, 0x3e, 0x35,
-	0x7a, 0x6d, 0x07, 0xc3, 0x58, 0x0c, 0xf4, 0xcb, 0xf2, 0xe4, 0xcb, 0xb6, 0x8f, 0x41, 0xeb, 0x24,
-	0x64, 0xbc, 0xa3, 0x11, 0x9b, 0x93, 0x08, 0xe1, 0x87, 0xc8, 0x05, 0x0b, 0x63, 0x0d, 0xd8, 0x99,
-	0xab, 0x35, 0x31, 0x88, 0x91, 0x3b, 0x2d, 0xe4, 0xcd, 0xc4, 0x8f, 0x45, 0x94, 0x28, 0x72, 0xe5,
-	0x57, 0x0e, 0x96, 0x9f, 0xa5, 0x0d, 0x10, 0x02, 0x85, 0x2e, 0x0b, 0xd1, 0x32, 0xca, 0x46, 0x75,
-	0xcd, 0x95, 0x31, 0x79, 0x0a, 0x66, 0xc0, 0x1a, 0x18, 0x70, 0x2b, 0x57, 0xce, 0x57, 0x8b, 0xb5,
-	0x7b, 0xf4, 0xaf, 0x06, 0x50, 0x99, 0x89, 0x1e, 0x49, 0xca, 0x41, 0x57, 0x24, 0x03, 0x57, 0xf3,
-	0xc9, 0x36, 0x98, 0x82, 0x25, 0x1e, 0x0a, 0x2b, 0x5f, 0x36, 0xaa, 0xc5, 0xda, 0xcd, 0xf1, 0x4c,
-	0xb2, 0x36, 0xba, 0xff, 0xa7, 0xb6, 0x7a, 0xe1, 0xf4, 0xfb, 0xe6, 0x92, 0xab, 0x19, 0x64, 0x0f,
-	0xa0, 0x99, 0x20, 0x13, 0xd8, 0x3a, 0x61, 0xc2, 0x5a, 0x91, 0x7c, 0x9b, 0x2a, 0x5b, 0xe8, 0xc8,
-	0x16, 0xfa, 0x6a, 0x64, 0x4b, 0x7d, 0x35, 0x65, 0x7f, 0xfa, 0xb1, 0x69, 0xb8, 0x6b, 0x9a, 0xb7,
-	0x2b, 0x93, 0xf4, 0xe2, 0xd6, 0x28, 0xc9, 0xea, 0x22, 0x49, 0x34, 0x6f, 0x57, 0xd8, 0x0f, 0xa1,
-	0x38, 0xd6, 0x1c, 0x59, 0x87, 0x7c, 0x07, 0x07, 0xda, 0xb1, 0x34, 0x24, 0x1b, 0xb0, 0xdc, 0x67,
-	0x41, 0x0f, 0xad, 0x9c, 0xfc, 0x4d, 0x3d, 0x6c, 0xe7, 0x1e, 0x18, 0x95, 0x3b, 0xf0, 0xdf, 0x21,
-	0x0a, 0x69, 0x90, 0x8b, 0x1f, 0x7a, 0xc8, 0xc5, 0x2c, 0xc7, 0x2b, 0x2f, 0x60, 0xfd, 0x1c, 0xc6,
-	0xe3, 0xa8, 0xcb, 0x91, 0x6c, 0xc3, 0xb2, 0xb4, 0x58, 0x02, 0x8b, 0xb5, 0xdb, 0xf3, 0x0c, 0xc1,
-	0x55, 0x94, 0xca, 0x1b, 0x20, 0x7b, 0xd2, 0x83, 0x0b, 0xca, 0x8f, 0x2f, 0x91, 0x51, 0x0f, 0x45,
-	0xe7, 0x7d, 0x0b, 0xd7, 0x2e, 0xe4, 0xd5, 0xa5, 0xfe, 0x7b, 0xe2, 0xcf, 0x06, 0x90, 0xd7, 0xd2,
-	0xf0, 0xab, 0xad, 0x98, 0xec, 0x40, 0x51, 0x0d, 0x52, 0x1e, 0x97, 0x1c, 0xd0, 0xac, 0x0d, 0x78,
-	0x92, 0xde, 0xdf, 0x73, 0xc6, 0x3b, 0xae, 0xde, 0x97, 0x34, 0x4e, 0xdb, 0xbd, 0x50, 0xd4, 0x95,
-	0xb5, 0x7b, 0x17, 0xfe, 0x3f, 0xf2, 0xb9, 0x1a, 0x38, 0x1f, 0x35, 0x6b, 0xc1, 0x4a, 0xdb, 0x0f,
-	0x04, 0x26, 0xdc, 0x32, 0xca, 0xf9, 0xea, 0x9a, 0x3b, 0x7a, 0xac, 0x1c, 0x03, 0x19, 0x87, 0xeb,
-	0x32, 0xea, 0x60, 0x2a, 0x11, 0x09, 0x5f, 0xac, 0x0e, 0xcd, 0xac, 0x54, 0x81, 0xec, 0x63, 0x80,
-	0x13, 0xb6, 0xcf, 0x58, 0xd1, 0xda, 0x97, 0x02, 0x98, 0xaa, 0x00, 0xd2, 0x86, 0xfc, 0x21, 0x0a,
-	0x42, 0x33, 0xf4, 0x26, 0x16, 0xdf, 0x76, 0xe6, 0xc6, 0xeb, 0x06, 0x3b, 0x50, 0x48, 0xdb, 0x26,
-	0x59, 0xff, 0x3f, 0x53, 0x56, 0xda, 0x5b, 0x0b, 0x30, 0xb4, 0x58, 0x04, 0xa6, 0x5a, 0x6d, 0x92,
-	0x45, 0x9e, 0xbe, 0x2c, 0xbb, 0xb6, 0x08, 0xe5, 0x5c, 0x50, 0x2d, 0x57, 0xa6, 0xe0, 0xf4, 0x61,
-	0x64, 0x0a, 0xce, 0x5a, 0xdb, 0x97, 0x60, 0xaa, 0x59, 0x67, 0x0a, 0x4e, 0xaf, 0x84, 0x7d, 0x7d,
-	0xea, 0x64, 0x0e, 0xd2, 0xef, 0x59, 0xfd, 0xf8, 0xf4, 0xac, 0xb4, 0xf4, 0xed, 0xac, 0xb4, 0xf4,
-	0x71, 0x58, 0x32, 0x4e, 0x87, 0x25, 0xe3, 0xeb, 0xb0, 0x64, 0xfc, 0x1c, 0x96, 0x8c, 0x77, 0x8f,
-	0x2e, 0xf9, 0xed, 0xdd, 0x51, 0x51, 0xc3, 0x94, 0x4a, 0xf7, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff,
-	0x74, 0x4c, 0xf0, 0x24, 0xc4, 0x07, 0x00, 0x00,
+	// 659 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40,
+	0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5,
+	0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52,
+	0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5,
+	0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde,
+	0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36,
+	0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31,
+	0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48,
+	0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91,
+	0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b,
+	0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78,
+	0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69,
+	0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c,
+	0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b,
+	0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8,
+	0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d,
+	0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e,
+	0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91,
+	0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa,
+	0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d,
+	0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52,
+	0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17,
+	0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05,
+	0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75,
+	0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52,
+	0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf,
+	0xc1, 0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72,
+	0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb,
+	0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde,
+	0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84,
+	0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06,
+	0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9,
+	0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6,
+	0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d,
+	0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73,
+	0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84,
+	0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05,
+	0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab,
+	0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a,
+	0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47,
+	0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 0xca, 0x64, 0xda,
+	0x07, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
index d6b6b83..152ade2 100644
--- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
@@ -2,7 +2,7 @@
 
 package containerd.services.images.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
@@ -115,4 +115,10 @@
 
 message DeleteImageRequest {
 	string name = 1;
+
+	// Sync indicates that the delete and cleanup should be done
+	// synchronously before returning to the caller
+	//
+	// Default is false
+	bool sync = 2;
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
index 9e61bf7..bce8231 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
-// DO NOT EDIT!
 
 /*
 	Package introspection is a generated protocol buffer package.
@@ -20,7 +19,8 @@
 import math "math"
 import containerd_types "github.com/containerd/containerd/api/types"
 import google_rpc "github.com/containerd/containerd/protobuf/google/rpc"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 import (
 	context "golang.org/x/net/context"
@@ -362,24 +362,6 @@
 	return i, nil
 }
 
-func encodeFixed64Introspection(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Introspection(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -697,51 +679,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowIntrospection
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowIntrospection
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthIntrospection
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Exports == nil {
 				m.Exports = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowIntrospection
@@ -751,41 +696,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowIntrospection
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowIntrospection
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthIntrospection
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowIntrospection
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthIntrospection
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipIntrospection(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthIntrospection
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthIntrospection
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Exports[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Exports[mapkey] = mapvalue
 			}
+			m.Exports[mapkey] = mapvalue
 			iNdEx = postIndex
 		case 6:
 			if wireType != 2 {
@@ -1140,36 +1124,36 @@
 }
 
 var fileDescriptorIntrospection = []byte{
-	// 485 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x3d, 0x6f, 0xdb, 0x30,
-	0x10, 0x35, 0x65, 0xc7, 0x8a, 0xcf, 0xe9, 0x07, 0x88, 0xa0, 0x15, 0x34, 0x28, 0x86, 0xd1, 0xc1,
-	0x28, 0x5a, 0x0a, 0x71, 0x5b, 0xa0, 0x49, 0x81, 0x0e, 0x46, 0x3d, 0x04, 0xc8, 0x10, 0x28, 0x5b,
-	0x97, 0x40, 0x96, 0x69, 0x95, 0xa8, 0x22, 0x32, 0x24, 0x25, 0xd4, 0x5b, 0xb7, 0xfe, 0x35, 0x8f,
-	0x1d, 0x3b, 0x05, 0x8d, 0x7e, 0x43, 0x7f, 0x40, 0x21, 0x51, 0x4a, 0xec, 0xcd, 0x46, 0xb6, 0xbb,
-	0xa7, 0xf7, 0xee, 0xde, 0x3d, 0x88, 0x10, 0xc4, 0x4c, 0x7f, 0xcb, 0x66, 0x24, 0xe2, 0xd7, 0x7e,
-	0xc4, 0x53, 0x1d, 0xb2, 0x94, 0xca, 0xf9, 0x7a, 0x19, 0x0a, 0xe6, 0x2b, 0x2a, 0x73, 0x16, 0x51,
-	0xe5, 0xb3, 0x54, 0x4b, 0xae, 0x04, 0x8d, 0x34, 0xe3, 0xa9, 0x9f, 0x1f, 0x6f, 0x02, 0x44, 0x48,
-	0xae, 0x39, 0x7e, 0xf5, 0xa0, 0x26, 0x8d, 0x92, 0x6c, 0x12, 0xf3, 0x63, 0xf7, 0x64, 0xab, 0xcd,
-	0x7a, 0x29, 0xa8, 0xf2, 0x45, 0x12, 0xea, 0x05, 0x97, 0xd7, 0x66, 0x81, 0xfb, 0x32, 0xe6, 0x3c,
-	0x4e, 0xa8, 0x2f, 0x45, 0xe4, 0x2b, 0x1d, 0xea, 0x4c, 0xd5, 0x1f, 0x0e, 0x63, 0x1e, 0xf3, 0xaa,
-	0xf4, 0xcb, 0xca, 0xa0, 0xc3, 0x7f, 0x16, 0x74, 0x2f, 0x92, 0x2c, 0x66, 0x29, 0xc6, 0xd0, 0x29,
-	0x27, 0x3a, 0x68, 0x80, 0x46, 0xbd, 0xa0, 0xaa, 0xf1, 0x0b, 0xb0, 0xd8, 0xdc, 0xb1, 0x4a, 0x64,
-	0xd2, 0x2d, 0x6e, 0x8f, 0xac, 0xb3, 0x2f, 0x81, 0xc5, 0xe6, 0xd8, 0x85, 0x7d, 0x49, 0x6f, 0x32,
-	0x26, 0xa9, 0x72, 0xda, 0x83, 0xf6, 0xa8, 0x17, 0xdc, 0xf7, 0xf8, 0x33, 0xf4, 0x1a, 0x4f, 0xca,
-	0xe9, 0x0c, 0xda, 0xa3, 0xfe, 0xd8, 0x25, 0x6b, 0x67, 0x57, 0xb6, 0xc9, 0x45, 0x4d, 0x99, 0x74,
-	0x56, 0xb7, 0x47, 0xad, 0xe0, 0x41, 0x82, 0x2f, 0xc1, 0xa6, 0x3f, 0x04, 0x97, 0x5a, 0x39, 0x7b,
-	0x95, 0xfa, 0x84, 0x6c, 0x13, 0x1a, 0x31, 0x67, 0x90, 0xa9, 0xd1, 0x4e, 0x53, 0x2d, 0x97, 0x41,
-	0x33, 0x09, 0x0f, 0xe1, 0x20, 0x0a, 0x45, 0x38, 0x63, 0x09, 0xd3, 0x8c, 0x2a, 0xa7, 0x5b, 0x99,
-	0xde, 0xc0, 0xf0, 0x5b, 0xd8, 0x67, 0x29, 0xd3, 0x57, 0x54, 0x4a, 0xc7, 0x1e, 0xa0, 0x51, 0x7f,
-	0x8c, 0x89, 0x49, 0x93, 0x48, 0x11, 0x91, 0xcb, 0x2a, 0xcd, 0xc0, 0x2e, 0x39, 0x53, 0x29, 0xdd,
-	0x53, 0x38, 0x58, 0xdf, 0x85, 0x9f, 0x43, 0xfb, 0x3b, 0x5d, 0xd6, 0xf1, 0x95, 0x25, 0x3e, 0x84,
-	0xbd, 0x3c, 0x4c, 0x32, 0x6a, 0x02, 0x0c, 0x4c, 0x73, 0x6a, 0x7d, 0x44, 0xc3, 0xd7, 0xf0, 0xd4,
-	0xd8, 0x55, 0x01, 0xbd, 0xc9, 0xa8, 0xd2, 0xd8, 0x01, 0x7b, 0xc1, 0x12, 0x4d, 0xa5, 0x72, 0x50,
-	0xe5, 0xad, 0x69, 0x87, 0x57, 0xf0, 0xec, 0x9e, 0xab, 0x04, 0x4f, 0x15, 0xc5, 0xe7, 0x60, 0x0b,
-	0x03, 0x55, 0xe4, 0xfe, 0xf8, 0xcd, 0x2e, 0x11, 0xd5, 0x91, 0x37, 0x23, 0xc6, 0xbf, 0x10, 0x3c,
-	0x39, 0x5b, 0xa7, 0xe2, 0x1c, 0xec, 0x7a, 0x25, 0x7e, 0xbf, 0xcb, 0xe4, 0xe6, 0x1a, 0xf7, 0xc3,
-	0x8e, 0x2a, 0x73, 0xd7, 0x64, 0xb1, 0xba, 0xf3, 0x5a, 0x7f, 0xee, 0xbc, 0xd6, 0xcf, 0xc2, 0x43,
-	0xab, 0xc2, 0x43, 0xbf, 0x0b, 0x0f, 0xfd, 0x2d, 0x3c, 0xf4, 0xf5, 0xfc, 0x71, 0x6f, 0xf1, 0xd3,
-	0x06, 0x30, 0xeb, 0x56, 0x3f, 0xff, 0xbb, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x21, 0xec, 0xef,
-	0x92, 0xe2, 0x03, 0x00, 0x00,
+	// 487 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
+	0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21,
+	0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10,
+	0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb,
+	0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b,
+	0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda,
+	0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22,
+	0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08,
+	0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5,
+	0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c,
+	0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e,
+	0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a,
+	0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89,
+	0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d,
+	0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49,
+	0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93,
+	0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67,
+	0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32,
+	0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b,
+	0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea,
+	0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5,
+	0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87,
+	0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e,
+	0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e,
+	0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c,
+	0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82,
+	0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe,
+	0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78,
+	0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc,
+	0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6,
+	0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
index 424c731..95e804b 100644
--- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
@@ -4,7 +4,7 @@
 
 import "github.com/containerd/containerd/api/types/platform.proto";
 import "google/rpc/status.proto";
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 
 option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
 
diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
index db2a5f8..a922fde 100644
--- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/leases/v1/leases.proto
-// DO NOT EDIT!
 
 /*
 	Package leases is a generated protocol buffer package.
@@ -21,8 +20,9 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 
 import time "time"
@@ -133,7 +133,7 @@
 	// during the lease eligible for garbage collection if not referenced
 	// or retained by other resources during the lease.
 	Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	// ListTransactions lists all active leases, returning the full list of
+	// List lists all active leases, returning the full list of
 	// leases and optionally including the referenced resources.
 	List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error)
 }
@@ -183,7 +183,7 @@
 	// during the lease eligible for garbage collection if not referenced
 	// or retained by other resources during the lease.
 	Delete(context.Context, *DeleteRequest) (*google_protobuf1.Empty, error)
-	// ListTransactions lists all active leases, returning the full list of
+	// List lists all active leases, returning the full list of
 	// leases and optionally including the referenced resources.
 	List(context.Context, *ListRequest) (*ListResponse, error)
 }
@@ -472,24 +472,6 @@
 	return i, nil
 }
 
-func encodeFixed64Leases(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Leases(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintLeases(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -799,51 +781,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowLeases
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowLeases
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthLeases
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowLeases
@@ -853,41 +798,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowLeases
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowLeases
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthLeases
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowLeases
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthLeases
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipLeases(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthLeases
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthLeases
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -994,51 +978,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowLeases
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowLeases
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthLeases
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowLeases
@@ -1048,41 +995,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowLeases
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowLeases
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthLeases
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowLeases
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthLeases
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipLeases(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthLeases
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthLeases
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -1537,37 +1523,37 @@
 }
 
 var fileDescriptorLeases = []byte{
-	// 501 bytes of a gzipped FileDescriptorProto
+	// 504 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xdf, 0x8a, 0xd3, 0x40,
-	0x14, 0xc6, 0x3b, 0xa9, 0x8d, 0xf6, 0xd4, 0x15, 0x19, 0x96, 0x25, 0x44, 0x4c, 0x4b, 0x10, 0xb6,
-	0xf8, 0x67, 0xe2, 0xd6, 0x9b, 0x75, 0x15, 0xc1, 0x6e, 0x17, 0x14, 0x82, 0x48, 0xf0, 0x42, 0xbc,
-	0x59, 0xd2, 0xf6, 0x6c, 0x0c, 0xa6, 0x9d, 0x98, 0x99, 0x16, 0x7a, 0xe7, 0x23, 0xf8, 0x08, 0x3e,
-	0x84, 0x0f, 0xd1, 0x4b, 0x2f, 0xbd, 0x5a, 0xdd, 0xdc, 0xf9, 0x16, 0x92, 0x99, 0x84, 0xfd, 0x23,
-	0xda, 0x2a, 0xde, 0x9d, 0xc9, 0x7c, 0xdf, 0x39, 0xbf, 0xf3, 0xc1, 0x04, 0x06, 0x51, 0x2c, 0xdf,
-	0xce, 0x86, 0x6c, 0xc4, 0x27, 0xde, 0x88, 0x4f, 0x65, 0x18, 0x4f, 0x31, 0x1b, 0x9f, 0x2d, 0xc3,
-	0x34, 0xf6, 0x04, 0x66, 0xf3, 0x78, 0x84, 0xc2, 0x4b, 0x30, 0x14, 0x28, 0xbc, 0xf9, 0x4e, 0x59,
-	0xb1, 0x34, 0xe3, 0x92, 0xd3, 0x9b, 0xa7, 0x7a, 0x56, 0x69, 0x59, 0xa9, 0x98, 0xef, 0xd8, 0x9b,
-	0x11, 0x8f, 0xb8, 0x52, 0x7a, 0x45, 0xa5, 0x4d, 0xf6, 0x8d, 0x88, 0xf3, 0x28, 0x41, 0x4f, 0x9d,
-	0x86, 0xb3, 0x23, 0x0f, 0x27, 0xa9, 0x5c, 0x94, 0x97, 0xed, 0x8b, 0x97, 0x32, 0x9e, 0xa0, 0x90,
-	0xe1, 0x24, 0xd5, 0x02, 0xf7, 0x07, 0x81, 0x86, 0x5f, 0x4c, 0xa0, 0x5b, 0x60, 0xc4, 0x63, 0x8b,
-	0x74, 0x48, 0xb7, 0xd9, 0x37, 0xf3, 0xe3, 0xb6, 0xf1, 0x7c, 0x10, 0x18, 0xf1, 0x98, 0xee, 0x03,
-	0x8c, 0x32, 0x0c, 0x25, 0x8e, 0x0f, 0x43, 0x69, 0x19, 0x1d, 0xd2, 0x6d, 0xf5, 0x6c, 0xa6, 0xfb,
-	0xb2, 0xaa, 0x2f, 0x7b, 0x55, 0xf5, 0xed, 0x5f, 0x59, 0x1e, 0xb7, 0x6b, 0x1f, 0xbf, 0xb5, 0x49,
-	0xd0, 0x2c, 0x7d, 0x4f, 0x25, 0x7d, 0x06, 0x66, 0x12, 0x0e, 0x31, 0x11, 0x56, 0xbd, 0x53, 0xef,
-	0xb6, 0x7a, 0xf7, 0xd9, 0x1f, 0x57, 0x65, 0x0a, 0x89, 0xf9, 0xca, 0x72, 0x30, 0x95, 0xd9, 0x22,
-	0x28, 0xfd, 0xf6, 0x43, 0x68, 0x9d, 0xf9, 0x4c, 0xaf, 0x43, 0xfd, 0x1d, 0x2e, 0x34, 0x76, 0x50,
-	0x94, 0x74, 0x13, 0x1a, 0xf3, 0x30, 0x99, 0xa1, 0x42, 0x6d, 0x06, 0xfa, 0xb0, 0x67, 0xec, 0x12,
-	0xf7, 0x33, 0x81, 0x8d, 0x7d, 0x85, 0x14, 0xe0, 0xfb, 0x19, 0x0a, 0xf9, 0xdb, 0x9d, 0x5f, 0x5e,
-	0xc0, 0xdd, 0x5d, 0x81, 0x7b, 0xae, 0xeb, 0xff, 0xc6, 0xf6, 0xe1, 0x5a, 0xd5, 0x5f, 0xa4, 0x7c,
-	0x2a, 0x90, 0xee, 0x41, 0x43, 0xcd, 0x56, 0xfe, 0x56, 0xef, 0xd6, 0x3a, 0x61, 0x06, 0xda, 0xe2,
-	0x6e, 0xc3, 0xc6, 0x00, 0x13, 0x5c, 0x99, 0x81, 0xbb, 0x0d, 0x2d, 0x3f, 0x16, 0xb2, 0x92, 0x59,
-	0x70, 0xf9, 0x28, 0x4e, 0x24, 0x66, 0xc2, 0x22, 0x9d, 0x7a, 0xb7, 0x19, 0x54, 0x47, 0xd7, 0x87,
-	0xab, 0x5a, 0x58, 0xd2, 0x3d, 0x06, 0x53, 0xcf, 0x56, 0xc2, 0x75, 0xf1, 0x4a, 0x4f, 0xef, 0x93,
-	0x01, 0xa6, 0xfa, 0x22, 0x28, 0x82, 0xa9, 0x17, 0xa7, 0x77, 0xff, 0x26, 0x7f, 0xfb, 0xde, 0x9a,
-	0xea, 0x92, 0xf7, 0x05, 0x98, 0x3a, 0x91, 0x95, 0x63, 0xce, 0x05, 0x67, 0x6f, 0xfd, 0xf2, 0x08,
-	0x0e, 0x8a, 0x97, 0x47, 0x0f, 0xe1, 0x52, 0x91, 0x07, 0xbd, 0xbd, 0x6a, 0xef, 0xd3, 0x74, 0xed,
-	0x3b, 0x6b, 0x69, 0x35, 0x70, 0xff, 0xf5, 0xf2, 0xc4, 0xa9, 0x7d, 0x3d, 0x71, 0x6a, 0x1f, 0x72,
-	0x87, 0x2c, 0x73, 0x87, 0x7c, 0xc9, 0x1d, 0xf2, 0x3d, 0x77, 0xc8, 0x9b, 0x27, 0xff, 0xf8, 0x1b,
-	0x7a, 0xa4, 0xab, 0xa1, 0xa9, 0x56, 0x79, 0xf0, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xb9, 0xa6,
-	0x63, 0xcf, 0x04, 0x00, 0x00,
+	0x14, 0xc6, 0x3b, 0xa9, 0x8d, 0xf6, 0xc4, 0x15, 0x19, 0x96, 0x25, 0x44, 0x4c, 0x4b, 0x10, 0xb6,
+	0xf8, 0x67, 0xe2, 0xd6, 0x9b, 0x75, 0x15, 0xc1, 0x6e, 0x17, 0x14, 0x82, 0x48, 0xf0, 0x62, 0xf1,
+	0x66, 0x49, 0xdb, 0xb3, 0x31, 0x98, 0x36, 0x31, 0x33, 0x2d, 0xf4, 0xce, 0x47, 0xf0, 0x11, 0x7c,
+	0x08, 0x1f, 0xa2, 0x97, 0x5e, 0x7a, 0xb5, 0xba, 0xb9, 0xf3, 0x2d, 0x24, 0x33, 0x09, 0xbb, 0x5b,
+	0xd1, 0x56, 0xd9, 0xbb, 0x33, 0x99, 0xef, 0x3b, 0xe7, 0x77, 0x3e, 0x98, 0x40, 0x3f, 0x8c, 0xc4,
+	0xbb, 0xe9, 0x80, 0x0d, 0x93, 0xb1, 0x3b, 0x4c, 0x26, 0x22, 0x88, 0x26, 0x98, 0x8d, 0xce, 0x97,
+	0x41, 0x1a, 0xb9, 0x1c, 0xb3, 0x59, 0x34, 0x44, 0xee, 0xc6, 0x18, 0x70, 0xe4, 0xee, 0x6c, 0xa7,
+	0xac, 0x58, 0x9a, 0x25, 0x22, 0xa1, 0xb7, 0xcf, 0xf4, 0xac, 0xd2, 0xb2, 0x52, 0x31, 0xdb, 0xb1,
+	0x36, 0xc3, 0x24, 0x4c, 0xa4, 0xd2, 0x2d, 0x2a, 0x65, 0xb2, 0x6e, 0x85, 0x49, 0x12, 0xc6, 0xe8,
+	0xca, 0xd3, 0x60, 0x7a, 0xec, 0xe2, 0x38, 0x15, 0xf3, 0xf2, 0xb2, 0xb5, 0x7c, 0x29, 0xa2, 0x31,
+	0x72, 0x11, 0x8c, 0x53, 0x25, 0x70, 0x7e, 0x12, 0x68, 0x78, 0xc5, 0x04, 0xba, 0x05, 0x5a, 0x34,
+	0x32, 0x49, 0x9b, 0x74, 0x9a, 0x3d, 0x3d, 0x3f, 0x69, 0x69, 0x2f, 0xfb, 0xbe, 0x16, 0x8d, 0xe8,
+	0x3e, 0xc0, 0x30, 0xc3, 0x40, 0xe0, 0xe8, 0x28, 0x10, 0xa6, 0xd6, 0x26, 0x1d, 0xa3, 0x6b, 0x31,
+	0xd5, 0x97, 0x55, 0x7d, 0xd9, 0x9b, 0xaa, 0x6f, 0xef, 0xda, 0xe2, 0xa4, 0x55, 0xfb, 0xf4, 0xbd,
+	0x45, 0xfc, 0x66, 0xe9, 0x7b, 0x2e, 0xe8, 0x0b, 0xd0, 0xe3, 0x60, 0x80, 0x31, 0x37, 0xeb, 0xed,
+	0x7a, 0xc7, 0xe8, 0x3e, 0x64, 0x7f, 0x5d, 0x95, 0x49, 0x24, 0xe6, 0x49, 0xcb, 0xc1, 0x44, 0x64,
+	0x73, 0xbf, 0xf4, 0x5b, 0x8f, 0xc1, 0x38, 0xf7, 0x99, 0xde, 0x84, 0xfa, 0x7b, 0x9c, 0x2b, 0x6c,
+	0xbf, 0x28, 0xe9, 0x26, 0x34, 0x66, 0x41, 0x3c, 0x45, 0x89, 0xda, 0xf4, 0xd5, 0x61, 0x4f, 0xdb,
+	0x25, 0xce, 0x17, 0x02, 0x1b, 0xfb, 0x12, 0xc9, 0xc7, 0x0f, 0x53, 0xe4, 0xe2, 0x8f, 0x3b, 0xbf,
+	0x5e, 0xc2, 0xdd, 0x5d, 0x81, 0x7b, 0xa1, 0xeb, 0x65, 0x63, 0x7b, 0x70, 0xa3, 0xea, 0xcf, 0xd3,
+	0x64, 0xc2, 0x91, 0xee, 0x41, 0x43, 0xce, 0x96, 0x7e, 0xa3, 0x7b, 0x67, 0x9d, 0x30, 0x7d, 0x65,
+	0x71, 0xb6, 0x61, 0xa3, 0x8f, 0x31, 0xae, 0xcc, 0xc0, 0xd9, 0x06, 0xc3, 0x8b, 0xb8, 0xa8, 0x64,
+	0x26, 0x5c, 0x3d, 0x8e, 0x62, 0x81, 0x19, 0x37, 0x49, 0xbb, 0xde, 0x69, 0xfa, 0xd5, 0xd1, 0xf1,
+	0xe0, 0xba, 0x12, 0x96, 0x74, 0x4f, 0x41, 0x57, 0xb3, 0xa5, 0x70, 0x5d, 0xbc, 0xd2, 0xd3, 0xfd,
+	0xac, 0x81, 0x2e, 0xbf, 0x70, 0x8a, 0xa0, 0xab, 0xc5, 0xe9, 0xfd, 0x7f, 0xc9, 0xdf, 0x7a, 0xb0,
+	0xa6, 0xba, 0xe4, 0x7d, 0x05, 0xba, 0x4a, 0x64, 0xe5, 0x98, 0x0b, 0xc1, 0x59, 0x5b, 0xbf, 0x3d,
+	0x82, 0x83, 0xe2, 0xe5, 0xd1, 0x23, 0xb8, 0x52, 0xe4, 0x41, 0xef, 0xae, 0xda, 0xfb, 0x2c, 0x5d,
+	0xeb, 0xde, 0x5a, 0x5a, 0x05, 0xdc, 0x3b, 0x5c, 0x9c, 0xda, 0xb5, 0x6f, 0xa7, 0x76, 0xed, 0x63,
+	0x6e, 0x93, 0x45, 0x6e, 0x93, 0xaf, 0xb9, 0x4d, 0x7e, 0xe4, 0x36, 0x79, 0xfb, 0xec, 0x3f, 0x7f,
+	0x43, 0x4f, 0x54, 0x75, 0x58, 0x1b, 0xe8, 0x72, 0x99, 0x47, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff,
+	0xad, 0x77, 0xda, 0x73, 0xd1, 0x04, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto
index 29d58d1..1002a2d 100644
--- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto
+++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto
@@ -2,7 +2,7 @@
 
 package containerd.services.leases.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/timestamp.proto";
 
@@ -19,7 +19,7 @@
 	// or retained by other resources during the lease.
 	rpc Delete(DeleteRequest) returns (google.protobuf.Empty);
 
-	// ListTransactions lists all active leases, returning the full list of
+	// List lists all active leases, returning the full list of
 	// leases and optionally including the referenced resources.
 	rpc List(ListRequest) returns (ListResponse);
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
index e98330b..822e348 100644
--- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
-// DO NOT EDIT!
 
 /*
 	Package namespaces is a generated protocol buffer package.
@@ -25,8 +24,9 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
 import google_protobuf2 "github.com/gogo/protobuf/types"
 
 import (
@@ -653,24 +653,6 @@
 	return i, nil
 }
 
-func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1001,51 +983,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowNamespace
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowNamespace
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthNamespace
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowNamespace
@@ -1055,41 +1000,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowNamespace
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowNamespace
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowNamespace
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipNamespace(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthNamespace
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthNamespace
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -1973,40 +1957,40 @@
 }
 
 var fileDescriptorNamespace = []byte{
-	// 547 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
-	0x10, 0xcd, 0x26, 0xc1, 0x52, 0xc6, 0x17, 0xb4, 0x04, 0x13, 0x19, 0xc9, 0x04, 0x9f, 0x8a, 0x54,
-	0xad, 0xd5, 0x20, 0x41, 0x3f, 0x6e, 0x85, 0xb6, 0x07, 0x0a, 0x42, 0x96, 0xb8, 0xc0, 0x01, 0x9c,
-	0x64, 0xe3, 0x9a, 0x38, 0xb6, 0xf1, 0xae, 0x2d, 0x45, 0x1c, 0xe0, 0xdf, 0x70, 0xe1, 0x87, 0xe4,
-	0xc8, 0x91, 0x13, 0x6a, 0xf3, 0x4b, 0xd0, 0xae, 0x9d, 0x38, 0x34, 0x46, 0xb8, 0x81, 0x72, 0x9b,
-	0xb1, 0xf7, 0xcd, 0x7b, 0x3b, 0x7a, 0xcf, 0x86, 0x67, 0xae, 0xc7, 0xcf, 0x92, 0x3e, 0x19, 0x84,
-	0x13, 0x6b, 0x10, 0x06, 0xdc, 0xf1, 0x02, 0x1a, 0x0f, 0x57, 0x4b, 0x27, 0xf2, 0x2c, 0x46, 0xe3,
-	0xd4, 0x1b, 0x50, 0x66, 0x05, 0xce, 0x84, 0xb2, 0xc8, 0x11, 0x65, 0xba, 0x53, 0x74, 0x24, 0x8a,
-	0x43, 0x1e, 0xe2, 0xfb, 0x05, 0x8c, 0x2c, 0x20, 0xa4, 0x80, 0x90, 0x74, 0x47, 0x6f, 0xbb, 0xa1,
-	0x1b, 0xca, 0xd3, 0x96, 0xa8, 0x32, 0xa0, 0x7e, 0xd7, 0x0d, 0x43, 0xd7, 0xa7, 0x96, 0xec, 0xfa,
-	0xc9, 0xc8, 0xa2, 0x93, 0x88, 0x4f, 0xf3, 0x97, 0xdd, 0xcb, 0x2f, 0x47, 0x1e, 0xf5, 0x87, 0x6f,
-	0x27, 0x0e, 0x1b, 0x67, 0x27, 0xcc, 0xaf, 0x08, 0x5a, 0x2f, 0x16, 0x34, 0x18, 0x43, 0x53, 0x70,
-	0x76, 0x50, 0x17, 0x6d, 0xb5, 0x6c, 0x59, 0xe3, 0x97, 0xa0, 0xf8, 0x4e, 0x9f, 0xfa, 0xac, 0x53,
-	0xef, 0x36, 0xb6, 0xd4, 0xde, 0x2e, 0xf9, 0xa3, 0x54, 0xb2, 0x9c, 0x48, 0x4e, 0x25, 0xf4, 0x28,
-	0xe0, 0xf1, 0xd4, 0xce, 0xe7, 0xe8, 0x7b, 0xa0, 0xae, 0x3c, 0xc6, 0x37, 0xa1, 0x31, 0xa6, 0xd3,
-	0x9c, 0x53, 0x94, 0xb8, 0x0d, 0x37, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, 0xcd, 0x7e,
-	0x7d, 0x17, 0x99, 0x0f, 0xe0, 0xd6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0xfd, 0x90, 0x50, 0xc6, 0xcb,
-	0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf5, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, 0x62, 0x25,
-	0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0x8f, 0x7b, 0x35, 0xbb, 0x18, 0x62, 0x5a,
-	0x70, 0xfb, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7, 0x71, 0x2e,
-	0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, 0x5c, 0xf8,
-	0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x41, 0x7b, 0x12, 0x53, 0x87, 0xd3, 0xb5, 0xb5, 0xfd, 0xfb,
-	0x55, 0x8c, 0xe1, 0xce, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x0b, 0x02, 0xed, 0x55, 0x34, 0xfc, 0x2f,
-	0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, 0x0b, 0x18,
-	0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x77, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, 0xc5, 0x5a,
-	0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4a, 0x7d, 0x5a, 0xb2, 0x95, 0x92, 0x98, 0xf4,
-	0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0xa3, 0x0a, 0x12, 0x4a, 0x82,
-	0xa8, 0x3f, 0xbe, 0x32, 0x2e, 0x5f, 0xc3, 0x47, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, 0x97, 0xd2,
-	0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x4f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, 0x94, 0x87,
-	0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, 0x09, 0xf8,
-	0x9d, 0x0b, 0xdf, 0x80, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, 0x1a, 0x8e,
-	0xc4, 0xbf, 0xe8, 0xf0, 0xdd, 0xec, 0xc2, 0xa8, 0x7d, 0xbf, 0x30, 0x6a, 0x9f, 0xe7, 0x06, 0x9a,
-	0xcd, 0x0d, 0xf4, 0x6d, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1f, 0xff, 0xc5, 0x2f, 0xf4, 0xa0,
-	0xe8, 0xfa, 0x8a, 0x64, 0x7c, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xe8, 0x4d, 0xe1, 0x93,
-	0x07, 0x00, 0x00,
+	// 551 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c,
+	0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0x7a, 0xf3, 0x69, 0x08, 0x26, 0x32, 0x92, 0x09, 0x5e, 0x15,
+	0xa9, 0x1a, 0xab, 0x41, 0x82, 0xfe, 0xec, 0x0a, 0x6d, 0x17, 0x14, 0x84, 0x2c, 0x21, 0x21, 0x58,
+	0x80, 0x93, 0x4c, 0x5c, 0x13, 0xc7, 0x36, 0x9e, 0xb1, 0xa5, 0x88, 0x05, 0xbc, 0x0d, 0x1b, 0x1e,
+	0x24, 0x4b, 0x96, 0xac, 0x50, 0x9b, 0x27, 0x41, 0x33, 0x76, 0xe2, 0xd0, 0x18, 0xe1, 0x06, 0xca,
+	0xee, 0x5e, 0x7b, 0xce, 0x3d, 0x67, 0xae, 0xce, 0xb1, 0xe1, 0x89, 0xeb, 0xf1, 0xb3, 0xa4, 0x4f,
+	0x06, 0xe1, 0xc4, 0x1a, 0x84, 0x01, 0x77, 0xbc, 0x80, 0xc6, 0xc3, 0xd5, 0xd2, 0x89, 0x3c, 0x8b,
+	0xd1, 0x38, 0xf5, 0x06, 0x94, 0x59, 0x81, 0x33, 0xa1, 0x2c, 0x72, 0x44, 0x99, 0xee, 0x14, 0x1d,
+	0x89, 0xe2, 0x90, 0x87, 0xf8, 0x6e, 0x01, 0x23, 0x0b, 0x08, 0x29, 0x20, 0x24, 0xdd, 0xd1, 0xdb,
+	0x6e, 0xe8, 0x86, 0xf2, 0xb4, 0x25, 0xaa, 0x0c, 0xa8, 0xdf, 0x76, 0xc3, 0xd0, 0xf5, 0xa9, 0x25,
+	0xbb, 0x7e, 0x32, 0xb2, 0xe8, 0x24, 0xe2, 0xd3, 0xfc, 0x65, 0xf7, 0xf2, 0xcb, 0x91, 0x47, 0xfd,
+	0xe1, 0x9b, 0x89, 0xc3, 0xc6, 0xd9, 0x09, 0xf3, 0x0b, 0x82, 0xd6, 0xb3, 0x05, 0x0d, 0xc6, 0xd0,
+	0x14, 0x9c, 0x1d, 0xd4, 0x45, 0x5b, 0x2d, 0x5b, 0xd6, 0xf8, 0x39, 0x28, 0xbe, 0xd3, 0xa7, 0x3e,
+	0xeb, 0xd4, 0xbb, 0x8d, 0x2d, 0xb5, 0xb7, 0x4b, 0x7e, 0x2b, 0x95, 0x2c, 0x27, 0x92, 0x53, 0x09,
+	0x3d, 0x0a, 0x78, 0x3c, 0xb5, 0xf3, 0x39, 0xfa, 0x1e, 0xa8, 0x2b, 0x8f, 0xf1, 0xff, 0xd0, 0x18,
+	0xd3, 0x69, 0xce, 0x29, 0x4a, 0xdc, 0x86, 0xff, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65,
+	0xcd, 0x7e, 0x7d, 0x17, 0x99, 0xf7, 0xe0, 0xc6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0x7d, 0x9f, 0x50,
+	0xc6, 0xcb, 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf9, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96,
+	0x62, 0x25, 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0xf7, 0x3b, 0x35, 0xbb, 0x18,
+	0x62, 0x5a, 0x70, 0xf3, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7,
+	0x71, 0x2e, 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90,
+	0x5c, 0xf8, 0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x40, 0x7b, 0x14, 0x53, 0x87, 0xd3, 0xb5, 0xb5,
+	0xfd, 0xfd, 0x55, 0x8c, 0xe1, 0xd6, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x33, 0x02, 0xed, 0x45, 0x34,
+	0xfc, 0x27, 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24,
+	0x0b, 0x18, 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x75, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8,
+	0xc5, 0x5a, 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4c, 0x7d, 0x5a, 0xb2, 0x95, 0x92,
+	0x98, 0xf4, 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0x83, 0x0a, 0x12,
+	0x4a, 0x82, 0xa8, 0x3f, 0xbc, 0x32, 0x2e, 0x5f, 0xc3, 0x07, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f,
+	0x97, 0xd2, 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x8f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86,
+	0x94, 0x87, 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b,
+	0x09, 0xf8, 0x95, 0x0b, 0x5f, 0x83, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a,
+	0x1a, 0x8e, 0xc4, 0xbf, 0xe8, 0xf0, 0xed, 0xec, 0xc2, 0xa8, 0x7d, 0xbb, 0x30, 0x6a, 0x9f, 0xe6,
+	0x06, 0x9a, 0xcd, 0x0d, 0xf4, 0x75, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1d, 0xff, 0xc1, 0x2f,
+	0xf4, 0xa0, 0xe8, 0x5e, 0xd6, 0xfa, 0x8a, 0xe4, 0xbc, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4f,
+	0x4a, 0x87, 0xf3, 0x95, 0x07, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
index d58a8c2..c22eeba 100644
--- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
@@ -2,7 +2,7 @@
 
 package containerd.services.namespaces.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
similarity index 87%
rename from vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go
rename to vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
index ae3c792..4cf68f0 100644
--- a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go
@@ -1,12 +1,11 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
 
 /*
-	Package snapshot is a generated protocol buffer package.
+	Package snapshots is a generated protocol buffer package.
 
 	It is generated from these files:
-		github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
+		github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
 
 	It has these top-level messages:
 		PrepareSnapshotRequest
@@ -27,13 +26,14 @@
 		UsageRequest
 		UsageResponse
 */
-package snapshot
+package snapshots
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
 import google_protobuf2 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 import containerd_types "github.com/containerd/containerd/api/types"
@@ -651,7 +651,7 @@
 			ServerStreams: true,
 		},
 	},
-	Metadata: "github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto",
+	Metadata: "github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto",
 }
 
 func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) {
@@ -1267,24 +1267,6 @@
 	return i, nil
 }
 
-func encodeFixed64Snapshots(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Snapshots(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintSnapshots(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1953,51 +1935,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthSnapshots
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowSnapshots
@@ -2007,41 +1952,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowSnapshots
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSnapshots(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthSnapshots
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -2287,51 +2271,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthSnapshots
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowSnapshots
@@ -2341,41 +2288,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowSnapshots
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSnapshots(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthSnapshots
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -2918,51 +2904,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthSnapshots
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowSnapshots
@@ -2972,41 +2921,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowSnapshots
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSnapshots(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthSnapshots
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -3329,51 +3317,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			var keykey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				keykey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			var stringLenmapkey uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLenmapkey |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLenmapkey := int(stringLenmapkey)
-			if intStringLenmapkey < 0 {
-				return ErrInvalidLengthSnapshots
-			}
-			postStringIndexmapkey := iNdEx + intStringLenmapkey
-			if postStringIndexmapkey > l {
-				return io.ErrUnexpectedEOF
-			}
-			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
-			iNdEx = postStringIndexmapkey
 			if m.Labels == nil {
 				m.Labels = make(map[string]string)
 			}
-			if iNdEx < postIndex {
-				var valuekey uint64
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
 				for shift := uint(0); ; shift += 7 {
 					if shift >= 64 {
 						return ErrIntOverflowSnapshots
@@ -3383,41 +3334,80 @@
 					}
 					b := dAtA[iNdEx]
 					iNdEx++
-					valuekey |= (uint64(b) & 0x7F) << shift
+					wire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
 						break
 					}
 				}
-				var stringLenmapvalue uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowSnapshots
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
-					if iNdEx >= l {
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
 						return io.ErrUnexpectedEOF
 					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSnapshots
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
 					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSnapshots(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthSnapshots
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
 				}
-				intStringLenmapvalue := int(stringLenmapvalue)
-				if intStringLenmapvalue < 0 {
-					return ErrInvalidLengthSnapshots
-				}
-				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
-				if postStringIndexmapvalue > l {
-					return io.ErrUnexpectedEOF
-				}
-				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
-				iNdEx = postStringIndexmapvalue
-				m.Labels[mapkey] = mapvalue
-			} else {
-				var mapvalue string
-				m.Labels[mapkey] = mapvalue
 			}
+			m.Labels[mapkey] = mapvalue
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
@@ -4204,72 +4194,72 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto", fileDescriptorSnapshots)
+	proto.RegisterFile("github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", fileDescriptorSnapshots)
 }
 
 var fileDescriptorSnapshots = []byte{
-	// 1004 bytes of a gzipped FileDescriptorProto
+	// 1007 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1a, 0x47,
-	0x14, 0xf7, 0xc0, 0x1a, 0xc7, 0x0f, 0xdb, 0xa5, 0x13, 0x4c, 0xd0, 0xb6, 0xc2, 0x2b, 0x0e, 0x95,
-	0xd5, 0xc3, 0x6e, 0x42, 0xd5, 0xc4, 0x89, 0x2f, 0x05, 0x42, 0x2b, 0xe2, 0xd8, 0xa9, 0x36, 0xb6,
-	0x53, 0xa7, 0x91, 0xa2, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5,
+	0x14, 0x67, 0x60, 0x8d, 0xe3, 0x87, 0xed, 0xd2, 0x09, 0x26, 0x68, 0x5b, 0xe1, 0x15, 0x87, 0xca,
+	0xea, 0x61, 0x37, 0xa1, 0x6a, 0xe2, 0xc4, 0x97, 0x62, 0x4c, 0x2b, 0xec, 0xd8, 0xa9, 0x36, 0xb6,
+	0x13, 0xa7, 0x55, 0xa3, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5,
 	0x18, 0xf9, 0xd4, 0x2f, 0xe0, 0x53, 0xfb, 0x21, 0xaa, 0x7e, 0x02, 0x1f, 0x7b, 0xec, 0xa9, 0x6d,
-	0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0xb2, 0xa1, 0xb7, 0xb7, 0x33,
-	0xf3, 0xde, 0xfb, 0xbd, 0xdf, 0x9b, 0xf7, 0xde, 0x2c, 0x54, 0x1b, 0x36, 0x3b, 0xed, 0x1e, 0xeb,
-	0x35, 0xcf, 0x31, 0x6a, 0x9e, 0xcb, 0x2c, 0xdb, 0x25, 0x9d, 0xfa, 0xa8, 0x68, 0xb5, 0x6d, 0x83,
-	0x92, 0x4e, 0xcf, 0xae, 0x11, 0x6a, 0x50, 0xd7, 0x6a, 0xd3, 0x53, 0x8f, 0x19, 0xbd, 0x3b, 0x03,
-	0x99, 0xea, 0xed, 0x8e, 0xc7, 0x3c, 0xac, 0x0d, 0x95, 0xf4, 0x40, 0x41, 0x1f, 0x1e, 0xea, 0xdd,
-	0x51, 0xd3, 0x0d, 0xaf, 0xe1, 0x89, 0xc3, 0x06, 0x97, 0x7c, 0x3d, 0xf5, 0xbd, 0x86, 0xe7, 0x35,
-	0x5a, 0xc4, 0x10, 0x5f, 0xc7, 0xdd, 0x13, 0x83, 0x38, 0x6d, 0xd6, 0x97, 0x9b, 0xda, 0xf8, 0xe6,
-	0x89, 0x4d, 0x5a, 0xf5, 0x97, 0x8e, 0x45, 0x9b, 0xf2, 0xc4, 0xc6, 0xf8, 0x09, 0x66, 0x3b, 0x84,
-	0x32, 0xcb, 0x69, 0xcb, 0x03, 0x77, 0x43, 0x85, 0xc8, 0xfa, 0x6d, 0x42, 0x0d, 0xc7, 0xeb, 0xba,
-	0xcc, 0xd7, 0xcb, 0xff, 0x85, 0x20, 0xf3, 0x79, 0x87, 0xb4, 0xad, 0x0e, 0x79, 0x2a, 0xa3, 0x30,
-	0xc9, 0x57, 0x5d, 0x42, 0x19, 0xd6, 0x20, 0x19, 0x04, 0xc6, 0x48, 0x27, 0x8b, 0x34, 0xb4, 0xb9,
-	0x6c, 0x8e, 0x2e, 0xe1, 0x14, 0xc4, 0x9b, 0xa4, 0x9f, 0x8d, 0x89, 0x1d, 0x2e, 0xe2, 0x0c, 0x24,
-	0xb8, 0x29, 0x97, 0x65, 0xe3, 0x62, 0x51, 0x7e, 0xe1, 0x17, 0x90, 0x68, 0x59, 0xc7, 0xa4, 0x45,
-	0xb3, 0x8a, 0x16, 0xdf, 0x4c, 0x16, 0x1e, 0xea, 0xd3, 0x78, 0xd4, 0x27, 0xa3, 0xd2, 0x1f, 0x0b,
-	0x33, 0x15, 0x97, 0x75, 0xfa, 0xa6, 0xb4, 0xa9, 0xde, 0x87, 0xe4, 0xc8, 0x72, 0x00, 0x0b, 0x0d,
-	0x61, 0xa5, 0x61, 0xb1, 0x67, 0xb5, 0xba, 0x44, 0x42, 0xf5, 0x3f, 0x1e, 0xc4, 0xb6, 0x50, 0xfe,
-	0x11, 0xdc, 0xba, 0xe6, 0x88, 0xb6, 0x3d, 0x97, 0x12, 0x6c, 0x40, 0x42, 0x30, 0x45, 0xb3, 0x48,
-	0x60, 0xbe, 0x35, 0x8a, 0x59, 0x30, 0xa9, 0xef, 0xf2, 0x7d, 0x53, 0x1e, 0xcb, 0xff, 0x89, 0xe0,
-	0xe6, 0xa1, 0x4d, 0x5e, 0xfd, 0x9f, 0x44, 0x1e, 0x8d, 0x11, 0x59, 0x9c, 0x4e, 0xe4, 0x04, 0x48,
-	0xf3, 0x66, 0xf1, 0x33, 0x48, 0x5f, 0xf5, 0x12, 0x95, 0xc2, 0x32, 0xac, 0x8a, 0x05, 0xfa, 0x16,
-	0xdc, 0xe5, 0x8b, 0xb0, 0x16, 0x18, 0x89, 0x8a, 0x63, 0x07, 0xd6, 0x4d, 0xe2, 0x78, 0xbd, 0x79,
-	0x14, 0x05, 0xbf, 0x17, 0xeb, 0x65, 0xcf, 0x71, 0x6c, 0x36, 0xbb, 0x35, 0x0c, 0x8a, 0x6b, 0x39,
-	0x01, 0xe5, 0x42, 0x0e, 0x3c, 0xc4, 0x87, 0x99, 0xf9, 0x72, 0xec, 0x56, 0x94, 0xa7, 0xdf, 0x8a,
-	0x89, 0x80, 0xe6, 0x7d, 0x2f, 0xaa, 0x70, 0xf3, 0x29, 0xb3, 0xd8, 0x3c, 0x48, 0xfc, 0x27, 0x06,
-	0x4a, 0xd5, 0x3d, 0xf1, 0x06, 0x8c, 0xa0, 0x11, 0x46, 0x86, 0xd5, 0x12, 0xbb, 0x52, 0x2d, 0x0f,
-	0x40, 0x69, 0xda, 0x6e, 0x5d, 0x50, 0xb5, 0x56, 0xf8, 0x60, 0x3a, 0x2b, 0x3b, 0xb6, 0x5b, 0x37,
-	0x85, 0x0e, 0x2e, 0x03, 0xd4, 0x3a, 0xc4, 0x62, 0xa4, 0xfe, 0xd2, 0x62, 0x59, 0x45, 0x43, 0x9b,
-	0xc9, 0x82, 0xaa, 0xfb, 0x7d, 0x58, 0x0f, 0xfa, 0xb0, 0xbe, 0x1f, 0xf4, 0xe1, 0xd2, 0x8d, 0x8b,
-	0xdf, 0x36, 0x16, 0xbe, 0xff, 0x7d, 0x03, 0x99, 0xcb, 0x52, 0xaf, 0xc8, 0xb8, 0x91, 0x6e, 0xbb,
-	0x1e, 0x18, 0x59, 0x9c, 0xc5, 0x88, 0xd4, 0x2b, 0x32, 0xfc, 0x68, 0x90, 0xdd, 0x84, 0xc8, 0x6e,
-	0x61, 0x7a, 0x1c, 0x9c, 0xa9, 0x79, 0x27, 0xf3, 0x0b, 0x48, 0x5f, 0x4d, 0xa6, 0x2c, 0xae, 0x4f,
-	0x40, 0xb1, 0xdd, 0x13, 0x4f, 0x18, 0x49, 0x86, 0x21, 0x99, 0x83, 0x2b, 0x29, 0x3c, 0x52, 0x53,
-	0x68, 0xe6, 0x7f, 0x42, 0xb0, 0x7e, 0x20, 0xc2, 0x9d, 0xfd, 0xa6, 0x04, 0xde, 0x63, 0x51, 0xbd,
-	0xe3, 0x6d, 0x48, 0xfa, 0x5c, 0x8b, 0x81, 0x2b, 0xee, 0xca, 0xa4, 0x24, 0x7d, 0xca, 0x67, 0xf2,
-	0xae, 0x45, 0x9b, 0xa6, 0x4c, 0x29, 0x97, 0xf3, 0xcf, 0x21, 0x33, 0x8e, 0x7c, 0x6e, 0xb4, 0x6c,
-	0x41, 0xfa, 0xb1, 0x4d, 0x07, 0x84, 0x87, 0xef, 0x89, 0xf9, 0x23, 0x58, 0x1f, 0xd3, 0xbc, 0x06,
-	0x2a, 0x1e, 0x11, 0x54, 0x09, 0x56, 0x0e, 0xa8, 0xd5, 0x20, 0x6f, 0x53, 0xcb, 0xdb, 0xb0, 0x2a,
-	0x6d, 0x48, 0x58, 0x18, 0x14, 0x6a, 0x7f, 0xed, 0xd7, 0x74, 0xdc, 0x14, 0x32, 0xaf, 0x69, 0xdb,
-	0xf5, 0xea, 0x84, 0x0a, 0xcd, 0xb8, 0x29, 0xbf, 0x3e, 0x7c, 0x8d, 0x40, 0xe1, 0x65, 0x8a, 0xdf,
-	0x87, 0xa5, 0x83, 0xbd, 0x9d, 0xbd, 0x27, 0xcf, 0xf6, 0x52, 0x0b, 0xea, 0x3b, 0x67, 0xe7, 0x5a,
-	0x92, 0x2f, 0x1f, 0xb8, 0x4d, 0xd7, 0x7b, 0xe5, 0xe2, 0x0c, 0x28, 0x87, 0xd5, 0xca, 0xb3, 0x14,
-	0x52, 0x57, 0xce, 0xce, 0xb5, 0x1b, 0x7c, 0x8b, 0x8f, 0x28, 0xac, 0x42, 0xa2, 0x58, 0xde, 0xaf,
-	0x1e, 0x56, 0x52, 0x31, 0x75, 0xed, 0xec, 0x5c, 0x03, 0xbe, 0x53, 0xac, 0x31, 0xbb, 0x47, 0xb0,
-	0x06, 0xcb, 0xe5, 0x27, 0xbb, 0xbb, 0xd5, 0xfd, 0xfd, 0xca, 0xc3, 0x54, 0x5c, 0x7d, 0xf7, 0xec,
-	0x5c, 0x5b, 0xe5, 0xdb, 0x7e, 0xaf, 0x64, 0xa4, 0xae, 0xae, 0xbc, 0xfe, 0x21, 0xb7, 0xf0, 0xf3,
-	0x8f, 0x39, 0x81, 0xa0, 0xf0, 0xf7, 0x12, 0x2c, 0x0f, 0x38, 0xc6, 0xdf, 0xc2, 0x92, 0x7c, 0x4a,
-	0xe0, 0xad, 0xa8, 0xcf, 0x1b, 0xf5, 0x7e, 0x04, 0x4d, 0x49, 0x62, 0x17, 0x14, 0x11, 0xe1, 0xc7,
-	0x91, 0x9e, 0x04, 0xea, 0xdd, 0x59, 0xd5, 0xa4, 0xdb, 0x26, 0x24, 0xfc, 0x69, 0x8b, 0x8d, 0xe9,
-	0x16, 0xae, 0x0c, 0x77, 0xf5, 0x76, 0x78, 0x05, 0xe9, 0xec, 0x08, 0x12, 0x7e, 0x32, 0xf0, 0xbd,
-	0x88, 0x23, 0x4e, 0xcd, 0x5c, 0xab, 0xec, 0x0a, 0x7f, 0x8a, 0x73, 0xd3, 0xfe, 0xc8, 0x0f, 0x63,
-	0x7a, 0xe2, 0xe3, 0xe0, 0x3f, 0x4d, 0x77, 0x41, 0xe1, 0x9d, 0x33, 0x4c, 0x66, 0x26, 0x8c, 0xcb,
-	0x30, 0x99, 0x99, 0xd8, 0x98, 0xbf, 0x81, 0x84, 0xdf, 0x9b, 0xc2, 0x44, 0x34, 0xb1, 0xff, 0xaa,
-	0x5b, 0xb3, 0x2b, 0x4a, 0xe7, 0x7d, 0x50, 0x78, 0x0b, 0xc2, 0x21, 0xc0, 0x4f, 0x6a, 0x72, 0xea,
-	0xbd, 0x99, 0xf5, 0x7c, 0xc7, 0xb7, 0x11, 0x3e, 0x85, 0x45, 0xd1, 0x5e, 0xb0, 0x1e, 0x02, 0xfd,
-	0x48, 0x2f, 0x53, 0x8d, 0xd0, 0xe7, 0x7d, 0x5f, 0xa5, 0x17, 0x17, 0x6f, 0x72, 0x0b, 0xbf, 0xbe,
-	0xc9, 0x2d, 0x7c, 0x77, 0x99, 0x43, 0x17, 0x97, 0x39, 0xf4, 0xcb, 0x65, 0x0e, 0xfd, 0x71, 0x99,
-	0x43, 0xcf, 0x4b, 0x91, 0x7f, 0x39, 0xb7, 0x03, 0xf9, 0x38, 0x21, 0xae, 0xd1, 0x47, 0xff, 0x06,
-	0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0x35, 0xc8, 0xbf, 0x0e, 0x00, 0x00,
+	0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0x82, 0xc9, 0x6d, 0x66, 0x67,
+	0x7e, 0xef, 0xfd, 0xe6, 0xf7, 0xe6, 0xbd, 0x37, 0x0b, 0xdb, 0x35, 0x9b, 0x9d, 0xb6, 0x8f, 0xf5,
+	0x8a, 0xe7, 0x18, 0x15, 0xcf, 0x65, 0x96, 0xed, 0x92, 0x56, 0x75, 0x70, 0x68, 0x35, 0x6d, 0x83,
+	0x92, 0x56, 0xc7, 0xae, 0x10, 0x6a, 0x50, 0xd7, 0x6a, 0xd2, 0x53, 0x8f, 0x51, 0xa3, 0x73, 0xaf,
+	0x3f, 0xd1, 0x9b, 0x2d, 0x8f, 0x79, 0x58, 0xeb, 0xa3, 0xf4, 0x00, 0xa1, 0xf7, 0x37, 0x75, 0xee,
+	0xa9, 0xa9, 0x9a, 0x57, 0xf3, 0xc4, 0x66, 0x83, 0x8f, 0x7c, 0x9c, 0xfa, 0x5e, 0xcd, 0xf3, 0x6a,
+	0x0d, 0x62, 0x88, 0xd9, 0x71, 0xfb, 0xc4, 0x20, 0x4e, 0x93, 0x75, 0xe5, 0xa2, 0x36, 0xbc, 0x78,
+	0x62, 0x93, 0x46, 0xf5, 0xa5, 0x63, 0xd1, 0xba, 0xdc, 0xb1, 0x3a, 0xbc, 0x83, 0xd9, 0x0e, 0xa1,
+	0xcc, 0x72, 0x9a, 0x72, 0xc3, 0xfd, 0x50, 0x67, 0x64, 0xdd, 0x26, 0xa1, 0x86, 0xe3, 0xb5, 0x5d,
+	0xe6, 0xe3, 0x72, 0x7f, 0x23, 0x48, 0x7f, 0xde, 0x22, 0x4d, 0xab, 0x45, 0x9e, 0xca, 0x53, 0x98,
+	0xe4, 0xeb, 0x36, 0xa1, 0x0c, 0x6b, 0x90, 0x08, 0x0e, 0xc6, 0x48, 0x2b, 0x83, 0x34, 0xb4, 0xb6,
+	0x60, 0x0e, 0x7e, 0xc2, 0x49, 0x88, 0xd5, 0x49, 0x37, 0x13, 0x15, 0x2b, 0x7c, 0x88, 0xd3, 0x10,
+	0xe7, 0xa6, 0x5c, 0x96, 0x89, 0x89, 0x8f, 0x72, 0x86, 0xbf, 0x84, 0x78, 0xc3, 0x3a, 0x26, 0x0d,
+	0x9a, 0x51, 0xb4, 0xd8, 0x5a, 0x22, 0xbf, 0xa5, 0x8f, 0xd3, 0x51, 0x1f, 0xcd, 0x4a, 0x7f, 0x2c,
+	0xcc, 0x94, 0x5c, 0xd6, 0xea, 0x9a, 0xd2, 0xa6, 0xfa, 0x10, 0x12, 0x03, 0x9f, 0x03, 0x5a, 0xa8,
+	0x4f, 0x2b, 0x05, 0x73, 0x1d, 0xab, 0xd1, 0x26, 0x92, 0xaa, 0x3f, 0x79, 0x14, 0x5d, 0x47, 0xb9,
+	0x6d, 0xb8, 0x73, 0xcd, 0x11, 0x6d, 0x7a, 0x2e, 0x25, 0xd8, 0x80, 0xb8, 0x50, 0x8a, 0x66, 0x90,
+	0xe0, 0x7c, 0x67, 0x90, 0xb3, 0x50, 0x52, 0xdf, 0xe5, 0xeb, 0xa6, 0xdc, 0x96, 0xfb, 0x0b, 0xc1,
+	0xed, 0x43, 0x9b, 0xbc, 0x7a, 0x9b, 0x42, 0x1e, 0x0d, 0x09, 0x59, 0x18, 0x2f, 0xe4, 0x08, 0x4a,
+	0xb3, 0x56, 0xf1, 0x33, 0x48, 0x5d, 0xf5, 0x32, 0xad, 0x84, 0x45, 0x58, 0x12, 0x1f, 0xe8, 0x0d,
+	0xb4, 0xcb, 0x15, 0x60, 0x39, 0x30, 0x32, 0x2d, 0x8f, 0x1d, 0x58, 0x31, 0x89, 0xe3, 0x75, 0x66,
+	0x91, 0x14, 0xfc, 0x5e, 0xac, 0x14, 0x3d, 0xc7, 0xb1, 0xd9, 0xe4, 0xd6, 0x30, 0x28, 0xae, 0xe5,
+	0x04, 0x92, 0x8b, 0x71, 0xe0, 0x21, 0xd6, 0x8f, 0xcc, 0x17, 0x43, 0xb7, 0xa2, 0x38, 0xfe, 0x56,
+	0x8c, 0x24, 0x34, 0xeb, 0x7b, 0x51, 0x86, 0xdb, 0x4f, 0x99, 0xc5, 0x66, 0x21, 0xe2, 0xbf, 0x51,
+	0x50, 0xca, 0xee, 0x89, 0xd7, 0x53, 0x04, 0x0d, 0x28, 0xd2, 0xcf, 0x96, 0xe8, 0x95, 0x6c, 0x79,
+	0x04, 0x4a, 0xdd, 0x76, 0xab, 0x42, 0xaa, 0xe5, 0xfc, 0x07, 0xe3, 0x55, 0xd9, 0xb1, 0xdd, 0xaa,
+	0x29, 0x30, 0xb8, 0x08, 0x50, 0x69, 0x11, 0x8b, 0x91, 0xea, 0x4b, 0x8b, 0x65, 0x14, 0x0d, 0xad,
+	0x25, 0xf2, 0xaa, 0xee, 0xd7, 0x61, 0x3d, 0xa8, 0xc3, 0xfa, 0x7e, 0x50, 0x87, 0x37, 0x6f, 0x5d,
+	0xfc, 0xbe, 0x1a, 0xf9, 0xe1, 0x8f, 0x55, 0x64, 0x2e, 0x48, 0x5c, 0x81, 0x71, 0x23, 0xed, 0x66,
+	0x35, 0x30, 0x32, 0x37, 0x89, 0x11, 0x89, 0x2b, 0x30, 0xbc, 0xdd, 0x8b, 0x6e, 0x5c, 0x44, 0x37,
+	0x3f, 0xfe, 0x1c, 0x5c, 0xa9, 0x59, 0x07, 0xf3, 0x39, 0xa4, 0xae, 0x06, 0x53, 0x26, 0xd7, 0x27,
+	0xa0, 0xd8, 0xee, 0x89, 0x27, 0x8c, 0x24, 0xc2, 0x88, 0xcc, 0xc9, 0x6d, 0x2a, 0xfc, 0xa4, 0xa6,
+	0x40, 0xe6, 0x7e, 0x46, 0xb0, 0x72, 0x20, 0x8e, 0x3b, 0xf9, 0x4d, 0x09, 0xbc, 0x47, 0xa7, 0xf5,
+	0x8e, 0x37, 0x20, 0xe1, 0x6b, 0x2d, 0x1a, 0xae, 0xb8, 0x2b, 0xa3, 0x82, 0xf4, 0x29, 0xef, 0xc9,
+	0xbb, 0x16, 0xad, 0x9b, 0x32, 0xa4, 0x7c, 0x9c, 0x7b, 0x01, 0xe9, 0x61, 0xe6, 0x33, 0x93, 0x65,
+	0x1d, 0x52, 0x8f, 0x6d, 0xda, 0x13, 0x3c, 0x7c, 0x4d, 0xcc, 0x1d, 0xc1, 0xca, 0x10, 0xf2, 0x1a,
+	0xa9, 0xd8, 0x94, 0xa4, 0x36, 0x61, 0xf1, 0x80, 0x5a, 0x35, 0x72, 0x93, 0x5c, 0xde, 0x80, 0x25,
+	0x69, 0x43, 0xd2, 0xc2, 0xa0, 0x50, 0xfb, 0x1b, 0x3f, 0xa7, 0x63, 0xa6, 0x18, 0xf3, 0x9c, 0xb6,
+	0x5d, 0xaf, 0x4a, 0xa8, 0x40, 0xc6, 0x4c, 0x39, 0xfb, 0xf0, 0x35, 0x02, 0x85, 0xa7, 0x29, 0x7e,
+	0x1f, 0xe6, 0x0f, 0xf6, 0x76, 0xf6, 0x9e, 0x3c, 0xdb, 0x4b, 0x46, 0xd4, 0x77, 0xce, 0xce, 0xb5,
+	0x04, 0xff, 0x7c, 0xe0, 0xd6, 0x5d, 0xef, 0x95, 0x8b, 0xd3, 0xa0, 0x1c, 0x96, 0x4b, 0xcf, 0x92,
+	0x48, 0x5d, 0x3c, 0x3b, 0xd7, 0x6e, 0xf1, 0x25, 0xde, 0xa2, 0xb0, 0x0a, 0xf1, 0x42, 0x71, 0xbf,
+	0x7c, 0x58, 0x4a, 0x46, 0xd5, 0xe5, 0xb3, 0x73, 0x0d, 0xf8, 0x4a, 0xa1, 0xc2, 0xec, 0x0e, 0xc1,
+	0x1a, 0x2c, 0x14, 0x9f, 0xec, 0xee, 0x96, 0xf7, 0xf7, 0x4b, 0x5b, 0xc9, 0x98, 0xfa, 0xee, 0xd9,
+	0xb9, 0xb6, 0xc4, 0x97, 0xfd, 0x5a, 0xc9, 0x48, 0x55, 0x5d, 0x7c, 0xfd, 0x63, 0x36, 0xf2, 0xcb,
+	0x4f, 0x59, 0xc1, 0x20, 0xff, 0xcf, 0x3c, 0x2c, 0xf4, 0x34, 0xc6, 0xdf, 0xc1, 0xbc, 0x7c, 0x4a,
+	0xe0, 0xf5, 0x69, 0x9f, 0x37, 0xea, 0xc3, 0x29, 0x90, 0x52, 0xc4, 0x36, 0x28, 0xe2, 0x84, 0x1f,
+	0x4f, 0xf5, 0x24, 0x50, 0xef, 0x4f, 0x0a, 0x93, 0x6e, 0xeb, 0x10, 0xf7, 0xbb, 0x2d, 0x36, 0xc6,
+	0x5b, 0xb8, 0xd2, 0xdc, 0xd5, 0xbb, 0xe1, 0x01, 0xd2, 0xd9, 0x11, 0xc4, 0xfd, 0x60, 0xe0, 0x07,
+	0x53, 0xb6, 0x38, 0x35, 0x7d, 0x2d, 0xb3, 0x4b, 0xfc, 0x29, 0xce, 0x4d, 0xfb, 0x2d, 0x3f, 0x8c,
+	0xe9, 0x91, 0x8f, 0x83, 0xff, 0x35, 0xdd, 0x06, 0x85, 0x57, 0xce, 0x30, 0x91, 0x19, 0xd1, 0x2e,
+	0xc3, 0x44, 0x66, 0x64, 0x61, 0xfe, 0x16, 0xe2, 0x7e, 0x6d, 0x0a, 0x73, 0xa2, 0x91, 0xf5, 0x57,
+	0x5d, 0x9f, 0x1c, 0x28, 0x9d, 0x77, 0x41, 0xe1, 0x25, 0x08, 0x87, 0x20, 0x3f, 0xaa, 0xc8, 0xa9,
+	0x0f, 0x26, 0xc6, 0xf9, 0x8e, 0xef, 0x22, 0x7c, 0x0a, 0x73, 0xa2, 0xbc, 0x60, 0x3d, 0x04, 0xfb,
+	0x81, 0x5a, 0xa6, 0x1a, 0xa1, 0xf7, 0xfb, 0xbe, 0x36, 0xbf, 0xba, 0x78, 0x93, 0x8d, 0xfc, 0xf6,
+	0x26, 0x1b, 0xf9, 0xfe, 0x32, 0x8b, 0x2e, 0x2e, 0xb3, 0xe8, 0xd7, 0xcb, 0x2c, 0xfa, 0xf3, 0x32,
+	0x8b, 0x5e, 0x6c, 0x4d, 0xff, 0xcf, 0xb9, 0xd1, 0x9b, 0x3c, 0x8f, 0x1c, 0xc7, 0xc5, 0x55, 0xfa,
+	0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xa0, 0xb2, 0xda, 0xc4, 0x0e, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
similarity index 98%
rename from vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
rename to vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
index f8c0895..0e62add 100644
--- a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
@@ -2,13 +2,13 @@
 
 package containerd.services.snapshots.v1;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
 
-option go_package = "github.com/containerd/containerd/api/services/snapshot/v1;snapshot";
+option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots";
 
 // Snapshot service manages snapshots
 service Snapshots {
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
index 0f58768..83c18f6 100644
--- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
-// DO NOT EDIT!
 
 /*
 	Package tasks is a generated protocol buffer package.
@@ -42,9 +41,10 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf "github.com/gogo/protobuf/types"
 import google_protobuf1 "github.com/gogo/protobuf/types"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import containerd_types "github.com/containerd/containerd/api/types"
 import containerd_types1 "github.com/containerd/containerd/api/types"
 import containerd_types2 "github.com/containerd/containerd/api/types"
@@ -1890,24 +1890,6 @@
 	return i, nil
 }
 
-func encodeFixed64Tasks(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Tasks(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintTasks(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -5725,88 +5707,88 @@
 }
 
 var fileDescriptorTasks = []byte{
-	// 1317 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45,
-	0x1b, 0xee, 0xfa, 0xec, 0xd7, 0x49, 0x9b, 0xec, 0x97, 0xe6, 0x33, 0x4b, 0x15, 0x87, 0xe5, 0xc6,
-	0x04, 0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x07, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x72,
-	0x50, 0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0xb8, 0x80,
-	0x9f, 0xd0, 0x5b, 0x6e, 0xf8, 0x3d, 0xb9, 0xe4, 0x12, 0xa1, 0x2a, 0x50, 0xff, 0x0b, 0xee, 0xd0,
-	0x1c, 0x76, 0xb3, 0xb1, 0x63, 0xaf, 0x93, 0x34, 0xdc, 0xb4, 0x33, 0xb3, 0xef, 0x69, 0x9e, 0x79,
-	0x0f, 0x8f, 0x03, 0xab, 0x1d, 0xcc, 0x76, 0xfb, 0xcf, 0x2c, 0x8f, 0xf4, 0x6c, 0x8f, 0x04, 0xcc,
-	0xc5, 0x01, 0x8a, 0xfc, 0xf4, 0xd2, 0x0d, 0xb1, 0x4d, 0x51, 0xb4, 0x8f, 0x3d, 0x44, 0x6d, 0xe6,
-	0xd2, 0x3d, 0x6a, 0xef, 0xdf, 0x92, 0x0b, 0x2b, 0x8c, 0x08, 0x23, 0xfa, 0x8d, 0x63, 0x69, 0x2b,
-	0x96, 0xb4, 0xa4, 0xc0, 0xfe, 0x2d, 0xe3, 0xdd, 0x0e, 0x21, 0x9d, 0x2e, 0xb2, 0x85, 0xec, 0xb3,
-	0xfe, 0x8e, 0x8d, 0x7a, 0x21, 0x3b, 0x90, 0xaa, 0xc6, 0x3b, 0xc3, 0x1f, 0xdd, 0x20, 0xfe, 0xb4,
-	0xd0, 0x21, 0x1d, 0x22, 0x96, 0x36, 0x5f, 0xa9, 0xd3, 0x3b, 0x53, 0xc5, 0xcb, 0x0e, 0x42, 0x44,
-	0xed, 0x1e, 0xe9, 0x07, 0x4c, 0xe9, 0x7d, 0x7a, 0x16, 0x3d, 0xc4, 0x22, 0xec, 0xa9, 0xdb, 0x19,
-	0x77, 0xcf, 0xa0, 0xe9, 0x23, 0xea, 0x45, 0x38, 0x64, 0x24, 0x52, 0xca, 0x9f, 0x9d, 0x41, 0x99,
-	0x23, 0x26, 0xfe, 0x51, 0xba, 0x8d, 0x61, 0x6c, 0x18, 0xee, 0x21, 0xca, 0xdc, 0x5e, 0x28, 0x05,
-	0xcc, 0xc3, 0x1c, 0xcc, 0xaf, 0x45, 0xc8, 0x65, 0xe8, 0x89, 0x4b, 0xf7, 0x1c, 0xf4, 0xbc, 0x8f,
-	0x28, 0xd3, 0x5b, 0x30, 0x93, 0x98, 0xdf, 0xc6, 0x7e, 0x5d, 0x5b, 0xd6, 0x9a, 0xd5, 0xd5, 0x6b,
-	0x83, 0xa3, 0x46, 0x6d, 0x2d, 0x3e, 0x6f, 0xaf, 0x3b, 0xb5, 0x44, 0xa8, 0xed, 0xeb, 0x36, 0x94,
-	0x22, 0x42, 0xd8, 0x0e, 0xad, 0xe7, 0x97, 0xf3, 0xcd, 0x5a, 0xeb, 0xff, 0x56, 0xea, 0x49, 0x45,
-	0x74, 0xd6, 0x03, 0x0e, 0xa6, 0xa3, 0xc4, 0xf4, 0x05, 0x28, 0x52, 0xe6, 0xe3, 0xa0, 0x5e, 0xe0,
-	0xd6, 0x1d, 0xb9, 0xd1, 0x17, 0xa1, 0x44, 0x99, 0x4f, 0xfa, 0xac, 0x5e, 0x14, 0xc7, 0x6a, 0xa7,
-	0xce, 0x51, 0x14, 0xd5, 0x4b, 0xc9, 0x39, 0x8a, 0x22, 0xdd, 0x80, 0x0a, 0x43, 0x51, 0x0f, 0x07,
-	0x6e, 0xb7, 0x5e, 0x5e, 0xd6, 0x9a, 0x15, 0x27, 0xd9, 0xeb, 0xf7, 0x00, 0xbc, 0x5d, 0xe4, 0xed,
-	0x85, 0x04, 0x07, 0xac, 0x5e, 0x59, 0xd6, 0x9a, 0xb5, 0xd6, 0x8d, 0xd1, 0xb0, 0xd6, 0x13, 0xc4,
-	0x9d, 0x94, 0xbc, 0x6e, 0x41, 0x99, 0x84, 0x0c, 0x93, 0x80, 0xd6, 0xab, 0x42, 0x75, 0xc1, 0x92,
-	0x68, 0x5a, 0x31, 0x9a, 0xd6, 0xfd, 0xe0, 0xc0, 0x89, 0x85, 0xcc, 0xa7, 0xa0, 0xa7, 0x91, 0xa4,
-	0x21, 0x09, 0x28, 0x3a, 0x17, 0x94, 0x73, 0x90, 0x0f, 0xb1, 0x5f, 0xcf, 0x2d, 0x6b, 0xcd, 0x59,
-	0x87, 0x2f, 0xcd, 0x0e, 0xcc, 0x3c, 0x66, 0x6e, 0xc4, 0x2e, 0xf2, 0x40, 0xef, 0x43, 0x19, 0xbd,
-	0x44, 0xde, 0xb6, 0xb2, 0x5c, 0x5d, 0x85, 0xc1, 0x51, 0xa3, 0xb4, 0xf1, 0x12, 0x79, 0xed, 0x75,
-	0xa7, 0xc4, 0x3f, 0xb5, 0x7d, 0xf3, 0x3d, 0x98, 0x55, 0x8e, 0x54, 0xfc, 0x2a, 0x16, 0xed, 0x38,
-	0x96, 0x4d, 0x98, 0x5f, 0x47, 0x5d, 0x74, 0xe1, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xaa, 0xb4, 0x94,
-	0x78, 0x5b, 0x84, 0x5c, 0xa2, 0x5c, 0x1a, 0x1c, 0x35, 0x72, 0xed, 0x75, 0x27, 0x87, 0x4f, 0x41,
-	0x44, 0x6f, 0x40, 0x0d, 0xbd, 0xc4, 0x6c, 0x9b, 0x32, 0x97, 0xf5, 0x79, 0xce, 0xf1, 0x2f, 0xc0,
-	0x8f, 0x1e, 0x8b, 0x13, 0xfd, 0x3e, 0x54, 0xf9, 0x0e, 0xf9, 0xdb, 0x2e, 0x13, 0x29, 0x56, 0x6b,
-	0x19, 0x23, 0x0f, 0xf8, 0x24, 0x2e, 0x87, 0xd5, 0xca, 0xe1, 0x51, 0xe3, 0xca, 0xab, 0xbf, 0x1a,
-	0x9a, 0x53, 0x91, 0x6a, 0xf7, 0x99, 0x49, 0x60, 0x41, 0xc6, 0xb7, 0x15, 0x11, 0x0f, 0x51, 0x7a,
-	0xe9, 0xe8, 0x23, 0x80, 0x4d, 0x74, 0xf9, 0x8f, 0xbc, 0x01, 0x35, 0xe1, 0x46, 0x81, 0x7e, 0x07,
-	0xca, 0xa1, 0xbc, 0xa0, 0x70, 0x31, 0x54, 0x23, 0xfb, 0xb7, 0x54, 0x99, 0xc4, 0x20, 0xc4, 0xc2,
-	0xe6, 0x0a, 0xcc, 0x7d, 0x85, 0x29, 0xe3, 0x69, 0x90, 0x40, 0xb3, 0x08, 0xa5, 0x1d, 0xdc, 0x65,
-	0x28, 0x92, 0xd1, 0x3a, 0x6a, 0xc7, 0x93, 0x26, 0x25, 0x9b, 0xd4, 0x46, 0x51, 0xb4, 0xf8, 0xba,
-	0x26, 0x3a, 0xc6, 0x64, 0xb7, 0x52, 0xd4, 0x7c, 0xa5, 0x41, 0xed, 0x4b, 0xdc, 0xed, 0x5e, 0x36,
-	0x48, 0xa2, 0xe1, 0xe0, 0x0e, 0x6f, 0x2b, 0x32, 0xb7, 0xd4, 0x8e, 0xa7, 0xa2, 0xdb, 0xed, 0x8a,
-	0x8c, 0xaa, 0x38, 0x7c, 0x69, 0xfe, 0xa3, 0x81, 0xce, 0x95, 0xdf, 0x42, 0x96, 0x24, 0x3d, 0x31,
-	0x77, 0x7a, 0x4f, 0xcc, 0x8f, 0xe9, 0x89, 0x85, 0xb1, 0x3d, 0xb1, 0x38, 0xd4, 0x13, 0x9b, 0x50,
-	0xa0, 0x21, 0xf2, 0x44, 0x17, 0x1d, 0xd7, 0xd2, 0x84, 0x44, 0x1a, 0xa5, 0xf2, 0xd8, 0x54, 0xba,
-	0x0e, 0xff, 0x3b, 0x71, 0x75, 0xf9, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0x39, 0x88, 0xe2, 0x1f, 0xd1,
-	0x16, 0x3b, 0xb8, 0xf4, 0xa7, 0x5a, 0x80, 0xe2, 0x0b, 0xec, 0xb3, 0x5d, 0xf5, 0x52, 0x72, 0xc3,
-	0xd1, 0xd9, 0x45, 0xb8, 0xb3, 0x2b, 0xab, 0x7f, 0xd6, 0x51, 0x3b, 0xf3, 0x67, 0xb8, 0xba, 0xd6,
-	0x25, 0x14, 0xb5, 0x1f, 0xfe, 0x17, 0x81, 0xc9, 0xe7, 0xcc, 0x8b, 0x57, 0x90, 0x1b, 0xf3, 0x0b,
-	0x98, 0xdb, 0x72, 0xfb, 0xf4, 0xc2, 0xfd, 0x73, 0x13, 0xe6, 0x1d, 0x44, 0xfb, 0xbd, 0x0b, 0x1b,
-	0xda, 0x80, 0x6b, 0xbc, 0x38, 0xb7, 0xb0, 0x7f, 0x91, 0xe4, 0x35, 0x1d, 0xd9, 0x0f, 0xa4, 0x19,
-	0x55, 0xe2, 0x9f, 0x43, 0x55, 0xb5, 0x0b, 0x14, 0x97, 0xf9, 0xf2, 0xa4, 0x32, 0x6f, 0x07, 0x3b,
-	0xc4, 0x39, 0x56, 0x31, 0x5f, 0x6b, 0x70, 0x7d, 0x2d, 0x99, 0xc9, 0x17, 0xe5, 0x28, 0xdb, 0x30,
-	0x1f, 0xba, 0x11, 0x0a, 0xd8, 0x76, 0x8a, 0x17, 0xc8, 0xe7, 0x6b, 0xf1, 0xfe, 0xff, 0xe7, 0x51,
-	0x63, 0x25, 0xc5, 0xb6, 0x48, 0x88, 0x82, 0x44, 0x9d, 0xda, 0x1d, 0x72, 0xd3, 0xc7, 0x1d, 0x44,
-	0x99, 0xb5, 0x2e, 0xfe, 0x73, 0xe6, 0xa4, 0xb1, 0xb5, 0x53, 0x39, 0x43, 0x7e, 0x1a, 0xce, 0xf0,
-	0x1d, 0x2c, 0x0e, 0xdf, 0x2e, 0x01, 0xae, 0x76, 0xcc, 0x04, 0x4f, 0xed, 0x90, 0x23, 0xe4, 0x25,
-	0xad, 0x60, 0xfe, 0x04, 0xf3, 0x5f, 0x87, 0xfe, 0x5b, 0xe0, 0x75, 0x2d, 0xa8, 0x46, 0x88, 0x92,
-	0x7e, 0xe4, 0x21, 0x2a, 0xb0, 0x1a, 0x77, 0xa9, 0x63, 0x31, 0x73, 0x05, 0xae, 0x3e, 0x90, 0x04,
-	0x38, 0xf6, 0x5c, 0x87, 0xb2, 0x9c, 0x04, 0xf2, 0x2a, 0x55, 0x27, 0xde, 0xf2, 0xe4, 0x4b, 0x64,
-	0x93, 0xb9, 0x50, 0x56, 0xfc, 0x59, 0xdd, 0xbb, 0x7e, 0x0a, 0x97, 0x14, 0x02, 0x4e, 0x2c, 0x68,
-	0xee, 0x40, 0xed, 0x5b, 0x17, 0x5f, 0xfe, 0xec, 0x8c, 0x60, 0x46, 0xfa, 0x51, 0xb1, 0x0e, 0xf1,
-	0x10, 0x6d, 0x32, 0x0f, 0xc9, 0x9d, 0x87, 0x87, 0xb4, 0x5e, 0xcf, 0x40, 0x51, 0x4c, 0x4e, 0x7d,
-	0x0f, 0x4a, 0x92, 0x63, 0xea, 0xb6, 0x35, 0xe9, 0x17, 0x93, 0x35, 0xc2, 0xe9, 0x8d, 0x8f, 0xa7,
-	0x57, 0x50, 0x57, 0xfb, 0x01, 0x8a, 0x82, 0x0b, 0xea, 0x2b, 0x93, 0x55, 0xd3, 0xcc, 0xd4, 0xf8,
-	0x70, 0x2a, 0x59, 0xe5, 0xa1, 0x03, 0x25, 0x49, 0xb0, 0xb2, 0xae, 0x33, 0x42, 0x38, 0x8d, 0x8f,
-	0xa6, 0x51, 0x48, 0x1c, 0x3d, 0x87, 0xd9, 0x13, 0x4c, 0x4e, 0x6f, 0x4d, 0xa3, 0x7e, 0x72, 0xa0,
-	0x9f, 0xd1, 0xe5, 0x53, 0xc8, 0x6f, 0x22, 0xa6, 0x37, 0x27, 0x2b, 0x1d, 0xd3, 0x3d, 0xe3, 0x83,
-	0x29, 0x24, 0x13, 0xdc, 0x0a, 0xbc, 0xd3, 0xea, 0xd6, 0x64, 0x95, 0x61, 0x76, 0x66, 0xd8, 0x53,
-	0xcb, 0x2b, 0x47, 0x6d, 0x28, 0x70, 0xb2, 0xa5, 0x67, 0xc4, 0x96, 0x22, 0x64, 0xc6, 0xe2, 0x48,
-	0x72, 0x6f, 0xf0, 0x1f, 0xeb, 0xfa, 0x16, 0x14, 0x78, 0x29, 0xe9, 0x19, 0x79, 0x38, 0x4a, 0xa4,
-	0xc6, 0x5a, 0x7c, 0x0c, 0xd5, 0x84, 0x63, 0x64, 0x41, 0x31, 0x4c, 0x46, 0xc6, 0x1a, 0x7d, 0x08,
-	0x65, 0xc5, 0x0e, 0xf4, 0x8c, 0xf7, 0x3e, 0x49, 0x22, 0x26, 0x18, 0x2c, 0x8a, 0x69, 0x9f, 0x15,
-	0xe1, 0x30, 0x25, 0x18, 0x6b, 0xf0, 0x11, 0x94, 0xe4, 0xd8, 0xcf, 0x2a, 0x9a, 0x11, 0x72, 0x30,
-	0xd6, 0x24, 0x86, 0x4a, 0x3c, 0xb9, 0xf5, 0x9b, 0xd9, 0x39, 0x92, 0x22, 0x0a, 0x86, 0x35, 0xad,
-	0xb8, 0xca, 0xa8, 0x17, 0x00, 0xa9, 0x79, 0x79, 0x3b, 0x03, 0xe2, 0xd3, 0x26, 0xbf, 0xf1, 0xc9,
-	0xd9, 0x94, 0x94, 0xe3, 0x47, 0x50, 0x92, 0x03, 0x31, 0x0b, 0xb6, 0x91, 0xb1, 0x39, 0x16, 0xb6,
-	0x1d, 0x28, 0xab, 0xd1, 0x95, 0x95, 0x2b, 0x27, 0xa7, 0xa1, 0x71, 0x73, 0x4a, 0x69, 0x15, 0xfa,
-	0xf7, 0x50, 0xe0, 0x33, 0x27, 0xab, 0x0a, 0x53, 0xf3, 0xcf, 0x58, 0x99, 0x46, 0x54, 0x9a, 0x5f,
-	0xfd, 0xe6, 0xf0, 0xcd, 0xd2, 0x95, 0x3f, 0xde, 0x2c, 0x5d, 0xf9, 0x65, 0xb0, 0xa4, 0x1d, 0x0e,
-	0x96, 0xb4, 0xdf, 0x07, 0x4b, 0xda, 0xdf, 0x83, 0x25, 0xed, 0xe9, 0xbd, 0xf3, 0xfd, 0x65, 0xef,
-	0xae, 0x58, 0x3c, 0x2b, 0x09, 0xb8, 0x6e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x67, 0xc5, 0x63,
-	0x32, 0x20, 0x14, 0x00, 0x00,
+	// 1318 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0x45,
+	0x1c, 0xef, 0xfa, 0xed, 0xbf, 0x93, 0x36, 0x59, 0xd2, 0x60, 0x96, 0x2a, 0x0e, 0xcb, 0xc5, 0x04,
+	0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x0f, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x02, 0x55,
+	0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0x38, 0xc0, 0x47,
+	0xe8, 0x95, 0x0b, 0x9f, 0x27, 0x47, 0x8e, 0x08, 0x55, 0x81, 0xfa, 0x5b, 0x70, 0x43, 0xf3, 0xd8,
+	0xcd, 0xc6, 0x8e, 0xbd, 0x4e, 0xd3, 0x70, 0x69, 0x67, 0x66, 0xff, 0xaf, 0xf9, 0xcd, 0xff, 0xf1,
+	0x73, 0x60, 0xb5, 0x83, 0xd9, 0x6e, 0xff, 0xa9, 0xe5, 0x91, 0x9e, 0xed, 0x91, 0x80, 0xb9, 0x38,
+	0x40, 0x91, 0x9f, 0x5e, 0xba, 0x21, 0xb6, 0x29, 0x8a, 0xf6, 0xb1, 0x87, 0xa8, 0xcd, 0x5c, 0xba,
+	0x47, 0xed, 0xfd, 0x1b, 0x72, 0x61, 0x85, 0x11, 0x61, 0x44, 0xbf, 0x76, 0x2c, 0x6d, 0xc5, 0x92,
+	0x96, 0x14, 0xd8, 0xbf, 0x61, 0xbc, 0xdf, 0x21, 0xa4, 0xd3, 0x45, 0xb6, 0x90, 0x7d, 0xda, 0xdf,
+	0xb1, 0x51, 0x2f, 0x64, 0x07, 0x52, 0xd5, 0x78, 0x6f, 0xf8, 0xa3, 0x1b, 0xc4, 0x9f, 0x16, 0x3a,
+	0xa4, 0x43, 0xc4, 0xd2, 0xe6, 0x2b, 0x75, 0x7a, 0x6b, 0xaa, 0x78, 0xd9, 0x41, 0x88, 0xa8, 0xdd,
+	0x23, 0xfd, 0x80, 0x29, 0xbd, 0xcf, 0xcf, 0xa2, 0x87, 0x58, 0x84, 0x3d, 0x75, 0x3b, 0xe3, 0xf6,
+	0x19, 0x34, 0x7d, 0x44, 0xbd, 0x08, 0x87, 0x8c, 0x44, 0x4a, 0xf9, 0x8b, 0x33, 0x28, 0x73, 0xc4,
+	0xc4, 0x3f, 0x4a, 0xb7, 0x31, 0x8c, 0x0d, 0xc3, 0x3d, 0x44, 0x99, 0xdb, 0x0b, 0xa5, 0x80, 0x79,
+	0x98, 0x83, 0xf9, 0xb5, 0x08, 0xb9, 0x0c, 0x3d, 0x72, 0xe9, 0x9e, 0x83, 0x9e, 0xf5, 0x11, 0x65,
+	0x7a, 0x0b, 0x66, 0x12, 0xf3, 0xdb, 0xd8, 0xaf, 0x6b, 0xcb, 0x5a, 0xb3, 0xba, 0x7a, 0x65, 0x70,
+	0xd4, 0xa8, 0xad, 0xc5, 0xe7, 0xed, 0x75, 0xa7, 0x96, 0x08, 0xb5, 0x7d, 0xdd, 0x86, 0x52, 0x44,
+	0x08, 0xdb, 0xa1, 0xf5, 0xfc, 0x72, 0xbe, 0x59, 0x6b, 0xbd, 0x6b, 0xa5, 0x9e, 0x54, 0x44, 0x67,
+	0xdd, 0xe3, 0x60, 0x3a, 0x4a, 0x4c, 0x5f, 0x80, 0x22, 0x65, 0x3e, 0x0e, 0xea, 0x05, 0x6e, 0xdd,
+	0x91, 0x1b, 0x7d, 0x11, 0x4a, 0x94, 0xf9, 0xa4, 0xcf, 0xea, 0x45, 0x71, 0xac, 0x76, 0xea, 0x1c,
+	0x45, 0x51, 0xbd, 0x94, 0x9c, 0xa3, 0x28, 0xd2, 0x0d, 0xa8, 0x30, 0x14, 0xf5, 0x70, 0xe0, 0x76,
+	0xeb, 0xe5, 0x65, 0xad, 0x59, 0x71, 0x92, 0xbd, 0x7e, 0x07, 0xc0, 0xdb, 0x45, 0xde, 0x5e, 0x48,
+	0x70, 0xc0, 0xea, 0x95, 0x65, 0xad, 0x59, 0x6b, 0x5d, 0x1b, 0x0d, 0x6b, 0x3d, 0x41, 0xdc, 0x49,
+	0xc9, 0xeb, 0x16, 0x94, 0x49, 0xc8, 0x30, 0x09, 0x68, 0xbd, 0x2a, 0x54, 0x17, 0x2c, 0x89, 0xa6,
+	0x15, 0xa3, 0x69, 0xdd, 0x0d, 0x0e, 0x9c, 0x58, 0xc8, 0x7c, 0x02, 0x7a, 0x1a, 0x49, 0x1a, 0x92,
+	0x80, 0xa2, 0x37, 0x82, 0x72, 0x0e, 0xf2, 0x21, 0xf6, 0xeb, 0xb9, 0x65, 0xad, 0x39, 0xeb, 0xf0,
+	0xa5, 0xd9, 0x81, 0x99, 0x87, 0xcc, 0x8d, 0xd8, 0x79, 0x1e, 0xe8, 0x43, 0x28, 0xa3, 0x17, 0xc8,
+	0xdb, 0x56, 0x96, 0xab, 0xab, 0x30, 0x38, 0x6a, 0x94, 0x36, 0x5e, 0x20, 0xaf, 0xbd, 0xee, 0x94,
+	0xf8, 0xa7, 0xb6, 0x6f, 0x7e, 0x00, 0xb3, 0xca, 0x91, 0x8a, 0x5f, 0xc5, 0xa2, 0x1d, 0xc7, 0xb2,
+	0x09, 0xf3, 0xeb, 0xa8, 0x8b, 0xce, 0x9d, 0x31, 0xe6, 0xef, 0x1a, 0x5c, 0x96, 0x96, 0x12, 0x6f,
+	0x8b, 0x90, 0x4b, 0x94, 0x4b, 0x83, 0xa3, 0x46, 0xae, 0xbd, 0xee, 0xe4, 0xf0, 0x29, 0x88, 0xe8,
+	0x0d, 0xa8, 0xa1, 0x17, 0x98, 0x6d, 0x53, 0xe6, 0xb2, 0x3e, 0xcf, 0x39, 0xfe, 0x05, 0xf8, 0xd1,
+	0x43, 0x71, 0xa2, 0xdf, 0x85, 0x2a, 0xdf, 0x21, 0x7f, 0xdb, 0x65, 0x22, 0xc5, 0x6a, 0x2d, 0x63,
+	0xe4, 0x01, 0x1f, 0xc5, 0xe5, 0xb0, 0x5a, 0x39, 0x3c, 0x6a, 0x5c, 0x7a, 0xf9, 0x77, 0x43, 0x73,
+	0x2a, 0x52, 0xed, 0x2e, 0x33, 0x09, 0x2c, 0xc8, 0xf8, 0xb6, 0x22, 0xe2, 0x21, 0x4a, 0x2f, 0x1c,
+	0x7d, 0x04, 0xb0, 0x89, 0x2e, 0xfe, 0x91, 0x37, 0xa0, 0x26, 0xdc, 0x28, 0xd0, 0x6f, 0x41, 0x39,
+	0x94, 0x17, 0x14, 0x2e, 0x86, 0x6a, 0x64, 0xff, 0x86, 0x2a, 0x93, 0x18, 0x84, 0x58, 0xd8, 0x5c,
+	0x81, 0xb9, 0x6f, 0x30, 0x65, 0x3c, 0x0d, 0x12, 0x68, 0x16, 0xa1, 0xb4, 0x83, 0xbb, 0x0c, 0x45,
+	0x32, 0x5a, 0x47, 0xed, 0x78, 0xd2, 0xa4, 0x64, 0x93, 0xda, 0x28, 0x8a, 0x16, 0x5f, 0xd7, 0x44,
+	0xc7, 0x98, 0xec, 0x56, 0x8a, 0x9a, 0x2f, 0x35, 0xa8, 0x7d, 0x8d, 0xbb, 0xdd, 0x8b, 0x06, 0x49,
+	0x34, 0x1c, 0xdc, 0xe1, 0x6d, 0x45, 0xe6, 0x96, 0xda, 0xf1, 0x54, 0x74, 0xbb, 0x5d, 0x91, 0x51,
+	0x15, 0x87, 0x2f, 0xcd, 0x7f, 0x35, 0xd0, 0xb9, 0xf2, 0x5b, 0xc8, 0x92, 0xa4, 0x27, 0xe6, 0x4e,
+	0xef, 0x89, 0xf9, 0x31, 0x3d, 0xb1, 0x30, 0xb6, 0x27, 0x16, 0x87, 0x7a, 0x62, 0x13, 0x0a, 0x34,
+	0x44, 0x9e, 0xe8, 0xa2, 0xe3, 0x5a, 0x9a, 0x90, 0x48, 0xa3, 0x54, 0x1e, 0x9b, 0x4a, 0x57, 0xe1,
+	0x9d, 0x13, 0x57, 0x97, 0x2f, 0x6b, 0xfe, 0xa6, 0xc1, 0x9c, 0x83, 0x28, 0xfe, 0x09, 0x6d, 0xb1,
+	0x83, 0x0b, 0x7f, 0xaa, 0x05, 0x28, 0x3e, 0xc7, 0x3e, 0xdb, 0x55, 0x2f, 0x25, 0x37, 0x1c, 0x9d,
+	0x5d, 0x84, 0x3b, 0xbb, 0xb2, 0xfa, 0x67, 0x1d, 0xb5, 0x33, 0x7f, 0x81, 0xcb, 0x6b, 0x5d, 0x42,
+	0x51, 0xfb, 0xfe, 0xff, 0x11, 0x98, 0x7c, 0xce, 0xbc, 0x78, 0x05, 0xb9, 0x31, 0xbf, 0x82, 0xb9,
+	0x2d, 0xb7, 0x4f, 0xcf, 0xdd, 0x3f, 0x37, 0x61, 0xde, 0x41, 0xb4, 0xdf, 0x3b, 0xb7, 0xa1, 0x0d,
+	0xb8, 0xc2, 0x8b, 0x73, 0x0b, 0xfb, 0xe7, 0x49, 0x5e, 0xd3, 0x91, 0xfd, 0x40, 0x9a, 0x51, 0x25,
+	0xfe, 0x25, 0x54, 0x55, 0xbb, 0x40, 0x71, 0x99, 0x2f, 0x4f, 0x2a, 0xf3, 0x76, 0xb0, 0x43, 0x9c,
+	0x63, 0x15, 0xf3, 0x95, 0x06, 0x57, 0xd7, 0x92, 0x99, 0x7c, 0x5e, 0x8e, 0xb2, 0x0d, 0xf3, 0xa1,
+	0x1b, 0xa1, 0x80, 0x6d, 0xa7, 0x78, 0x81, 0x7c, 0xbe, 0x16, 0xef, 0xff, 0x7f, 0x1d, 0x35, 0x56,
+	0x52, 0x6c, 0x8b, 0x84, 0x28, 0x48, 0xd4, 0xa9, 0xdd, 0x21, 0xd7, 0x7d, 0xdc, 0x41, 0x94, 0x59,
+	0xeb, 0xe2, 0x3f, 0x67, 0x4e, 0x1a, 0x5b, 0x3b, 0x95, 0x33, 0xe4, 0xa7, 0xe1, 0x0c, 0x8f, 0x61,
+	0x71, 0xf8, 0x76, 0x09, 0x70, 0xb5, 0x63, 0x26, 0x78, 0x6a, 0x87, 0x1c, 0x21, 0x2f, 0x69, 0x05,
+	0xf3, 0x67, 0x98, 0xff, 0x36, 0xf4, 0xdf, 0x02, 0xaf, 0x6b, 0x41, 0x35, 0x42, 0x94, 0xf4, 0x23,
+	0x0f, 0x51, 0x81, 0xd5, 0xb8, 0x4b, 0x1d, 0x8b, 0x99, 0x2b, 0x70, 0xf9, 0x9e, 0x24, 0xc0, 0xb1,
+	0xe7, 0x3a, 0x94, 0xe5, 0x24, 0x90, 0x57, 0xa9, 0x3a, 0xf1, 0x96, 0x27, 0x5f, 0x22, 0x9b, 0xcc,
+	0x85, 0xb2, 0xe2, 0xcf, 0xea, 0xde, 0xf5, 0x53, 0xb8, 0xa4, 0x10, 0x70, 0x62, 0x41, 0x73, 0x07,
+	0x6a, 0xdf, 0xbb, 0xf8, 0xe2, 0x67, 0x67, 0x04, 0x33, 0xd2, 0x8f, 0x8a, 0x75, 0x88, 0x87, 0x68,
+	0x93, 0x79, 0x48, 0xee, 0x4d, 0x78, 0x48, 0xeb, 0xd5, 0x0c, 0x14, 0xc5, 0xe4, 0xd4, 0xf7, 0xa0,
+	0x24, 0x39, 0xa6, 0x6e, 0x5b, 0x93, 0x7e, 0x31, 0x59, 0x23, 0x9c, 0xde, 0xf8, 0x74, 0x7a, 0x05,
+	0x75, 0xb5, 0x1f, 0xa1, 0x28, 0xb8, 0xa0, 0xbe, 0x32, 0x59, 0x35, 0xcd, 0x4c, 0x8d, 0x8f, 0xa7,
+	0x92, 0x55, 0x1e, 0x3a, 0x50, 0x92, 0x04, 0x2b, 0xeb, 0x3a, 0x23, 0x84, 0xd3, 0xf8, 0x64, 0x1a,
+	0x85, 0xc4, 0xd1, 0x33, 0x98, 0x3d, 0xc1, 0xe4, 0xf4, 0xd6, 0x34, 0xea, 0x27, 0x07, 0xfa, 0x19,
+	0x5d, 0x3e, 0x81, 0xfc, 0x26, 0x62, 0x7a, 0x73, 0xb2, 0xd2, 0x31, 0xdd, 0x33, 0x3e, 0x9a, 0x42,
+	0x32, 0xc1, 0xad, 0xc0, 0x3b, 0xad, 0x6e, 0x4d, 0x56, 0x19, 0x66, 0x67, 0x86, 0x3d, 0xb5, 0xbc,
+	0x72, 0xd4, 0x86, 0x02, 0x27, 0x5b, 0x7a, 0x46, 0x6c, 0x29, 0x42, 0x66, 0x2c, 0x8e, 0x24, 0xf7,
+	0x06, 0xff, 0xb1, 0xae, 0x6f, 0x41, 0x81, 0x97, 0x92, 0x9e, 0x91, 0x87, 0xa3, 0x44, 0x6a, 0xac,
+	0xc5, 0x87, 0x50, 0x4d, 0x38, 0x46, 0x16, 0x14, 0xc3, 0x64, 0x64, 0xac, 0xd1, 0xfb, 0x50, 0x56,
+	0xec, 0x40, 0xcf, 0x78, 0xef, 0x93, 0x24, 0x62, 0x82, 0xc1, 0xa2, 0x98, 0xf6, 0x59, 0x11, 0x0e,
+	0x53, 0x82, 0xb1, 0x06, 0x1f, 0x40, 0x49, 0x8e, 0xfd, 0xac, 0xa2, 0x19, 0x21, 0x07, 0x63, 0x4d,
+	0x62, 0xa8, 0xc4, 0x93, 0x5b, 0xbf, 0x9e, 0x9d, 0x23, 0x29, 0xa2, 0x60, 0x58, 0xd3, 0x8a, 0xab,
+	0x8c, 0x7a, 0x0e, 0x90, 0x9a, 0x97, 0x37, 0x33, 0x20, 0x3e, 0x6d, 0xf2, 0x1b, 0x9f, 0x9d, 0x4d,
+	0x49, 0x39, 0x7e, 0x00, 0x25, 0x39, 0x10, 0xb3, 0x60, 0x1b, 0x19, 0x9b, 0x63, 0x61, 0xdb, 0x81,
+	0xb2, 0x1a, 0x5d, 0x59, 0xb9, 0x72, 0x72, 0x1a, 0x1a, 0xd7, 0xa7, 0x94, 0x56, 0xa1, 0xff, 0x00,
+	0x05, 0x3e, 0x73, 0xb2, 0xaa, 0x30, 0x35, 0xff, 0x8c, 0x95, 0x69, 0x44, 0xa5, 0xf9, 0xd5, 0xef,
+	0x0e, 0x5f, 0x2f, 0x5d, 0xfa, 0xf3, 0xf5, 0xd2, 0xa5, 0x5f, 0x07, 0x4b, 0xda, 0xe1, 0x60, 0x49,
+	0xfb, 0x63, 0xb0, 0xa4, 0xfd, 0x33, 0x58, 0xd2, 0x9e, 0xdc, 0x79, 0xb3, 0xbf, 0xec, 0xdd, 0x16,
+	0x8b, 0xc7, 0xb9, 0xa7, 0x25, 0x01, 0xd8, 0xcd, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x85, 0xa2,
+	0x4f, 0xd1, 0x22, 0x14, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
index eb37318..90793cb 100644
--- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
@@ -4,7 +4,7 @@
 
 import "google/protobuf/empty.proto";
 import "google/protobuf/any.proto";
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
 import "github.com/containerd/containerd/api/types/metrics.proto";
 import "github.com/containerd/containerd/api/types/descriptor.proto";
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
index c403c84..3f6528a 100644
--- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/services/version/v1/version.proto
-// DO NOT EDIT!
 
 /*
 	Package version is a generated protocol buffer package.
@@ -16,8 +15,9 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/empty"
-import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf "github.com/gogo/protobuf/types"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 import (
 	context "golang.org/x/net/context"
@@ -155,24 +155,6 @@
 	return i, nil
 }
 
-func encodeFixed64Version(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Version(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintVersion(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -446,7 +428,7 @@
 }
 
 var fileDescriptorVersion = []byte{
-	// 241 bytes of a gzipped FileDescriptorProto
+	// 243 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
 	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
@@ -460,7 +442,7 @@
 	0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
 	0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
 	0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
-	0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x26, 0xb1,
-	0x81, 0x1d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x37, 0xd8, 0xc6, 0xa7, 0x01, 0x00,
-	0x00,
+	0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
+	0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
+	0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto
index 2398fdc..0e4c3d1 100644
--- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto
@@ -3,7 +3,7 @@
 package containerd.services.version.v1;
 
 import "google/protobuf/empty.proto";
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 
 // TODO(stevvooe): Should version service actually be versioned?
 option go_package = "github.com/containerd/containerd/api/services/version/v1;version";
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
index 785d050..93e88c0 100644
--- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/descriptor.proto
-// DO NOT EDIT!
 
 /*
 	Package types is a generated protocol buffer package.
@@ -22,7 +21,8 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
 
@@ -95,24 +95,6 @@
 	return i, nil
 }
 
-func encodeFixed64Descriptor(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Descriptor(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -409,7 +391,7 @@
 }
 
 var fileDescriptorDescriptor = []byte{
-	// 232 bytes of a gzipped FileDescriptorProto
+	// 234 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
 	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
@@ -423,6 +405,6 @@
 	0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
 	0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
 	0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
-	0x7c, 0x60, 0x58, 0x83, 0xc9, 0x24, 0x36, 0xb0, 0x07, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
-	0x23, 0x14, 0xc9, 0x7c, 0x47, 0x01, 0x00, 0x00,
+	0x7c, 0x60, 0x58, 0x83, 0xc9, 0x08, 0x86, 0x24, 0x36, 0xb0, 0x17, 0x8d, 0x01, 0x01, 0x00, 0x00,
+	0xff, 0xff, 0xea, 0xac, 0x78, 0x9a, 0x49, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto
index 7975ab0..5c00dca 100644
--- a/vendor/github.com/containerd/containerd/api/types/descriptor.proto
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto
@@ -2,7 +2,7 @@
 
 package containerd.types;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 
 option go_package = "github.com/containerd/containerd/api/types;types";
 
diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
index f9aacf9..da04678 100644
--- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
@@ -1,13 +1,13 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/metrics.proto
-// DO NOT EDIT!
 
 package types
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import google_protobuf1 "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 
@@ -81,24 +81,6 @@
 	return i, nil
 }
 
-func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -409,7 +391,7 @@
 }
 
 var fileDescriptorMetrics = []byte{
-	// 256 bytes of a gzipped FileDescriptorProto
+	// 258 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
 	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
@@ -424,6 +406,7 @@
 	0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
 	0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
 	0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
-	0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x26, 0xb1, 0x81,
-	0xcd, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x51, 0x36, 0x74, 0x83, 0x01, 0x00, 0x00,
+	0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24,
+	0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01,
+	0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto
index d1629c7..0e631d2 100644
--- a/vendor/github.com/containerd/containerd/api/types/metrics.proto
+++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto
@@ -2,7 +2,7 @@
 
 package containerd.types;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/any.proto";
 import "google/protobuf/timestamp.proto";
 
diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
index cc83514..f7a9c3c 100644
--- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
@@ -1,13 +1,13 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/mount.proto
-// DO NOT EDIT!
 
 package types
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 import strings "strings"
 import reflect "reflect"
@@ -96,24 +96,6 @@
 	return i, nil
 }
 
-func encodeFixed64Mount(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Mount(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -457,7 +439,7 @@
 }
 
 var fileDescriptorMount = []byte{
-	// 200 bytes of a gzipped FileDescriptorProto
+	// 202 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
 	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
@@ -469,6 +451,6 @@
 	0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
 	0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
 	0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
-	0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
-	0xe5, 0xc7, 0x07, 0x3f, 0x1b, 0x01, 0x00, 0x00,
+	0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
+	0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto
index 031e654..cd80e44 100644
--- a/vendor/github.com/containerd/containerd/api/types/mount.proto
+++ b/vendor/github.com/containerd/containerd/api/types/mount.proto
@@ -2,7 +2,7 @@
 
 package containerd.types;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 
 option go_package = "github.com/containerd/containerd/api/types;types";
 
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
index 0ca2afc..ba9a3bf 100644
--- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
@@ -1,13 +1,13 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/platform.proto
-// DO NOT EDIT!
 
 package types
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 import strings "strings"
 import reflect "reflect"
@@ -70,24 +70,6 @@
 	return i, nil
 }
 
-func encodeFixed64Platform(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Platform(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -395,7 +377,7 @@
 }
 
 var fileDescriptorPlatform = []byte{
-	// 203 bytes of a gzipped FileDescriptorProto
+	// 205 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
 	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
 	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
@@ -407,6 +389,6 @@
 	0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
 	0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
 	0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
-	0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x4c, 0x62, 0x03, 0x3b, 0xda, 0x18, 0x10, 0x00,
-	0x00, 0xff, 0xff, 0x97, 0xa1, 0x99, 0x56, 0x19, 0x01, 0x00, 0x00,
+	0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
+	0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto
index b1dce06..4cf9834 100644
--- a/vendor/github.com/containerd/containerd/api/types/platform.proto
+++ b/vendor/github.com/containerd/containerd/api/types/platform.proto
@@ -2,7 +2,7 @@
 
 package containerd.types;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 
 option go_package = "github.com/containerd/containerd/api/types;types";
 
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
index ccc230a..ba34270 100644
--- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/api/types/task/task.proto
-// DO NOT EDIT!
 
 /*
 	Package task is a generated protocol buffer package.
@@ -17,7 +16,8 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/gogo/protobuf/types"
 import google_protobuf2 "github.com/gogo/protobuf/types"
 
@@ -224,24 +224,6 @@
 	return i, nil
 }
 
-func encodeFixed64Task(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Task(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -869,39 +851,40 @@
 }
 
 var fileDescriptorTask = []byte{
-	// 543 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x6e, 0xd3, 0x4c,
-	0x14, 0xc5, 0x33, 0x6e, 0xe3, 0x24, 0xe3, 0xb6, 0x9f, 0x3f, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea,
-	0xca, 0x62, 0x61, 0x8b, 0x74, 0xc7, 0x2e, 0xff, 0x84, 0x2c, 0x24, 0x37, 0x72, 0x12, 0xb1, 0x8c,
-	0x9c, 0x78, 0x62, 0x46, 0x6d, 0x66, 0x2c, 0x7b, 0x0c, 0x64, 0xc7, 0x12, 0x75, 0xc5, 0x0b, 0x74,
-	0x05, 0x4f, 0xc1, 0x13, 0x64, 0xc9, 0x0a, 0xb1, 0x0a, 0xd4, 0x4f, 0x82, 0xc6, 0x76, 0xd2, 0x08,
-	0xd8, 0x8c, 0xee, 0x3d, 0xbf, 0x33, 0x77, 0xee, 0x1c, 0xf8, 0x22, 0xc2, 0xec, 0x4d, 0x36, 0xb7,
-	0x17, 0x74, 0xe5, 0x2c, 0x28, 0x61, 0x01, 0x26, 0x28, 0x09, 0x0f, 0xcb, 0x20, 0xc6, 0x0e, 0x5b,
-	0xc7, 0x28, 0x75, 0x58, 0x90, 0x5e, 0x17, 0x87, 0x1d, 0x27, 0x94, 0x51, 0xe5, 0xd1, 0x83, 0xcb,
-	0x7e, 0xfb, 0xdc, 0x2e, 0x4c, 0x5a, 0x3b, 0xa2, 0x11, 0x2d, 0xb8, 0xc3, 0xab, 0xd2, 0xaa, 0x19,
-	0x11, 0xa5, 0xd1, 0x0d, 0x72, 0x8a, 0x6e, 0x9e, 0x2d, 0x1d, 0x86, 0x57, 0x28, 0x65, 0xc1, 0x2a,
-	0xae, 0x0c, 0x8f, 0xff, 0x34, 0x04, 0x64, 0x5d, 0xa2, 0x8b, 0x5c, 0x80, 0x8d, 0x51, 0x42, 0x17,
-	0x28, 0x4d, 0x95, 0x0e, 0x3c, 0xd9, 0x3f, 0x3a, 0xc3, 0xa1, 0x0a, 0x4c, 0x60, 0xb5, 0x7a, 0xff,
-	0xe5, 0x5b, 0x43, 0xea, 0xef, 0x74, 0x77, 0xe0, 0x4b, 0x7b, 0x93, 0x1b, 0x2a, 0xe7, 0x50, 0xc0,
-	0xa1, 0x2a, 0x14, 0x4e, 0x31, 0xdf, 0x1a, 0x82, 0x3b, 0xf0, 0x05, 0x1c, 0x2a, 0x32, 0x3c, 0x8a,
-	0x71, 0xa8, 0x1e, 0x99, 0xc0, 0x3a, 0xf5, 0x79, 0xa9, 0x5c, 0x42, 0x31, 0x65, 0x01, 0xcb, 0x52,
-	0xf5, 0xd8, 0x04, 0xd6, 0x59, 0xe7, 0x89, 0xfd, 0x8f, 0x1f, 0xda, 0xe3, 0xc2, 0xe2, 0x57, 0x56,
-	0xa5, 0x0d, 0xeb, 0x29, 0x0b, 0x31, 0x51, 0xeb, 0xfc, 0x05, 0xbf, 0x6c, 0x94, 0x73, 0x3e, 0x2a,
-	0xa4, 0x19, 0x53, 0xc5, 0x42, 0xae, 0xba, 0x4a, 0x47, 0x49, 0xa2, 0x36, 0xf6, 0x3a, 0x4a, 0x12,
-	0x45, 0x83, 0x4d, 0x86, 0x92, 0x15, 0x26, 0xc1, 0x8d, 0xda, 0x34, 0x81, 0xd5, 0xf4, 0xf7, 0xbd,
-	0x62, 0x40, 0x09, 0xbd, 0xc7, 0x6c, 0x56, 0xed, 0xd6, 0x2a, 0x16, 0x86, 0x5c, 0x2a, 0x57, 0x51,
-	0xba, 0xb0, 0xc5, 0x3b, 0x14, 0xce, 0x02, 0xa6, 0x42, 0x13, 0x58, 0x52, 0x47, 0xb3, 0xcb, 0x40,
-	0xed, 0x5d, 0xa0, 0xf6, 0x64, 0x97, 0x78, 0xaf, 0xb9, 0xd9, 0x1a, 0xb5, 0x4f, 0x3f, 0x0d, 0xe0,
-	0x37, 0xcb, 0x6b, 0x5d, 0x76, 0xe1, 0x42, 0xa9, 0xca, 0xd8, 0x25, 0x4b, 0xba, 0xcb, 0x06, 0x3c,
-	0x64, 0x63, 0xc1, 0x63, 0x4c, 0x96, 0xb4, 0xc8, 0x51, 0xea, 0xb4, 0xff, 0x1a, 0xdf, 0x25, 0x6b,
-	0xbf, 0x70, 0x3c, 0xfb, 0x0e, 0xa0, 0x58, 0x2d, 0xa6, 0xc3, 0xc6, 0xd4, 0x7b, 0xe5, 0x5d, 0xbd,
-	0xf6, 0xe4, 0x9a, 0xf6, 0xff, 0xed, 0x9d, 0x79, 0x5a, 0x82, 0x29, 0xb9, 0x26, 0xf4, 0x1d, 0xe1,
-	0xbc, 0xef, 0x0f, 0xbb, 0x93, 0xe1, 0x40, 0x06, 0x87, 0xbc, 0x9f, 0xa0, 0x80, 0xa1, 0x90, 0x73,
-	0x7f, 0xea, 0x79, 0xae, 0xf7, 0x52, 0x16, 0x0e, 0xb9, 0x9f, 0x11, 0x82, 0x49, 0xc4, 0xf9, 0x78,
-	0x72, 0x35, 0x1a, 0x0d, 0x07, 0xf2, 0xd1, 0x21, 0x1f, 0x33, 0x1a, 0xc7, 0x28, 0x54, 0x9e, 0x42,
-	0x71, 0xd4, 0x9d, 0x8e, 0x87, 0x03, 0xf9, 0x58, 0x93, 0x6f, 0xef, 0xcc, 0x93, 0x12, 0x8f, 0x82,
-	0x2c, 0x2d, 0xa7, 0x73, 0xca, 0xa7, 0xd7, 0x0f, 0x6f, 0x73, 0x8c, 0x49, 0xa4, 0x9d, 0x7d, 0xfc,
-	0xac, 0xd7, 0xbe, 0x7e, 0xd1, 0xab, 0xdf, 0xf4, 0xd4, 0xcd, 0xbd, 0x5e, 0xfb, 0x71, 0xaf, 0xd7,
-	0x3e, 0xe4, 0x3a, 0xd8, 0xe4, 0x3a, 0xf8, 0x96, 0xeb, 0xe0, 0x57, 0xae, 0x83, 0xb9, 0x58, 0xc4,
-	0x70, 0xf9, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x19, 0xf7, 0x5b, 0x8f, 0x4e, 0x03, 0x00, 0x00,
+	// 545 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
+	0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
+	0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
+	0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
+	0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
+	0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
+	0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
+	0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
+	0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
+	0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
+	0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
+	0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
+	0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
+	0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
+	0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
+	0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
+	0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
+	0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
+	0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
+	0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
+	0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
+	0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
+	0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
+	0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
+	0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
+	0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
+	0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
+	0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
+	0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
+	0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
+	0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
+	0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
+	0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
+	0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
+	0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.proto b/vendor/github.com/containerd/containerd/api/types/task/task.proto
index 5845edf..da91cb0 100644
--- a/vendor/github.com/containerd/containerd/api/types/task/task.proto
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.proto
@@ -2,7 +2,7 @@
 
 package containerd.v1.types;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/any.proto";
 
diff --git a/vendor/github.com/containerd/containerd/archive/path.go b/vendor/github.com/containerd/containerd/archive/path.go
deleted file mode 100644
index 0f6cfa3..0000000
--- a/vendor/github.com/containerd/containerd/archive/path.go
+++ /dev/null
@@ -1 +0,0 @@
-package archive
diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go
index 0550f21..c12d4f2 100644
--- a/vendor/github.com/containerd/containerd/archive/tar.go
+++ b/vendor/github.com/containerd/containerd/archive/tar.go
@@ -19,13 +19,12 @@
 	"github.com/pkg/errors"
 )
 
-var (
-	bufferPool = &sync.Pool{
-		New: func() interface{} {
-			return make([]byte, 32*1024)
-		},
-	}
-)
+var bufferPool = &sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 32*1024)
+		return &buffer
+	},
+}
 
 // Diff returns a tar stream of the computed filesystem
 // difference between the provided directories.
@@ -82,6 +81,8 @@
 	// whiteoutOpaqueDir file means directory has been made opaque - meaning
 	// readdir calls to this directory do not follow to lower layers.
 	whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
+
+	paxSchilyXattr = "SCHILY.xattrs."
 )
 
 // Apply applies a tar stream of an OCI style diff tar.
@@ -388,9 +389,10 @@
 		if capability, err := getxattr(source, "security.capability"); err != nil {
 			return errors.Wrap(err, "failed to get capabilities xattr")
 		} else if capability != nil {
-			hdr.Xattrs = map[string]string{
-				"security.capability": string(capability),
+			if hdr.PAXRecords == nil {
+				hdr.PAXRecords = map[string]string{}
 			}
+			hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
 		}
 
 		if err := cw.tw.WriteHeader(hdr); err != nil {
@@ -404,8 +406,8 @@
 			}
 			defer file.Close()
 
-			buf := bufferPool.Get().([]byte)
-			n, err := io.CopyBuffer(cw.tw, file, buf)
+			buf := bufferPool.Get().(*[]byte)
+			n, err := io.CopyBuffer(cw.tw, file, *buf)
 			bufferPool.Put(buf)
 			if err != nil {
 				return errors.Wrap(err, "failed to copy")
@@ -509,13 +511,16 @@
 		}
 	}
 
-	for key, value := range hdr.Xattrs {
-		if err := setxattr(path, key, value); err != nil {
-			if errors.Cause(err) == syscall.ENOTSUP {
-				log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
-				continue
+	for key, value := range hdr.PAXRecords {
+		if strings.HasPrefix(key, paxSchilyXattr) {
+			key = key[len(paxSchilyXattr):]
+			if err := setxattr(path, key, value); err != nil {
+				if errors.Cause(err) == syscall.ENOTSUP {
+					log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
+					continue
+				}
+				return err
 			}
-			return err
 		}
 	}
 
@@ -529,7 +534,7 @@
 }
 
 func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
-	buf := bufferPool.Get().([]byte)
+	buf := bufferPool.Get().(*[]byte)
 	defer bufferPool.Put(buf)
 
 	for {
@@ -540,9 +545,9 @@
 		default:
 		}
 
-		nr, er := src.Read(buf)
+		nr, er := src.Read(*buf)
 		if nr > 0 {
-			nw, ew := dst.Write(buf[0:nr])
+			nw, ew := dst.Write((*buf)[0:nr])
 			if nw > 0 {
 				written += int64(nw)
 			}
diff --git a/vendor/github.com/containerd/containerd/io.go b/vendor/github.com/containerd/containerd/cio/io.go
similarity index 84%
rename from vendor/github.com/containerd/containerd/io.go
rename to vendor/github.com/containerd/containerd/cio/io.go
index 48c06f1..25e3981 100644
--- a/vendor/github.com/containerd/containerd/io.go
+++ b/vendor/github.com/containerd/containerd/cio/io.go
@@ -1,4 +1,4 @@
-package containerd
+package cio
 
 import (
 	"context"
@@ -8,8 +8,8 @@
 	"sync"
 )
 
-// IOConfig holds the io configurations.
-type IOConfig struct {
+// Config holds the io configurations.
+type Config struct {
 	// Terminal is true if one has been allocated
 	Terminal bool
 	// Stdin path
@@ -23,7 +23,7 @@
 // IO holds the io information for a task or process
 type IO interface {
 	// Config returns the IO configuration.
-	Config() IOConfig
+	Config() Config
 	// Cancel aborts all current io operations
 	Cancel()
 	// Wait blocks until all io copy operations have completed
@@ -34,12 +34,12 @@
 
 // cio is a basic container IO implementation.
 type cio struct {
-	config IOConfig
+	config Config
 
 	closer *wgCloser
 }
 
-func (c *cio) Config() IOConfig {
+func (c *cio) Config() Config {
 	return c.config
 }
 
@@ -64,23 +64,23 @@
 	return c.closer.Close()
 }
 
-// IOCreation creates new IO sets for a task
-type IOCreation func(id string) (IO, error)
+// Creation creates new IO sets for a task
+type Creation func(id string) (IO, error)
 
-// IOAttach allows callers to reattach to running tasks
+// Attach allows callers to reattach to running tasks
 //
 // There should only be one reader for a task's IO set
 // because fifo's can only be read from one reader or the output
 // will be sent only to the first reads
-type IOAttach func(*FIFOSet) (IO, error)
+type Attach func(*FIFOSet) (IO, error)
 
-// NewIO returns an IOCreation that will provide IO sets without a terminal
-func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation {
+// NewIO returns an Creation that will provide IO sets without a terminal
+func NewIO(stdin io.Reader, stdout, stderr io.Writer) Creation {
 	return NewIOWithTerminal(stdin, stdout, stderr, false)
 }
 
 // NewIOWithTerminal creates a new io set with the provied io.Reader/Writers for use with a terminal
-func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation {
+func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) Creation {
 	return func(id string) (_ IO, err error) {
 		paths, err := NewFifos(id)
 		if err != nil {
@@ -91,7 +91,7 @@
 				os.RemoveAll(paths.Dir)
 			}
 		}()
-		cfg := IOConfig{
+		cfg := Config{
 			Terminal: terminal,
 			Stdout:   paths.Out,
 			Stderr:   paths.Err,
@@ -113,12 +113,12 @@
 }
 
 // WithAttach attaches the existing io for a task to the provided io.Reader/Writers
-func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
+func WithAttach(stdin io.Reader, stdout, stderr io.Writer) Attach {
 	return func(paths *FIFOSet) (IO, error) {
 		if paths == nil {
 			return nil, fmt.Errorf("cannot attach to existing fifos")
 		}
-		cfg := IOConfig{
+		cfg := Config{
 			Terminal: paths.Terminal,
 			Stdout:   paths.Out,
 			Stderr:   paths.Err,
diff --git a/vendor/github.com/containerd/containerd/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go
similarity index 97%
rename from vendor/github.com/containerd/containerd/io_unix.go
rename to vendor/github.com/containerd/containerd/cio/io_unix.go
index 08aba14..c18f7ec 100644
--- a/vendor/github.com/containerd/containerd/io_unix.go
+++ b/vendor/github.com/containerd/containerd/cio/io_unix.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package containerd
+package cio
 
 import (
 	"context"
@@ -139,9 +139,9 @@
 	return f, nil
 }
 
-// Config returns the IOConfig
-func (f *DirectIO) Config() IOConfig {
-	return IOConfig{
+// Config returns the Config
+func (f *DirectIO) Config() Config {
+	return Config{
 		Terminal: f.terminal,
 		Stdin:    f.set.In,
 		Stdout:   f.set.Out,
diff --git a/vendor/github.com/containerd/containerd/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go
similarity index 98%
rename from vendor/github.com/containerd/containerd/io_windows.go
rename to vendor/github.com/containerd/containerd/cio/io_windows.go
index e37568c..1458c31 100644
--- a/vendor/github.com/containerd/containerd/io_windows.go
+++ b/vendor/github.com/containerd/containerd/cio/io_windows.go
@@ -1,4 +1,4 @@
-package containerd
+package cio
 
 import (
 	"fmt"
diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go
index 663f2fa..39547f5 100644
--- a/vendor/github.com/containerd/containerd/client.go
+++ b/vendor/github.com/containerd/containerd/client.go
@@ -7,6 +7,7 @@
 	"net/http"
 	"runtime"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -17,7 +18,7 @@
 	imagesapi "github.com/containerd/containerd/api/services/images/v1"
 	introspectionapi "github.com/containerd/containerd/api/services/introspection/v1"
 	namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
-	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
+	snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
 	"github.com/containerd/containerd/api/services/tasks/v1"
 	versionservice "github.com/containerd/containerd/api/services/version/v1"
 	"github.com/containerd/containerd/containers"
@@ -29,18 +30,12 @@
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/plugin"
-	"github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/remotes/docker/schema1"
-	contentservice "github.com/containerd/containerd/services/content"
-	diffservice "github.com/containerd/containerd/services/diff"
-	imagesservice "github.com/containerd/containerd/services/images"
-	namespacesservice "github.com/containerd/containerd/services/namespaces"
-	snapshotservice "github.com/containerd/containerd/services/snapshot"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	"github.com/containerd/typeurl"
-	pempty "github.com/golang/protobuf/ptypes/empty"
+	ptypes "github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
@@ -138,7 +133,7 @@
 // NewContainer will create a new container in container with the provided id
 // the id must be unique within the namespace
 func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
-	ctx, done, err := c.withLease(ctx)
+	ctx, done, err := c.WithLease(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -219,7 +214,7 @@
 	}
 	store := c.ContentStore()
 
-	ctx, done, err := c.withLease(ctx)
+	ctx, done, err := c.WithLease(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -339,6 +334,14 @@
 	for i := len(manifestStack) - 1; i >= 0; i-- {
 		_, err := pushHandler(ctx, manifestStack[i])
 		if err != nil {
+			// TODO(estesp): until we have a more complete method for index push, we need to report
+			// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+			// as a marker for this problem
+			if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
+				manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
+				errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
+				return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
+			}
 			return err
 		}
 	}
@@ -426,7 +429,7 @@
 
 // NamespaceService returns the underlying Namespaces Store
 func (c *Client) NamespaceService() namespaces.Store {
-	return namespacesservice.NewStoreFromClient(namespacesapi.NewNamespacesClient(c.conn))
+	return NewNamespaceStoreFromClient(namespacesapi.NewNamespacesClient(c.conn))
 }
 
 // ContainerService returns the underlying container Store
@@ -436,12 +439,12 @@
 
 // ContentStore returns the underlying content Store
 func (c *Client) ContentStore() content.Store {
-	return contentservice.NewStoreFromClient(contentapi.NewContentClient(c.conn))
+	return NewContentStoreFromClient(contentapi.NewContentClient(c.conn))
 }
 
 // SnapshotService returns the underlying snapshotter for the provided snapshotter name
-func (c *Client) SnapshotService(snapshotterName string) snapshot.Snapshotter {
-	return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotsClient(c.conn), snapshotterName)
+func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
+	return NewSnapshotterFromClient(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName)
 }
 
 // TaskService returns the underlying TasksClient
@@ -451,12 +454,12 @@
 
 // ImageService returns the underlying image Store
 func (c *Client) ImageService() images.Store {
-	return imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(c.conn))
+	return NewImageStoreFromClient(imagesapi.NewImagesClient(c.conn))
 }
 
 // DiffService returns the underlying Differ
 func (c *Client) DiffService() diff.Differ {
-	return diffservice.NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
+	return NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
 }
 
 // IntrospectionService returns the underlying Introspection Client
@@ -489,7 +492,7 @@
 
 // Version returns the version of containerd that the client is connected to
 func (c *Client) Version(ctx context.Context) (Version, error) {
-	response, err := c.VersionService().Version(ctx, &pempty.Empty{})
+	response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
 	if err != nil {
 		return Version{}, err
 	}
@@ -499,157 +502,97 @@
 	}, nil
 }
 
-type imageFormat string
-
-const (
-	ociImageFormat imageFormat = "oci"
-)
-
 type importOpts struct {
-	format    imageFormat
-	refObject string
-	labels    map[string]string
 }
 
 // ImportOpt allows the caller to specify import specific options
 type ImportOpt func(c *importOpts) error
 
-// WithImportLabel sets a label to be associated with an imported image
-func WithImportLabel(key, value string) ImportOpt {
-	return func(opts *importOpts) error {
-		if opts.labels == nil {
-			opts.labels = make(map[string]string)
-		}
-
-		opts.labels[key] = value
-		return nil
-	}
-}
-
-// WithImportLabels associates a set of labels to an imported image
-func WithImportLabels(labels map[string]string) ImportOpt {
-	return func(opts *importOpts) error {
-		if opts.labels == nil {
-			opts.labels = make(map[string]string)
-		}
-
-		for k, v := range labels {
-			opts.labels[k] = v
-		}
-		return nil
-	}
-}
-
-// WithOCIImportFormat sets the import format for an OCI image format
-func WithOCIImportFormat() ImportOpt {
-	return func(c *importOpts) error {
-		if c.format != "" {
-			return errors.New("format already set")
-		}
-		c.format = ociImageFormat
-		return nil
-	}
-}
-
-// WithRefObject specifies the ref object to import.
-// If refObject is empty, it is copied from the ref argument of Import().
-func WithRefObject(refObject string) ImportOpt {
-	return func(c *importOpts) error {
-		c.refObject = refObject
-		return nil
-	}
-}
-
-func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
 	var iopts importOpts
 	for _, o := range opts {
 		if err := o(&iopts); err != nil {
 			return iopts, err
 		}
 	}
-	// use OCI as the default format
-	if iopts.format == "" {
-		iopts.format = ociImageFormat
-	}
-	// if refObject is not explicitly specified, use the one specified in ref
-	if iopts.refObject == "" {
-		refSpec, err := reference.Parse(ref)
-		if err != nil {
-			return iopts, err
-		}
-		iopts.refObject = refSpec.Object
-	}
 	return iopts, nil
 }
 
 // Import imports an image from a Tar stream using reader.
-// OCI format is assumed by default.
-//
-// Note that unreferenced blobs are imported to the content store as well.
-func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
-	iopts, err := resolveImportOpt(ref, opts...)
+// Caller needs to specify importer. Future version may use oci.v1 as the default.
+// Note that unreferrenced blobs may be imported to the content store as well.
+func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
+	_, err := resolveImportOpt(opts...) // unused now
 	if err != nil {
 		return nil, err
 	}
 
-	ctx, done, err := c.withLease(ctx)
+	ctx, done, err := c.WithLease(ctx)
 	if err != nil {
 		return nil, err
 	}
 	defer done()
 
-	switch iopts.format {
-	case ociImageFormat:
-		return c.importFromOCITar(ctx, ref, reader, iopts)
-	default:
-		return nil, errors.Errorf("unsupported format: %s", iopts.format)
+	imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
+	if err != nil {
+		// is.Update() is not called on error
+		return nil, err
 	}
+
+	is := c.ImageService()
+	var images []Image
+	for _, imgrec := range imgrecs {
+		if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+			if !errdefs.IsNotFound(err) {
+				return nil, err
+			}
+
+			created, err := is.Create(ctx, imgrec)
+			if err != nil {
+				return nil, err
+			}
+
+			imgrec = created
+		} else {
+			imgrec = updated
+		}
+
+		images = append(images, &image{
+			client: c,
+			i:      imgrec,
+		})
+	}
+	return images, nil
 }
 
 type exportOpts struct {
-	format imageFormat
 }
 
-// ExportOpt allows callers to set export options
+// ExportOpt allows the caller to specify export-specific options
 type ExportOpt func(c *exportOpts) error
 
-// WithOCIExportFormat sets the OCI image format as the export target
-func WithOCIExportFormat() ExportOpt {
-	return func(c *exportOpts) error {
-		if c.format != "" {
-			return errors.New("format already set")
+func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
+	var eopts exportOpts
+	for _, o := range opts {
+		if err := o(&eopts); err != nil {
+			return eopts, err
 		}
-		c.format = ociImageFormat
-		return nil
 	}
+	return eopts, nil
 }
 
-// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
-// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
-//      -> application/vnd.oci.image.layer.v1.tar+gzip
-
 // Export exports an image to a Tar stream.
 // OCI format is used by default.
 // It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
-	var eopts exportOpts
-	for _, o := range opts {
-		if err := o(&eopts); err != nil {
-			return nil, err
-		}
-	}
-	// use OCI as the default format
-	if eopts.format == "" {
-		eopts.format = ociImageFormat
+// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
+func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+	_, err := resolveExportOpt(opts...) // unused now
+	if err != nil {
+		return nil, err
 	}
 	pr, pw := io.Pipe()
-	switch eopts.format {
-	case ociImageFormat:
-		go func() {
-			pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
-		}()
-	default:
-		return nil, errors.Errorf("unsupported format: %s", eopts.format)
-	}
+	go func() {
+		pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
+	}()
 	return pr, nil
 }
diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go
index a9750ec..716e045 100644
--- a/vendor/github.com/containerd/containerd/container.go
+++ b/vendor/github.com/containerd/containerd/container.go
@@ -8,6 +8,7 @@
 
 	"github.com/containerd/containerd/api/services/tasks/v1"
 	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/typeurl"
@@ -25,17 +26,17 @@
 	// Delete removes the container
 	Delete(context.Context, ...DeleteOpts) error
 	// NewTask creates a new task based on the container metadata
-	NewTask(context.Context, IOCreation, ...NewTaskOpts) (Task, error)
+	NewTask(context.Context, cio.Creation, ...NewTaskOpts) (Task, error)
 	// Spec returns the OCI runtime specification
 	Spec(context.Context) (*specs.Spec, error)
 	// Task returns the current task for the container
 	//
-	// If IOAttach options are passed the client will reattach to the IO for the running
+	// If cio.Attach options are passed the client will reattach to the IO for the running
 	// task. If no task exists for the container a NotFound error is returned
 	//
 	// Clients must make sure that only one reader is attached to the task and consuming
 	// the output from the task's fifos
-	Task(context.Context, IOAttach) (Task, error)
+	Task(context.Context, cio.Attach) (Task, error)
 	// Image returns the image that the container is based on
 	Image(context.Context) (Image, error)
 	// Labels returns the labels set on the container
@@ -138,7 +139,7 @@
 	return c.client.ContainerService().Delete(ctx, c.id)
 }
 
-func (c *container) Task(ctx context.Context, attach IOAttach) (Task, error) {
+func (c *container) Task(ctx context.Context, attach cio.Attach) (Task, error) {
 	return c.loadTask(ctx, attach)
 }
 
@@ -161,11 +162,17 @@
 	}, nil
 }
 
-func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
+func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (_ Task, err error) {
 	i, err := ioCreate(c.id)
 	if err != nil {
 		return nil, err
 	}
+	defer func() {
+		if err != nil && i != nil {
+			i.Cancel()
+			i.Close()
+		}
+	}()
 	cfg := i.Config()
 	request := &tasks.CreateTaskRequest{
 		ContainerID: c.id,
@@ -251,7 +258,7 @@
 	return nil
 }
 
-func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) {
+func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) {
 	response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
 		ContainerID: c.id,
 	})
@@ -262,7 +269,7 @@
 		}
 		return nil, err
 	}
-	var i IO
+	var i cio.IO
 	if ioAttach != nil {
 		if i, err = attachExistingIO(response, ioAttach); err != nil {
 			return nil, err
@@ -281,9 +288,9 @@
 	return c.client.ContainerService().Get(ctx, c.id)
 }
 
-func attachExistingIO(response *tasks.GetResponse, ioAttach IOAttach) (IO, error) {
+func attachExistingIO(response *tasks.GetResponse, ioAttach cio.Attach) (cio.IO, error) {
 	// get the existing fifo paths from the task information stored by the daemon
-	paths := &FIFOSet{
+	paths := &cio.FIFOSet{
 		Dir: getFifoDir([]string{
 			response.Process.Stdin,
 			response.Process.Stdout,
diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go
index 4c534fe..fb22a90 100644
--- a/vendor/github.com/containerd/containerd/container_opts.go
+++ b/vendor/github.com/containerd/containerd/container_opts.go
@@ -5,10 +5,12 @@
 
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/typeurl"
 	"github.com/gogo/protobuf/types"
 	"github.com/opencontainers/image-spec/identity"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )
 
@@ -164,3 +166,29 @@
 		return nil
 	}
 }
+
+// WithNewSpec generates a new spec for a new container
+func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		s, err := oci.GenerateSpec(ctx, client, c, opts...)
+		if err != nil {
+			return err
+		}
+		c.Spec, err = typeurl.MarshalAny(s)
+		return err
+	}
+}
+
+// WithSpec sets the provided spec on the container
+func WithSpec(s *specs.Spec, opts ...oci.SpecOpts) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		for _, o := range opts {
+			if err := o(ctx, client, c, s); err != nil {
+				return err
+			}
+		}
+		var err error
+		c.Spec, err = typeurl.MarshalAny(s)
+		return err
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go
index e2cd7c9..b678033 100644
--- a/vendor/github.com/containerd/containerd/container_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/container_opts_unix.go
@@ -6,12 +6,17 @@
 	"context"
 	"encoding/json"
 	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
 
 	"github.com/containerd/containerd/api/types"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/platforms"
 	"github.com/gogo/protobuf/proto"
 	protobuf "github.com/gogo/protobuf/types"
@@ -122,3 +127,94 @@
 
 	return &index, nil
 }
+
+// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
+// filesystem to be used by a container with user namespaces
+func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
+	return withRemappedSnapshotBase(id, i, uid, gid, false)
+}
+
+// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only.
+func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
+	return withRemappedSnapshotBase(id, i, uid, gid, true)
+}
+
+func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		if err != nil {
+			return err
+		}
+
+		setSnapshotterIfEmpty(c)
+
+		var (
+			snapshotter = client.SnapshotService(c.Snapshotter)
+			parent      = identity.ChainID(diffIDs).String()
+			usernsID    = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
+		)
+		if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
+			if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
+				c.SnapshotKey = id
+				c.Image = i.Name()
+				return nil
+			} else if !errdefs.IsNotFound(err) {
+				return err
+			}
+		}
+		mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent)
+		if err != nil {
+			return err
+		}
+		if err := remapRootFS(mounts, uid, gid); err != nil {
+			snapshotter.Remove(ctx, usernsID)
+			return err
+		}
+		if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil {
+			return err
+		}
+		if readonly {
+			_, err = snapshotter.View(ctx, id, usernsID)
+		} else {
+			_, err = snapshotter.Prepare(ctx, id, usernsID)
+		}
+		if err != nil {
+			return err
+		}
+		c.SnapshotKey = id
+		c.Image = i.Name()
+		return nil
+	}
+}
+
+func remapRootFS(mounts []mount.Mount, uid, gid uint32) error {
+	root, err := ioutil.TempDir("", "ctd-remap")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(root)
+	for _, m := range mounts {
+		if err := m.Mount(root); err != nil {
+			return err
+		}
+	}
+	err = filepath.Walk(root, incrementFS(root, uid, gid))
+	if uerr := mount.Unmount(root, 0); err == nil {
+		err = uerr
+	}
+	return err
+}
+
+func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		var (
+			stat = info.Sys().(*syscall.Stat_t)
+			u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc)
+		)
+		// be sure the lchown the path as to not de-reference the symlink to a host file
+		return os.Lchown(path, u, g)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go
index 32efc6c..86b8536 100644
--- a/vendor/github.com/containerd/containerd/content/helpers.go
+++ b/vendor/github.com/containerd/containerd/content/helpers.go
@@ -2,7 +2,6 @@
 
 import (
 	"context"
-	"fmt"
 	"io"
 	"sync"
 
@@ -11,13 +10,12 @@
 	"github.com/pkg/errors"
 )
 
-var (
-	bufPool = sync.Pool{
-		New: func() interface{} {
-			return make([]byte, 1<<20)
-		},
-	}
-)
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 1<<20)
+		return &buffer
+	},
+}
 
 // NewReader returns a io.Reader from a ReaderAt
 func NewReader(ra ReaderAt) io.Reader {
@@ -89,10 +87,10 @@
 		}
 	}
 
-	buf := bufPool.Get().([]byte)
+	buf := bufPool.Get().(*[]byte)
 	defer bufPool.Put(buf)
 
-	if _, err := io.CopyBuffer(cw, r, buf); err != nil {
+	if _, err := io.CopyBuffer(cw, r, *buf); err != nil {
 		return err
 	}
 
@@ -119,7 +117,7 @@
 	if ok {
 		nn, err := seeker.Seek(offset, io.SeekStart)
 		if nn != offset {
-			return nil, fmt.Errorf("failed to seek to offset %v", offset)
+			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
 		}
 
 		if err != nil {
diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go
index cf5d0c5..9a6c62f 100644
--- a/vendor/github.com/containerd/containerd/content/local/locks.go
+++ b/vendor/github.com/containerd/containerd/content/local/locks.go
@@ -8,7 +8,6 @@
 )
 
 // Handles locking references
-// TODO: use boltdb for lock status
 
 var (
 	// locks lets us lock in process
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
index 14a9888..9ff95de 100644
--- a/vendor/github.com/containerd/containerd/content/local/store.go
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -5,6 +5,7 @@
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -20,13 +21,12 @@
 	"github.com/pkg/errors"
 )
 
-var (
-	bufPool = sync.Pool{
-		New: func() interface{} {
-			return make([]byte, 1<<20)
-		},
-	}
-)
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 1<<20)
+		return &buffer
+	},
+}
 
 // LabelStore is used to store mutable labels for digests
 type LabelStore interface {
@@ -62,7 +62,7 @@
 // require labels and should use `NewStore`. `NewLabeledStore` is primarily
 // useful for tests or standalone implementations.
 func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
-	if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
+	if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
 		return nil, err
 	}
 
@@ -219,7 +219,7 @@
 
 		// TODO(stevvooe): There are few more cases with subdirs that should be
 		// handled in case the layout gets corrupted. This isn't strict enough
-		// an may spew bad data.
+		// and may spew bad data.
 
 		if path == root {
 			return nil
@@ -324,12 +324,28 @@
 		return content.Status{}, err
 	}
 
+	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
+	if err != nil {
+		return content.Status{}, errors.Wrapf(err, "could not read startedat")
+	}
+
+	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
+	if err != nil {
+		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
+	}
+
+	// because we don't write updatedat on every write, the mod time may
+	// actually be more up to date.
+	if fi.ModTime().After(updatedAt) {
+		updatedAt = fi.ModTime()
+	}
+
 	return content.Status{
 		Ref:       ref,
 		Offset:    fi.Size(),
 		Total:     s.total(ingestPath),
-		UpdatedAt: fi.ModTime(),
-		StartedAt: getStartTime(fi),
+		UpdatedAt: updatedAt,
+		StartedAt: startedAt,
 	}, nil
 }
 
@@ -369,6 +385,37 @@
 //
 // The argument `ref` is used to uniquely identify a long-lived writer transaction.
 func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+	var lockErr error
+	for count := uint64(0); count < 10; count++ {
+		time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
+		if err := tryLock(ref); err != nil {
+			if !errdefs.IsUnavailable(err) {
+				return nil, err
+			}
+
+			lockErr = err
+		} else {
+			lockErr = nil
+			break
+		}
+	}
+
+	if lockErr != nil {
+		return nil, lockErr
+	}
+
+	w, err := s.writer(ctx, ref, total, expected)
+	if err != nil {
+		unlock(ref)
+		return nil, err
+	}
+
+	return w, nil // lock is now held by w.
+}
+
+// writer provides the main implementation of the Writer method. The caller
+// must hold the lock correctly and release on error if there is a problem.
+func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
 	// TODO(stevvooe): Need to actually store expected here. We have
 	// code in the service that shouldn't be dealing with this.
 	if expected != "" {
@@ -380,10 +427,6 @@
 
 	path, refp, data := s.ingestPaths(ref)
 
-	if err := tryLock(ref); err != nil {
-		return nil, errors.Wrapf(err, "locking ref %v failed", ref)
-	}
-
 	var (
 		digester  = digest.Canonical.Digester()
 		offset    int64
@@ -412,17 +455,17 @@
 			return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
 		}
 
-		// slow slow slow!!, send to goroutine or use resumable hashes
+		// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
 		fp, err := os.Open(data)
 		if err != nil {
 			return nil, err
 		}
 		defer fp.Close()
 
-		p := bufPool.Get().([]byte)
+		p := bufPool.Get().(*[]byte)
 		defer bufPool.Put(p)
 
-		offset, err = io.CopyBuffer(digester.Hash(), fp, p)
+		offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
 		if err != nil {
 			return nil, err
 		}
@@ -431,20 +474,28 @@
 		startedAt = status.StartedAt
 		total = status.Total
 	} else {
+		startedAt = time.Now()
+		updatedAt = startedAt
+
 		// the ingest is new, we need to setup the target location.
 		// write the ref to a file for later use
 		if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
 			return nil, err
 		}
 
+		if writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
+			return nil, err
+		}
+
+		if writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
+			return nil, err
+		}
+
 		if total > 0 {
 			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
 				return nil, err
 			}
 		}
-
-		startedAt = time.Now()
-		updatedAt = startedAt
 	}
 
 	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
@@ -452,6 +503,10 @@
 		return nil, errors.Wrap(err, "failed to open data file")
 	}
 
+	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
+		return nil, errors.Wrap(err, "could not seek to current write offset")
+	}
+
 	return &writer{
 		s:         s,
 		fp:        fp,
@@ -509,3 +564,30 @@
 	p, err := ioutil.ReadFile(path)
 	return string(p), err
 }
+
+// readFileTimestamp reads a file with just a timestamp present.
+func readFileTimestamp(p string) (time.Time, error) {
+	b, err := ioutil.ReadFile(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return time.Time{}, err
+	}
+
+	var t time.Time
+	if err := t.UnmarshalText(b); err != nil {
+		return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
+	}
+
+	return t, nil
+}
+
+func writeTimestampFile(p string, t time.Time) error {
+	b, err := t.MarshalText()
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(p, b, 0666)
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go
index 0d500b8..c0587e1 100644
--- a/vendor/github.com/containerd/containerd/content/local/store_unix.go
+++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go
@@ -10,15 +10,6 @@
 	"github.com/containerd/containerd/sys"
 )
 
-func getStartTime(fi os.FileInfo) time.Time {
-	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(sys.StatCtime(st).Sec),
-			int64(sys.StatCtime(st).Nsec))
-	}
-
-	return fi.ModTime()
-}
-
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
 		return time.Unix(int64(sys.StatAtime(st).Sec),
diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go
index 5f12ea5..f745aaf 100644
--- a/vendor/github.com/containerd/containerd/content/local/store_windows.go
+++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go
@@ -5,10 +5,6 @@
 	"time"
 )
 
-func getStartTime(fi os.FileInfo) time.Time {
-	return fi.ModTime()
-}
-
 func getATime(fi os.FileInfo) time.Time {
 	return fi.ModTime()
 }
diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go
index 8f1e92d..fbf39f7 100644
--- a/vendor/github.com/containerd/containerd/content/local/writer.go
+++ b/vendor/github.com/containerd/containerd/content/local/writer.go
@@ -152,6 +152,7 @@
 	if w.fp != nil {
 		w.fp.Sync()
 		err = w.fp.Close()
+		writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
 		w.fp = nil
 		unlock(w.ref)
 		return
diff --git a/vendor/github.com/containerd/containerd/services/content/reader.go b/vendor/github.com/containerd/containerd/content_reader.go
similarity index 97%
rename from vendor/github.com/containerd/containerd/services/content/reader.go
rename to vendor/github.com/containerd/containerd/content_reader.go
index 024251c..7acd2d3 100644
--- a/vendor/github.com/containerd/containerd/services/content/reader.go
+++ b/vendor/github.com/containerd/containerd/content_reader.go
@@ -1,4 +1,4 @@
-package content
+package containerd
 
 import (
 	"context"
diff --git a/vendor/github.com/containerd/containerd/services/content/store.go b/vendor/github.com/containerd/containerd/content_store.go
similarity index 74%
rename from vendor/github.com/containerd/containerd/services/content/store.go
rename to vendor/github.com/containerd/containerd/content_store.go
index b5aaa85..1b53969 100644
--- a/vendor/github.com/containerd/containerd/services/content/store.go
+++ b/vendor/github.com/containerd/containerd/content_store.go
@@ -1,4 +1,4 @@
-package content
+package containerd
 
 import (
 	"context"
@@ -11,18 +11,18 @@
 	digest "github.com/opencontainers/go-digest"
 )
 
-type remoteStore struct {
+type remoteContent struct {
 	client contentapi.ContentClient
 }
 
-// NewStoreFromClient returns a new content store
-func NewStoreFromClient(client contentapi.ContentClient) content.Store {
-	return &remoteStore{
+// NewContentStoreFromClient returns a new content store
+func NewContentStoreFromClient(client contentapi.ContentClient) content.Store {
+	return &remoteContent{
 		client: client,
 	}
 }
 
-func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+func (rs *remoteContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
 	resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{
 		Digest: dgst,
 	})
@@ -33,7 +33,7 @@
 	return infoFromGRPC(resp.Info), nil
 }
 
-func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+func (rs *remoteContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
 	session, err := rs.client.List(ctx, &contentapi.ListContentRequest{
 		Filters: filters,
 	})
@@ -61,7 +61,7 @@
 	return nil
 }
 
-func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error {
+func (rs *remoteContent) Delete(ctx context.Context, dgst digest.Digest) error {
 	if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{
 		Digest: dgst,
 	}); err != nil {
@@ -71,7 +71,7 @@
 	return nil
 }
 
-func (rs *remoteStore) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
+func (rs *remoteContent) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
 	i, err := rs.Info(ctx, dgst)
 	if err != nil {
 		return nil, err
@@ -85,7 +85,7 @@
 	}, nil
 }
 
-func (rs *remoteStore) Status(ctx context.Context, ref string) (content.Status, error) {
+func (rs *remoteContent) Status(ctx context.Context, ref string) (content.Status, error) {
 	resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
 		Ref: ref,
 	})
@@ -104,7 +104,7 @@
 	}, nil
 }
 
-func (rs *remoteStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+func (rs *remoteContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
 	resp, err := rs.client.Update(ctx, &contentapi.UpdateRequest{
 		Info: infoToGRPC(info),
 		UpdateMask: &protobuftypes.FieldMask{
@@ -117,7 +117,7 @@
 	return infoFromGRPC(resp.Info), nil
 }
 
-func (rs *remoteStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
+func (rs *remoteContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
 	resp, err := rs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{
 		Filters: filters,
 	})
@@ -140,7 +140,7 @@
 	return statuses, nil
 }
 
-func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
+func (rs *remoteContent) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
 	wrclient, offset, err := rs.negotiate(ctx, ref, size, expected)
 	if err != nil {
 		return nil, errdefs.FromGRPC(err)
@@ -154,7 +154,7 @@
 }
 
 // Abort implements asynchronous abort. It starts a new write session on the ref l
-func (rs *remoteStore) Abort(ctx context.Context, ref string) error {
+func (rs *remoteContent) Abort(ctx context.Context, ref string) error {
 	if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
 		Ref: ref,
 	}); err != nil {
@@ -164,7 +164,7 @@
 	return nil
 }
 
-func (rs *remoteStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
+func (rs *remoteContent) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
 	wrclient, err := rs.client.Write(ctx)
 	if err != nil {
 		return nil, 0, err
diff --git a/vendor/github.com/containerd/containerd/services/content/writer.go b/vendor/github.com/containerd/containerd/content_writer.go
similarity index 95%
rename from vendor/github.com/containerd/containerd/services/content/writer.go
rename to vendor/github.com/containerd/containerd/content_writer.go
index cb45957..b18f351 100644
--- a/vendor/github.com/containerd/containerd/services/content/writer.go
+++ b/vendor/github.com/containerd/containerd/content_writer.go
@@ -1,4 +1,4 @@
-package content
+package containerd
 
 import (
 	"context"
@@ -41,7 +41,7 @@
 		Action: contentapi.WriteActionStat,
 	})
 	if err != nil {
-		return content.Status{}, err
+		return content.Status{}, errors.Wrap(err, "error getting writer status")
 	}
 
 	return content.Status{
diff --git a/vendor/github.com/containerd/containerd/services/diff/client.go b/vendor/github.com/containerd/containerd/diff.go
similarity index 84%
rename from vendor/github.com/containerd/containerd/services/diff/client.go
rename to vendor/github.com/containerd/containerd/diff.go
index d34848b..4e47efa 100644
--- a/vendor/github.com/containerd/containerd/services/diff/client.go
+++ b/vendor/github.com/containerd/containerd/diff.go
@@ -1,4 +1,4 @@
-package diff
+package containerd
 
 import (
 	diffapi "github.com/containerd/containerd/api/services/diff/v1"
@@ -12,16 +12,16 @@
 // NewDiffServiceFromClient returns a new diff service which communicates
 // over a GRPC connection.
 func NewDiffServiceFromClient(client diffapi.DiffClient) diff.Differ {
-	return &remote{
+	return &diffRemote{
 		client: client,
 	}
 }
 
-type remote struct {
+type diffRemote struct {
 	client diffapi.DiffClient
 }
 
-func (r *remote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
+func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
 	req := &diffapi.ApplyRequest{
 		Diff:   fromDescriptor(diff),
 		Mounts: fromMounts(mounts),
@@ -33,7 +33,7 @@
 	return toDescriptor(resp.Applied), nil
 }
 
-func (r *remote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) {
+func (r *diffRemote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) {
 	var config diff.Config
 	for _, opt := range opts {
 		if err := opt(&config); err != nil {
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
index 2aa2e11..6a3bbca 100644
--- a/vendor/github.com/containerd/containerd/errdefs/grpc.go
+++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go
@@ -4,7 +4,6 @@
 	"strings"
 
 	"github.com/pkg/errors"
-	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -61,7 +60,7 @@
 
 	var cls error // divide these into error classes, becomes the cause
 
-	switch grpc.Code(err) {
+	switch code(err) {
 	case codes.InvalidArgument:
 		cls = ErrInvalidArgument
 	case codes.AlreadyExists:
@@ -94,7 +93,7 @@
 // Effectively, we just remove the string of cls from the end of err if it
 // appears there.
 func rebaseMessage(cls error, err error) string {
-	desc := grpc.ErrorDesc(err)
+	desc := errDesc(err)
 	clss := cls.Error()
 	if desc == clss {
 		return ""
@@ -107,3 +106,17 @@
 	_, ok := status.FromError(err)
 	return ok
 }
+
+func code(err error) codes.Code {
+	if s, ok := status.FromError(err); ok {
+		return s.Code()
+	}
+	return codes.Unknown
+}
+
+func errDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go
index efe2f59..2bfedb1 100644
--- a/vendor/github.com/containerd/containerd/events/events.go
+++ b/vendor/github.com/containerd/containerd/events/events.go
@@ -2,10 +2,50 @@
 
 import (
 	"context"
+	"time"
 
-	events "github.com/containerd/containerd/api/services/events/v1"
+	"github.com/containerd/typeurl"
+	"github.com/gogo/protobuf/types"
 )
 
+// Envelope provides the packaging for an event.
+type Envelope struct {
+	Timestamp time.Time
+	Namespace string
+	Topic     string
+	Event     *types.Any
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (e *Envelope) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: timestamp
+	case "namespace":
+		return string(e.Namespace), len(e.Namespace) > 0
+	case "topic":
+		return string(e.Topic), len(e.Topic) > 0
+	case "event":
+		decoded, err := typeurl.UnmarshalAny(e.Event)
+		if err != nil {
+			return "", false
+		}
+
+		adaptor, ok := decoded.(interface {
+			Field([]string) (string, bool)
+		})
+		if !ok {
+			return "", false
+		}
+		return adaptor.Field(fieldpath[1:])
+	}
+	return "", false
+}
+
 // Event is a generic interface for any type of event
 type Event interface{}
 
@@ -16,16 +56,10 @@
 
 // Forwarder forwards an event to the underlying event bus
 type Forwarder interface {
-	Forward(ctx context.Context, envelope *events.Envelope) error
-}
-
-type publisherFunc func(ctx context.Context, topic string, event Event) error
-
-func (fn publisherFunc) Publish(ctx context.Context, topic string, event Event) error {
-	return fn(ctx, topic, event)
+	Forward(ctx context.Context, envelope *Envelope) error
 }
 
 // Subscriber allows callers to subscribe to events
 type Subscriber interface {
-	Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error)
+	Subscribe(ctx context.Context, filters ...string) (ch <-chan *Envelope, errs <-chan error)
 }
diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go
index 3fefb9c..3178fc4 100644
--- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go
+++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go
@@ -5,7 +5,6 @@
 	"strings"
 	"time"
 
-	v1 "github.com/containerd/containerd/api/services/events/v1"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/filters"
@@ -31,11 +30,15 @@
 	}
 }
 
+var _ events.Publisher = &Exchange{}
+var _ events.Forwarder = &Exchange{}
+var _ events.Subscriber = &Exchange{}
+
 // Forward accepts an envelope to be direcly distributed on the exchange.
 //
 // This is useful when an event is forwaded on behalf of another namespace or
 // when the event is propagated on behalf of another publisher.
-func (e *Exchange) Forward(ctx context.Context, envelope *v1.Envelope) (err error) {
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
 	if err := validateEnvelope(envelope); err != nil {
 		return err
 	}
@@ -64,7 +67,7 @@
 	var (
 		namespace string
 		encoded   *types.Any
-		envelope  v1.Envelope
+		envelope  events.Envelope
 	)
 
 	namespace, err = namespaces.NamespaceRequired(ctx)
@@ -109,9 +112,9 @@
 // Zero or more filters may be provided as strings. Only events that match
 // *any* of the provided filters will be sent on the channel. The filters use
 // the standard containerd filters package syntax.
-func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *v1.Envelope, errs <-chan error) {
+func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) {
 	var (
-		evch                  = make(chan *v1.Envelope)
+		evch                  = make(chan *events.Envelope)
 		errq                  = make(chan error, 1)
 		channel               = goevents.NewChannel(0)
 		queue                 = goevents.NewQueue(channel)
@@ -151,7 +154,7 @@
 		for {
 			select {
 			case ev := <-channel.C:
-				env, ok := ev.(*v1.Envelope)
+				env, ok := ev.(*events.Envelope)
 				if !ok {
 					// TODO(stevvooe): For the most part, we are well protected
 					// from this condition. Both Forward and Publish protect
@@ -205,7 +208,7 @@
 	return nil
 }
 
-func validateEnvelope(envelope *v1.Envelope) error {
+func validateEnvelope(envelope *events.Envelope) error {
 	if err := namespaces.Validate(envelope.Namespace); err != nil {
 		return errors.Wrapf(err, "event envelope has invalid namespace")
 	}
diff --git a/vendor/github.com/containerd/containerd/export.go b/vendor/github.com/containerd/containerd/export.go
deleted file mode 100644
index 76bebe3..0000000
--- a/vendor/github.com/containerd/containerd/export.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package containerd
-
-import (
-	"archive/tar"
-	"context"
-	"encoding/json"
-	"io"
-	"sort"
-
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/platforms"
-	ocispecs "github.com/opencontainers/image-spec/specs-go"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
-	tw := tar.NewWriter(writer)
-	defer tw.Close()
-
-	records := []tarRecord{
-		ociLayoutFile(""),
-		ociIndexRecord(desc),
-	}
-
-	cs := c.ContentStore()
-	algorithms := map[string]struct{}{}
-	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-		records = append(records, blobRecord(cs, desc))
-		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
-		return nil, nil
-	}
-
-	handlers := images.Handlers(
-		images.ChildrenHandler(cs, platforms.Default()),
-		images.HandlerFunc(exportHandler),
-	)
-
-	// Walk sequentially since the number of fetchs is likely one and doing in
-	// parallel requires locking the export handler
-	if err := images.Walk(ctx, handlers, desc); err != nil {
-		return err
-	}
-
-	if len(algorithms) > 0 {
-		records = append(records, directoryRecord("blobs/", 0755))
-		for alg := range algorithms {
-			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
-		}
-	}
-
-	return writeTar(ctx, tw, records)
-}
-
-type tarRecord struct {
-	Header *tar.Header
-	CopyTo func(context.Context, io.Writer) (int64, error)
-}
-
-func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
-	path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     path,
-			Mode:     0444,
-			Size:     desc.Size,
-			Typeflag: tar.TypeReg,
-		},
-		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
-			r, err := cs.ReaderAt(ctx, desc.Digest)
-			if err != nil {
-				return 0, err
-			}
-			defer r.Close()
-
-			// Verify digest
-			dgstr := desc.Digest.Algorithm().Digester()
-
-			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
-			if err != nil {
-				return 0, err
-			}
-			if dgstr.Digest() != desc.Digest {
-				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
-			}
-			return n, nil
-		},
-	}
-}
-
-func directoryRecord(name string, mode int64) tarRecord {
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     name,
-			Mode:     mode,
-			Typeflag: tar.TypeDir,
-		},
-	}
-}
-
-func ociLayoutFile(version string) tarRecord {
-	if version == "" {
-		version = ocispec.ImageLayoutVersion
-	}
-	layout := ocispec.ImageLayout{
-		Version: version,
-	}
-
-	b, err := json.Marshal(layout)
-	if err != nil {
-		panic(err)
-	}
-
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     ocispec.ImageLayoutFile,
-			Mode:     0444,
-			Size:     int64(len(b)),
-			Typeflag: tar.TypeReg,
-		},
-		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
-			n, err := w.Write(b)
-			return int64(n), err
-		},
-	}
-
-}
-
-func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
-	index := ocispec.Index{
-		Versioned: ocispecs.Versioned{
-			SchemaVersion: 2,
-		},
-		Manifests: manifests,
-	}
-
-	b, err := json.Marshal(index)
-	if err != nil {
-		panic(err)
-	}
-
-	return tarRecord{
-		Header: &tar.Header{
-			Name:     "index.json",
-			Mode:     0644,
-			Size:     int64(len(b)),
-			Typeflag: tar.TypeReg,
-		},
-		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
-			n, err := w.Write(b)
-			return int64(n), err
-		},
-	}
-}
-
-func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
-	sort.Sort(tarRecordsByName(records))
-
-	for _, record := range records {
-		if err := tw.WriteHeader(record.Header); err != nil {
-			return err
-		}
-		if record.CopyTo != nil {
-			n, err := record.CopyTo(ctx, tw)
-			if err != nil {
-				return err
-			}
-			if n != record.Header.Size {
-				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
-			}
-		} else if record.Header.Size > 0 {
-			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
-		}
-	}
-	return nil
-}
-
-type tarRecordsByName []tarRecord
-
-func (t tarRecordsByName) Len() int {
-	return len(t)
-}
-func (t tarRecordsByName) Swap(i, j int) {
-	t[i], t[j] = t[j], t[i]
-}
-func (t tarRecordsByName) Less(i, j int) bool {
-	return t[i].Header.Name < t[j].Header.Name
-}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
index c9b0984..c765ea0 100644
--- a/vendor/github.com/containerd/containerd/filters/parser.go
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -3,7 +3,6 @@
 import (
 	"fmt"
 	"io"
-	"strconv"
 
 	"github.com/containerd/containerd/errdefs"
 	"github.com/pkg/errors"
@@ -134,7 +133,12 @@
 		return selector{}, err
 	}
 
-	value, err := p.value()
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
 	if err != nil {
 		if err == io.EOF {
 			return selector{}, io.ErrUnexpectedEOF
@@ -188,7 +192,7 @@
 	case tokenField:
 		return s, nil
 	case tokenQuoted:
-		return p.unquote(pos, s)
+		return p.unquote(pos, s, false)
 	}
 
 	return "", p.mkerr(pos, "expected field or quoted")
@@ -213,21 +217,25 @@
 	return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
 }
 
-func (p *parser) value() (string, error) {
+func (p *parser) value(allowAltQuotes bool) (string, error) {
 	pos, tok, s := p.scanner.scan()
 
 	switch tok {
 	case tokenValue, tokenField:
 		return s, nil
 	case tokenQuoted:
-		return p.unquote(pos, s)
+		return p.unquote(pos, s, allowAltQuotes)
 	}
 
 	return "", p.mkerr(pos, "expected value or quoted")
 }
 
-func (p *parser) unquote(pos int, s string) (string, error) {
-	uq, err := strconv.Unquote(s)
+func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
+	if !allowAlts && s[0] != '\'' && s[0] != '"' {
+		return "", p.mkerr(pos, "invalid quote encountered")
+	}
+
+	uq, err := unquote(s)
 	if err != nil {
 		return "", p.mkerr(pos, "unquoting failed: %v", err)
 	}
diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go
new file mode 100644
index 0000000..08698e1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/quote.go
@@ -0,0 +1,237 @@
+package filters
+
+import (
+	"unicode/utf8"
+
+	"github.com/pkg/errors"
+)
+
+// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
+// strconv package and modified to be able to handle quoting with `/` and `|`
+// as delimiters.  The copyright is held by the Go authors.
+
+var errQuoteSyntax = errors.New("quote syntax error")
+
+// UnquoteChar decodes the first character or byte in the escaped string
+// or character literal represented by the string s.
+// It returns four values:
+//
+//	1) value, the decoded Unicode code point or byte value;
+//	2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+//	3) tail, the remainder of the string after the character; and
+//	4) an error that will be nil if the character is syntactically valid.
+//
+// The second argument, quote, specifies the type of literal being parsed
+// and therefore which escaped quote character is permitted.
+// If set to a single quote, it permits the sequence \' and disallows unescaped '.
+// If set to a double quote, it permits \" and disallows unescaped ".
+// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
+//
+// This is from Go strconv package, modified to support `|` and `/` as double
+// quotes for use with regular expressions.
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+	// easy cases
+	switch c := s[0]; {
+	case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
+		err = errQuoteSyntax
+		return
+	case c >= utf8.RuneSelf:
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// hard case: c is backslash
+	if len(s) <= 1 {
+		err = errQuoteSyntax
+		return
+	}
+	c := s[1]
+	s = s[2:]
+
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case 'x', 'u', 'U':
+		n := 0
+		switch c {
+		case 'x':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = errQuoteSyntax
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = errQuoteSyntax
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if c == 'x' {
+			// single-byte string, possibly not UTF-8
+			value = v
+			break
+		}
+		if v > utf8.MaxRune {
+			err = errQuoteSyntax
+			return
+		}
+		value = v
+		multibyte = true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		v := rune(c) - '0'
+		if len(s) < 2 {
+			err = errQuoteSyntax
+			return
+		}
+		for j := 0; j < 2; j++ { // one digit already; two more
+			x := rune(s[j]) - '0'
+			if x < 0 || x > 7 {
+				err = errQuoteSyntax
+				return
+			}
+			v = (v << 3) | x
+		}
+		s = s[2:]
+		if v > 255 {
+			err = errQuoteSyntax
+			return
+		}
+		value = v
+	case '\\':
+		value = '\\'
+	case '\'', '"', '|', '/':
+		if c != quote {
+			err = errQuoteSyntax
+			return
+		}
+		value = rune(c)
+	default:
+		err = errQuoteSyntax
+		return
+	}
+	tail = s
+	return
+}
+
+// unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes.  (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+//
+// This is modified from the standard library to support `|` and `/` as quote
+// characters for use with regular expressions.
+func unquote(s string) (string, error) {
+	n := len(s)
+	if n < 2 {
+		return "", errQuoteSyntax
+	}
+	quote := s[0]
+	if quote != s[n-1] {
+		return "", errQuoteSyntax
+	}
+	s = s[1 : n-1]
+
+	if quote == '`' {
+		if contains(s, '`') {
+			return "", errQuoteSyntax
+		}
+		if contains(s, '\r') {
+			// -1 because we know there is at least one \r to remove.
+			buf := make([]byte, 0, len(s)-1)
+			for i := 0; i < len(s); i++ {
+				if s[i] != '\r' {
+					buf = append(buf, s[i])
+				}
+			}
+			return string(buf), nil
+		}
+		return s, nil
+	}
+	if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
+		return "", errQuoteSyntax
+	}
+	if contains(s, '\n') {
+		return "", errQuoteSyntax
+	}
+
+	// Is it trivial?  Avoid allocation.
+	if !contains(s, '\\') && !contains(s, quote) {
+		switch quote {
+		case '"', '/', '|': // pipe and slash are treated like double quote
+			return s, nil
+		case '\'':
+			r, size := utf8.DecodeRuneInString(s)
+			if size == len(s) && (r != utf8.RuneError || size != 1) {
+				return s, nil
+			}
+		}
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	for len(s) > 0 {
+		c, multibyte, ss, err := unquoteChar(s, quote)
+		if err != nil {
+			return "", err
+		}
+		s = ss
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+		if quote == '\'' && len(s) != 0 {
+			// single-quoted must be single character
+			return "", errQuoteSyntax
+		}
+	}
+	return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return
+}
diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go
index 5a55e0a..3a8e723 100644
--- a/vendor/github.com/containerd/containerd/filters/scanner.go
+++ b/vendor/github.com/containerd/containerd/filters/scanner.go
@@ -87,7 +87,7 @@
 	return ch
 }
 
-func (s *scanner) scan() (int, token, string) {
+func (s *scanner) scan() (nextp int, tk token, text string) {
 	var (
 		ch  = s.next()
 		pos = s.pos
@@ -101,6 +101,7 @@
 		s.scanQuoted(ch)
 		return pos, tokenQuoted, s.input[pos:s.ppos]
 	case isSeparatorRune(ch):
+		s.value = false
 		return pos, tokenSeparator, s.input[pos:s.ppos]
 	case isOperatorRune(ch):
 		s.scanOperator()
@@ -241,7 +242,7 @@
 
 func isQuoteRune(r rune) bool {
 	switch r {
-	case '"': // maybe add single quoting?
+	case '/', '|', '"': // maybe add single quoting?
 		return true
 	}
 
diff --git a/vendor/github.com/containerd/containerd/fs/copy.go b/vendor/github.com/containerd/containerd/fs/copy.go
index 0d11fa5..e8f4528 100644
--- a/vendor/github.com/containerd/containerd/fs/copy.go
+++ b/vendor/github.com/containerd/containerd/fs/copy.go
@@ -9,13 +9,12 @@
 	"github.com/pkg/errors"
 )
 
-var (
-	bufferPool = &sync.Pool{
-		New: func() interface{} {
-			return make([]byte, 32*1024)
-		},
-	}
-)
+var bufferPool = &sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 32*1024)
+		return &buffer
+	},
+}
 
 // CopyDir copies the directory from src to dst.
 // Most efficient copy of files is attempted.
diff --git a/vendor/github.com/containerd/containerd/fs/copy_linux.go b/vendor/github.com/containerd/containerd/fs/copy_linux.go
index efe4753..c1fb2d1 100644
--- a/vendor/github.com/containerd/containerd/fs/copy_linux.go
+++ b/vendor/github.com/containerd/containerd/fs/copy_linux.go
@@ -43,8 +43,8 @@
 			return errors.Wrap(err, "copy file range failed")
 		}
 
-		buf := bufferPool.Get().([]byte)
-		_, err = io.CopyBuffer(dst, src, buf)
+		buf := bufferPool.Get().(*[]byte)
+		_, err = io.CopyBuffer(dst, src, *buf)
 		bufferPool.Put(buf)
 		return err
 	}
diff --git a/vendor/github.com/containerd/containerd/fs/copy_unix.go b/vendor/github.com/containerd/containerd/fs/copy_unix.go
index 6234f3d..b31a14f 100644
--- a/vendor/github.com/containerd/containerd/fs/copy_unix.go
+++ b/vendor/github.com/containerd/containerd/fs/copy_unix.go
@@ -34,8 +34,8 @@
 }
 
 func copyFileContent(dst, src *os.File) error {
-	buf := bufferPool.Get().([]byte)
-	_, err := io.CopyBuffer(dst, src, buf)
+	buf := bufferPool.Get().(*[]byte)
+	_, err := io.CopyBuffer(dst, src, *buf)
 	bufferPool.Put(buf)
 
 	return err
diff --git a/vendor/github.com/containerd/containerd/fs/copy_windows.go b/vendor/github.com/containerd/containerd/fs/copy_windows.go
index fb4933c..6fb3de5 100644
--- a/vendor/github.com/containerd/containerd/fs/copy_windows.go
+++ b/vendor/github.com/containerd/containerd/fs/copy_windows.go
@@ -18,8 +18,8 @@
 }
 
 func copyFileContent(dst, src *os.File) error {
-	buf := bufferPool.Get().([]byte)
-	_, err := io.CopyBuffer(dst, src, buf)
+	buf := bufferPool.Get().(*[]byte)
+	_, err := io.CopyBuffer(dst, src, *buf)
 	bufferPool.Put(buf)
 	return err
 }
diff --git a/vendor/github.com/containerd/containerd/fs/diff.go b/vendor/github.com/containerd/containerd/fs/diff.go
index 9073d0d..3a53f42 100644
--- a/vendor/github.com/containerd/containerd/fs/diff.go
+++ b/vendor/github.com/containerd/containerd/fs/diff.go
@@ -222,8 +222,10 @@
 		c1 = make(chan *currentPath)
 		c2 = make(chan *currentPath)
 
-		f1, f2 *currentPath
-		rmdir  string
+		f1, f2         *currentPath
+		rmdir          string
+		lastEmittedDir = string(filepath.Separator)
+		parents        []os.FileInfo
 	)
 	g.Go(func() error {
 		defer close(c1)
@@ -258,7 +260,10 @@
 				continue
 			}
 
-			var f os.FileInfo
+			var (
+				f    os.FileInfo
+				emit = true
+			)
 			k, p := pathChange(f1, f2)
 			switch k {
 			case ChangeKindAdd:
@@ -294,13 +299,35 @@
 				f2 = nil
 				if same {
 					if !isLinked(f) {
-						continue
+						emit = false
 					}
 					k = ChangeKindUnmodified
 				}
 			}
-			if err := changeFn(k, p, f, nil); err != nil {
-				return err
+			if emit {
+				emittedDir, emitParents := commonParents(lastEmittedDir, p, parents)
+				for _, pf := range emitParents {
+					p := filepath.Join(emittedDir, pf.Name())
+					if err := changeFn(ChangeKindUnmodified, p, pf, nil); err != nil {
+						return err
+					}
+					emittedDir = p
+				}
+
+				if err := changeFn(k, p, f, nil); err != nil {
+					return err
+				}
+
+				if f != nil && f.IsDir() {
+					lastEmittedDir = p
+				} else {
+					lastEmittedDir = emittedDir
+				}
+
+				parents = parents[:0]
+			} else if f.IsDir() {
+				lastEmittedDir, parents = commonParents(lastEmittedDir, p, parents)
+				parents = append(parents, f)
 			}
 		}
 		return nil
@@ -308,3 +335,47 @@
 
 	return g.Wait()
 }
+
+func commonParents(base, updated string, dirs []os.FileInfo) (string, []os.FileInfo) {
+	if basePrefix := makePrefix(base); strings.HasPrefix(updated, basePrefix) {
+		var (
+			parents []os.FileInfo
+			last    = base
+		)
+		for _, d := range dirs {
+			next := filepath.Join(last, d.Name())
+			if strings.HasPrefix(updated, makePrefix(last)) {
+				parents = append(parents, d)
+				last = next
+			} else {
+				break
+			}
+		}
+		return base, parents
+	}
+
+	baseS := strings.Split(base, string(filepath.Separator))
+	updatedS := strings.Split(updated, string(filepath.Separator))
+	commonS := []string{string(filepath.Separator)}
+
+	min := len(baseS)
+	if len(updatedS) < min {
+		min = len(updatedS)
+	}
+	for i := 0; i < min; i++ {
+		if baseS[i] == updatedS[i] {
+			commonS = append(commonS, baseS[i])
+		} else {
+			break
+		}
+	}
+
+	return filepath.Join(commonS...), []os.FileInfo{}
+}
+
+func makePrefix(d string) string {
+	if d == "" || d[len(d)-1] != filepath.Separator {
+		return d + string(filepath.Separator)
+	}
+	return d
+}
diff --git a/vendor/github.com/containerd/containerd/fs/diff_unix.go b/vendor/github.com/containerd/containerd/fs/diff_unix.go
index 36a0f3f..3751814 100644
--- a/vendor/github.com/containerd/containerd/fs/diff_unix.go
+++ b/vendor/github.com/containerd/containerd/fs/diff_unix.go
@@ -5,36 +5,12 @@
 import (
 	"bytes"
 	"os"
-	"path/filepath"
-	"strings"
 	"syscall"
 
 	"github.com/containerd/continuity/sysx"
 	"github.com/pkg/errors"
 )
 
-// whiteouts are files with a special meaning for the layered filesystem.
-// Docker uses AUFS whiteout files inside exported archives. In other
-// filesystems these files are generated/handled on tar creation/extraction.
-
-// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
-// filename this means that file has been removed from the base layer.
-const whiteoutPrefix = ".wh."
-
-// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
-// for removing an actual file. Normally these files are excluded from exported
-// archives.
-const whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
-
-// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
-// layers. Normally these should not go into exported archives and all changed
-// hardlinks should be copied to the top layer.
-const whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
-
-// whiteoutOpaqueDir file means directory has been made opaque - meaning
-// readdir calls to this directory do not follow to lower layers.
-const whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
-
 // detectDirDiff returns diff dir options if a directory could
 // be found in the mount info for upper which is the direct
 // diff with the provided lower directory
@@ -45,26 +21,6 @@
 	return nil
 }
 
-func aufsMetadataSkip(path string) (skip bool, err error) {
-	skip, err = filepath.Match(string(os.PathSeparator)+whiteoutMetaPrefix+"*", path)
-	if err != nil {
-		skip = true
-	}
-	return
-}
-
-func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
-	f := filepath.Base(path)
-
-	// If there is a whiteout, then the file was removed
-	if strings.HasPrefix(f, whiteoutPrefix) {
-		originalFile := f[len(whiteoutPrefix):]
-		return filepath.Join(filepath.Dir(path), originalFile), nil
-	}
-
-	return "", nil
-}
-
 // compareSysStat returns whether the stats are equivalent,
 // whether the files are considered the same file, and
 // an error
diff --git a/vendor/github.com/containerd/containerd/fs/diff_windows.go b/vendor/github.com/containerd/containerd/fs/diff_windows.go
index 7bbd662..8eed365 100644
--- a/vendor/github.com/containerd/containerd/fs/diff_windows.go
+++ b/vendor/github.com/containerd/containerd/fs/diff_windows.go
@@ -1,14 +1,25 @@
 package fs
 
-import "os"
+import (
+	"os"
+
+	"golang.org/x/sys/windows"
+)
 
 func detectDirDiff(upper, lower string) *diffDirOptions {
 	return nil
 }
 
 func compareSysStat(s1, s2 interface{}) (bool, error) {
-	// TODO: Use windows specific sys type
-	return false, nil
+	f1, ok := s1.(windows.Win32FileAttributeData)
+	if !ok {
+		return false, nil
+	}
+	f2, ok := s2.(windows.Win32FileAttributeData)
+	if !ok {
+		return false, nil
+	}
+	return f1.FileAttributes == f2.FileAttributes, nil
 }
 
 func compareCapabilities(p1, p2 string) (bool, error) {
diff --git a/vendor/github.com/containerd/containerd/fs/du.go b/vendor/github.com/containerd/containerd/fs/du.go
index 61f439d..26f5333 100644
--- a/vendor/github.com/containerd/containerd/fs/du.go
+++ b/vendor/github.com/containerd/containerd/fs/du.go
@@ -1,5 +1,7 @@
 package fs
 
+import "context"
+
 // Usage of disk information
 type Usage struct {
 	Inodes int64
@@ -11,3 +13,10 @@
 func DiskUsage(roots ...string) (Usage, error) {
 	return diskUsage(roots...)
 }
+
+// DiffUsage counts the number of inodes and the disk usage of the
+// diff between the 2 directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+	return diffUsage(ctx, a, b)
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_unix.go b/vendor/github.com/containerd/containerd/fs/du_unix.go
index d8654d3..6328e80 100644
--- a/vendor/github.com/containerd/containerd/fs/du_unix.go
+++ b/vendor/github.com/containerd/containerd/fs/du_unix.go
@@ -3,17 +3,19 @@
 package fs
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"syscall"
 )
 
+type inode struct {
+	// TODO(stevvooe): Can probably reduce memory usage by not tracking
+	// device, but we can leave this as is for now.
+	dev, ino uint64
+}
+
 func diskUsage(roots ...string) (Usage, error) {
-	type inode struct {
-		// TODO(stevvooe): Can probably reduce memory usage by not tracking
-		// device, but we can leave this right for now.
-		dev, ino uint64
-	}
 
 	var (
 		size   int64
@@ -45,3 +47,37 @@
 		Size:   size,
 	}, nil
 }
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
+	)
+
+	if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if kind == ChangeKindAdd || kind == ChangeKindModify {
+			stat := fi.Sys().(*syscall.Stat_t)
+
+			inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
+			if _, ok := inodes[inoKey]; !ok {
+				inodes[inoKey] = struct{}{}
+				size += fi.Size()
+			}
+
+			return nil
+
+		}
+		return nil
+	}); err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		Inodes: int64(len(inodes)),
+		Size:   size,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_windows.go b/vendor/github.com/containerd/containerd/fs/du_windows.go
index 4a0363c..3f852fc 100644
--- a/vendor/github.com/containerd/containerd/fs/du_windows.go
+++ b/vendor/github.com/containerd/containerd/fs/du_windows.go
@@ -3,6 +3,7 @@
 package fs
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 )
@@ -31,3 +32,29 @@
 		Size: size,
 	}, nil
 }
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+	var (
+		size int64
+	)
+
+	if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if kind == ChangeKindAdd || kind == ChangeKindModify {
+			size += fi.Size()
+
+			return nil
+
+		}
+		return nil
+	}); err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		Size: size,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/path.go b/vendor/github.com/containerd/containerd/fs/path.go
index 644b1ee..412da67 100644
--- a/vendor/github.com/containerd/containerd/fs/path.go
+++ b/vendor/github.com/containerd/containerd/fs/path.go
@@ -74,11 +74,14 @@
 		// If the timestamp may have been truncated in one of the
 		// files, check content of file to determine difference
 		if t1.Nanosecond() == 0 || t2.Nanosecond() == 0 {
-			if f1.f.Size() > 0 {
-				eq, err := compareFileContent(f1.fullPath, f2.fullPath)
-				if err != nil || !eq {
-					return eq, err
-				}
+			var eq bool
+			if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
+				eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
+			} else if f1.f.Size() > 0 {
+				eq, err = compareFileContent(f1.fullPath, f2.fullPath)
+			}
+			if err != nil || !eq {
+				return eq, err
 			}
 		} else if t1.Nanosecond() != t2.Nanosecond() {
 			return false, nil
@@ -88,6 +91,18 @@
 	return true, nil
 }
 
+func compareSymlinkTarget(p1, p2 string) (bool, error) {
+	t1, err := os.Readlink(p1)
+	if err != nil {
+		return false, err
+	}
+	t2, err := os.Readlink(p2)
+	if err != nil {
+		return false, err
+	}
+	return t1 == t2, nil
+}
+
 const compareChuckSize = 32 * 1024
 
 // compareFileContent compares the content of 2 same sized files
diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go
index 70838a7..66898c5 100644
--- a/vendor/github.com/containerd/containerd/gc/gc.go
+++ b/vendor/github.com/containerd/containerd/gc/gc.go
@@ -36,7 +36,7 @@
 	var (
 		grays     []Node                // maintain a gray "stack"
 		seen      = map[Node]struct{}{} // or not "white", basically "seen"
-		reachable = map[Node]struct{}{} // or "block", in tri-color parlance
+		reachable = map[Node]struct{}{} // or "black", in tri-color parlance
 	)
 
 	grays = append(grays, roots...)
diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go
index 4f978e2..6e9f4bd 100644
--- a/vendor/github.com/containerd/containerd/image.go
+++ b/vendor/github.com/containerd/containerd/image.go
@@ -9,7 +9,7 @@
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -32,6 +32,8 @@
 	Config(ctx context.Context) (ocispec.Descriptor, error)
 	// IsUnpacked returns whether or not an image is unpacked.
 	IsUnpacked(context.Context, string) (bool, error)
+	// ContentStore provides a content store which contains image blob data
+	ContentStore() content.Store
 }
 
 var _ = (Image)(&image{})
@@ -86,6 +88,12 @@
 }
 
 func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
+	ctx, done, err := i.client.WithLease(ctx)
+	if err != nil {
+		return err
+	}
+	defer done()
+
 	layers, err := i.getLayers(ctx, platforms.Default())
 	if err != nil {
 		return err
@@ -104,7 +112,7 @@
 			"containerd.io/uncompressed": layer.Diff.Digest.String(),
 		}
 
-		unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a, snapshot.WithLabels(labels))
+		unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a, snapshots.WithLabels(labels))
 		if err != nil {
 			return err
 		}
@@ -139,7 +147,7 @@
 
 	manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
 	if err != nil {
-		return nil, errors.Wrap(err, "")
+		return nil, err
 	}
 
 	diffIDs, err := i.i.RootFS(ctx, cs, platform)
@@ -160,3 +168,7 @@
 	}
 	return layers, nil
 }
+
+func (i *image) ContentStore() content.Store {
+	return i.client.ContentStore()
+}
diff --git a/vendor/github.com/containerd/containerd/image_store.go b/vendor/github.com/containerd/containerd/image_store.go
new file mode 100644
index 0000000..9a3aafc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/image_store.go
@@ -0,0 +1,136 @@
+package containerd
+
+import (
+	"context"
+
+	imagesapi "github.com/containerd/containerd/api/services/images/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	ptypes "github.com/gogo/protobuf/types"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type remoteImages struct {
+	client imagesapi.ImagesClient
+}
+
+// NewImageStoreFromClient returns a new image store client
+func NewImageStoreFromClient(client imagesapi.ImagesClient) images.Store {
+	return &remoteImages{
+		client: client,
+	}
+}
+
+func (s *remoteImages) Get(ctx context.Context, name string) (images.Image, error) {
+	resp, err := s.client.Get(ctx, &imagesapi.GetImageRequest{
+		Name: name,
+	})
+	if err != nil {
+		return images.Image{}, errdefs.FromGRPC(err)
+	}
+
+	return imageFromProto(resp.Image), nil
+}
+
+func (s *remoteImages) List(ctx context.Context, filters ...string) ([]images.Image, error) {
+	resp, err := s.client.List(ctx, &imagesapi.ListImagesRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	return imagesFromProto(resp.Images), nil
+}
+
+func (s *remoteImages) Create(ctx context.Context, image images.Image) (images.Image, error) {
+	created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{
+		Image: imageToProto(&image),
+	})
+	if err != nil {
+		return images.Image{}, errdefs.FromGRPC(err)
+	}
+
+	return imageFromProto(&created.Image), nil
+}
+
+func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) {
+	var updateMask *ptypes.FieldMask
+	if len(fieldpaths) > 0 {
+		updateMask = &ptypes.FieldMask{
+			Paths: fieldpaths,
+		}
+	}
+
+	updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{
+		Image:      imageToProto(&image),
+		UpdateMask: updateMask,
+	})
+	if err != nil {
+		return images.Image{}, errdefs.FromGRPC(err)
+	}
+
+	return imageFromProto(&updated.Image), nil
+}
+
+func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error {
+	var do images.DeleteOptions
+	for _, opt := range opts {
+		if err := opt(ctx, &do); err != nil {
+			return err
+		}
+	}
+	_, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{
+		Name: name,
+		Sync: do.Synchronous,
+	})
+
+	return errdefs.FromGRPC(err)
+}
+
+func imageToProto(image *images.Image) imagesapi.Image {
+	return imagesapi.Image{
+		Name:      image.Name,
+		Labels:    image.Labels,
+		Target:    descToProto(&image.Target),
+		CreatedAt: image.CreatedAt,
+		UpdatedAt: image.UpdatedAt,
+	}
+}
+
+func imageFromProto(imagepb *imagesapi.Image) images.Image {
+	return images.Image{
+		Name:      imagepb.Name,
+		Labels:    imagepb.Labels,
+		Target:    descFromProto(&imagepb.Target),
+		CreatedAt: imagepb.CreatedAt,
+		UpdatedAt: imagepb.UpdatedAt,
+	}
+}
+
+func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
+	var images []images.Image
+
+	for _, image := range imagespb {
+		images = append(images, imageFromProto(&image))
+	}
+
+	return images
+}
+
+func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
+	return ocispec.Descriptor{
+		MediaType: desc.MediaType,
+		Size:      desc.Size_,
+		Digest:    desc.Digest,
+	}
+}
+
+func descToProto(desc *ocispec.Descriptor) types.Descriptor {
+	return types.Descriptor{
+		MediaType: desc.MediaType,
+		Size_:     desc.Size,
+		Digest:    desc.Digest,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go
index 4c78c6c..7b4215f 100644
--- a/vendor/github.com/containerd/containerd/images/image.go
+++ b/vendor/github.com/containerd/containerd/images/image.go
@@ -38,6 +38,23 @@
 	CreatedAt, UpdatedAt time.Time
 }
 
+// DeleteOptions provide options on image delete
+type DeleteOptions struct {
+	Synchronous bool
+}
+
+// DeleteOpt allows configuring a delete operation
+type DeleteOpt func(context.Context, *DeleteOptions) error
+
+// SynchronousDelete is used to indicate that an image deletion and removal of
+// the image resources should occur synchronously before returning a result.
+func SynchronousDelete() DeleteOpt {
+	return func(ctx context.Context, o *DeleteOptions) error {
+		o.Synchronous = true
+		return nil
+	}
+}
+
 // Store and interact with images
 type Store interface {
 	Get(ctx context.Context, name string) (Image, error)
@@ -48,7 +65,7 @@
 	// one or more fieldpaths are provided, only those fields will be updated.
 	Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
 
-	Delete(ctx context.Context, name string) error
+	Delete(ctx context.Context, name string, opts ...DeleteOpt) error
 }
 
 // TODO(stevvooe): Many of these functions make strong platform assumptions,
@@ -170,13 +187,13 @@
 			return descs, nil
 
 		}
-		return nil, errors.Wrap(errdefs.ErrNotFound, "could not resolve manifest")
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
 	}), image); err != nil {
 		return ocispec.Manifest{}, err
 	}
 
 	if m == nil {
-		return ocispec.Manifest{}, errors.Wrap(errdefs.ErrNotFound, "manifest not found")
+		return ocispec.Manifest{}, errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
 	}
 
 	return *m, nil
@@ -240,7 +257,7 @@
 			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
 		}
 
-		return false, nil, nil, nil, errors.Wrap(err, "image check failed")
+		return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
 	}
 
 	// TODO(stevvooe): It is possible that referenced conponents could have
@@ -255,7 +272,7 @@
 				missing = append(missing, desc)
 				continue
 			} else {
-				return false, nil, nil, nil, err
+				return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
 			}
 		}
 		ra.Close()
diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go
new file mode 100644
index 0000000..f8cf742
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/importexport.go
@@ -0,0 +1,21 @@
+package images
+
+import (
+	"context"
+	"io"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Importer is the interface for image importer.
+type Importer interface {
+	// Import imports an image from a tar stream.
+	Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
+}
+
+// Exporter is the interface for image exporter.
+type Exporter interface {
+	// Export exports an image to a tar stream.
+	Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error
+}
diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go
deleted file mode 100644
index 9f8f9af..0000000
--- a/vendor/github.com/containerd/containerd/import.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package containerd
-
-import (
-	"archive/tar"
-	"context"
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"strings"
-
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/reference"
-	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-func resolveOCIIndex(idx ocispec.Index, refObject string) (*ocispec.Descriptor, error) {
-	tag, dgst := reference.SplitObject(refObject)
-	if tag == "" && dgst == "" {
-		return nil, errors.Errorf("unexpected object: %q", refObject)
-	}
-	for _, m := range idx.Manifests {
-		if m.Digest == dgst {
-			return &m, nil
-		}
-		annot, ok := m.Annotations[ocispec.AnnotationRefName]
-		if ok && annot == tag && tag != "" {
-			return &m, nil
-		}
-	}
-	return nil, errors.Errorf("not found: %q", refObject)
-}
-
-func (c *Client) importFromOCITar(ctx context.Context, ref string, reader io.Reader, iopts importOpts) (Image, error) {
-	tr := tar.NewReader(reader)
-	store := c.ContentStore()
-	var desc *ocispec.Descriptor
-	for {
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return nil, err
-		}
-		if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
-			continue
-		}
-		if hdr.Name == "index.json" {
-			desc, err = onUntarIndexJSON(tr, iopts.refObject)
-			if err != nil {
-				return nil, err
-			}
-			continue
-		}
-		if strings.HasPrefix(hdr.Name, "blobs/") {
-			if err := onUntarBlob(ctx, tr, store, hdr.Name, hdr.Size); err != nil {
-				return nil, err
-			}
-		}
-	}
-	if desc == nil {
-		return nil, errors.Errorf("no descriptor found for reference object %q", iopts.refObject)
-	}
-	imgrec := images.Image{
-		Name:   ref,
-		Target: *desc,
-		Labels: iopts.labels,
-	}
-	is := c.ImageService()
-	if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
-		if !errdefs.IsNotFound(err) {
-			return nil, err
-		}
-
-		created, err := is.Create(ctx, imgrec)
-		if err != nil {
-			return nil, err
-		}
-
-		imgrec = created
-	} else {
-		imgrec = updated
-	}
-
-	img := &image{
-		client: c,
-		i:      imgrec,
-	}
-	return img, nil
-}
-
-func onUntarIndexJSON(r io.Reader, refObject string) (*ocispec.Descriptor, error) {
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-	var idx ocispec.Index
-	if err := json.Unmarshal(b, &idx); err != nil {
-		return nil, err
-	}
-	return resolveOCIIndex(idx, refObject)
-}
-
-func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
-	// name is like "blobs/sha256/deadbeef"
-	split := strings.Split(name, "/")
-	if len(split) != 3 {
-		return errors.Errorf("unexpected name: %q", name)
-	}
-	algo := digest.Algorithm(split[1])
-	if !algo.Available() {
-		return errors.Errorf("unsupported algorithm: %s", algo)
-	}
-	dgst := digest.NewDigestFromHex(algo.String(), split[2])
-	return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
-}
diff --git a/vendor/github.com/containerd/containerd/lease.go b/vendor/github.com/containerd/containerd/lease.go
index 6ecc58d..8eb3bc0 100644
--- a/vendor/github.com/containerd/containerd/lease.go
+++ b/vendor/github.com/containerd/containerd/lease.go
@@ -51,7 +51,8 @@
 	return leases, nil
 }
 
-func (c *Client) withLease(ctx context.Context) (context.Context, func() error, error) {
+// WithLease attaches a lease on the context
+func (c *Client) WithLease(ctx context.Context) (context.Context, func() error, error) {
 	_, ok := leases.Lease(ctx)
 	if ok {
 		return ctx, func() error {
diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go
index 72fcab9..629d7f5 100644
--- a/vendor/github.com/containerd/containerd/linux/bundle.go
+++ b/vendor/github.com/containerd/containerd/linux/bundle.go
@@ -10,7 +10,7 @@
 	"path/filepath"
 
 	"github.com/containerd/containerd/events/exchange"
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/containerd/containerd/linux/shim"
 	"github.com/containerd/containerd/linux/shim/client"
 	"github.com/pkg/errors"
@@ -72,32 +72,32 @@
 }
 
 // ShimOpt specifies shim options for initialization and connection
-type ShimOpt func(*bundle, string, *runcopts.RuncOptions) (shim.Config, client.Opt)
+type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt)
 
 // ShimRemote is a ShimOpt for connecting and starting a remote shim
-func ShimRemote(shimBinary, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) ShimOpt {
-	return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (shim.Config, client.Opt) {
+func ShimRemote(shimBinary, daemonAddress, cgroup string, debug bool, exitHandler func()) ShimOpt {
+	return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
 		return b.shimConfig(ns, ropts),
-			client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, nonewns, debug, exitHandler)
+			client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, debug, exitHandler)
 	}
 }
 
 // ShimLocal is a ShimOpt for using an in process shim implementation
 func ShimLocal(exchange *exchange.Exchange) ShimOpt {
-	return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (shim.Config, client.Opt) {
+	return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
 		return b.shimConfig(ns, ropts), client.WithLocal(exchange)
 	}
 }
 
 // ShimConnect is a ShimOpt for connecting to an existing remote shim
 func ShimConnect() ShimOpt {
-	return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (shim.Config, client.Opt) {
+	return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
 		return b.shimConfig(ns, ropts), client.WithConnect(b.shimAddress(ns))
 	}
 }
 
 // NewShimClient connects to the shim managing the bundle and tasks creating it if needed
-func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientOpts ShimOpt, runcOpts *runcopts.RuncOptions) (*client.Client, error) {
+func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientOpts ShimOpt, runcOpts *runctypes.RuncOptions) (*client.Client, error) {
 	cfg, opt := getClientOpts(b, namespace, runcOpts)
 	return client.New(ctx, cfg, opt)
 }
@@ -120,7 +120,7 @@
 	return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock")
 }
 
-func (b *bundle) shimConfig(namespace string, runcOptions *runcopts.RuncOptions) shim.Config {
+func (b *bundle) shimConfig(namespace string, runcOptions *runctypes.RuncOptions) shim.Config {
 	var (
 		criuPath      string
 		runtimeRoot   string
diff --git a/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go b/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go
similarity index 79%
rename from vendor/github.com/containerd/containerd/linux/shim/deleted_state.go
rename to vendor/github.com/containerd/containerd/linux/proc/deleted_state.go
index 6d22735..fb25878 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/deleted_state.go
@@ -1,12 +1,12 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
 
 	"github.com/containerd/console"
-	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	google_protobuf "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 )
 
@@ -21,11 +21,11 @@
 	return errors.Errorf("cannot resume a deleted process")
 }
 
-func (s *deletedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (s *deletedState) Update(context context.Context, r *google_protobuf.Any) error {
 	return errors.Errorf("cannot update a deleted process")
 }
 
-func (s *deletedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+func (s *deletedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	return errors.Errorf("cannot checkpoint a deleted process")
 }
 
diff --git a/vendor/github.com/containerd/containerd/linux/shim/exec.go b/vendor/github.com/containerd/containerd/linux/proc/exec.go
similarity index 73%
rename from vendor/github.com/containerd/containerd/linux/shim/exec.go
rename to vendor/github.com/containerd/containerd/linux/proc/exec.go
index 3d27c91..00a8547 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/exec.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/exec.go
@@ -1,12 +1,12 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"io"
+	"os"
 	"path/filepath"
 	"sync"
 	"syscall"
@@ -15,8 +15,6 @@
 	"golang.org/x/sys/unix"
 
 	"github.com/containerd/console"
-	"github.com/containerd/containerd/identifiers"
-	shimapi "github.com/containerd/containerd/linux/shim/v1"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -26,7 +24,7 @@
 type execProcess struct {
 	wg sync.WaitGroup
 
-	processState
+	State
 
 	mu      sync.Mutex
 	id      string
@@ -37,42 +35,14 @@
 	pid     int
 	closers []io.Closer
 	stdin   io.Closer
-	stdio   stdio
+	stdio   Stdio
 	path    string
 	spec    specs.Process
 
-	parent    *initProcess
+	parent    *Init
 	waitBlock chan struct{}
 }
 
-func newExecProcess(context context.Context, path string, r *shimapi.ExecProcessRequest, parent *initProcess, id string) (process, error) {
-	if err := identifiers.Validate(id); err != nil {
-		return nil, errors.Wrapf(err, "invalid exec id")
-	}
-	// process exec request
-	var spec specs.Process
-	if err := json.Unmarshal(r.Spec.Value, &spec); err != nil {
-		return nil, err
-	}
-	spec.Terminal = r.Terminal
-
-	e := &execProcess{
-		id:     id,
-		path:   path,
-		parent: parent,
-		spec:   spec,
-		stdio: stdio{
-			stdin:    r.Stdin,
-			stdout:   r.Stdout,
-			stderr:   r.Stderr,
-			terminal: r.Terminal,
-		},
-		waitBlock: make(chan struct{}),
-	}
-	e.processState = &execCreatedState{p: e}
-	return e, nil
-}
-
 func (e *execProcess) Wait() {
 	<-e.waitBlock
 }
@@ -102,7 +72,7 @@
 func (e *execProcess) setExited(status int) {
 	e.status = status
 	e.exited = time.Now()
-	e.parent.platform.shutdownConsole(context.Background(), e.console)
+	e.parent.platform.ShutdownConsole(context.Background(), e.console)
 	close(e.waitBlock)
 }
 
@@ -114,6 +84,9 @@
 		}
 		e.io.Close()
 	}
+	pidfile := filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id))
+	// silently ignore error
+	os.Remove(pidfile)
 	return nil
 }
 
@@ -138,7 +111,7 @@
 	return e.stdin
 }
 
-func (e *execProcess) Stdio() stdio {
+func (e *execProcess) Stdio() Stdio {
 	return e.stdio
 }
 
@@ -147,12 +120,12 @@
 		socket  *runc.Socket
 		pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id))
 	)
-	if e.stdio.terminal {
+	if e.stdio.Terminal {
 		if socket, err = runc.NewTempConsoleSocket(); err != nil {
 			return errors.Wrap(err, "failed to create runc console socket")
 		}
 		defer socket.Close()
-	} else if e.stdio.isNull() {
+	} else if e.stdio.IsNull() {
 		if e.io, err = runc.NewNullIO(); err != nil {
 			return errors.Wrap(err, "creating new NULL IO")
 		}
@@ -170,12 +143,13 @@
 		opts.ConsoleSocket = socket
 	}
 	if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil {
+		close(e.waitBlock)
 		return e.parent.runtimeError(err, "OCI runtime exec failed")
 	}
-	if e.stdio.stdin != "" {
-		sc, err := fifo.OpenFifo(ctx, e.stdio.stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+	if e.stdio.Stdin != "" {
+		sc, err := fifo.OpenFifo(ctx, e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
 		if err != nil {
-			return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.stdin)
+			return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin)
 		}
 		e.closers = append(e.closers, sc)
 		e.stdin = sc
@@ -186,11 +160,11 @@
 		if err != nil {
 			return errors.Wrap(err, "failed to retrieve console master")
 		}
-		if e.console, err = e.parent.platform.copyConsole(ctx, console, e.stdio.stdin, e.stdio.stdout, e.stdio.stderr, &e.wg, &copyWaitGroup); err != nil {
+		if e.console, err = e.parent.platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, &copyWaitGroup); err != nil {
 			return errors.Wrap(err, "failed to start console copy")
 		}
-	} else if !e.stdio.isNull() {
-		if err := copyPipes(ctx, e.io, e.stdio.stdin, e.stdio.stdout, e.stdio.stderr, &e.wg, &copyWaitGroup); err != nil {
+	} else if !e.stdio.IsNull() {
+		if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, &copyWaitGroup); err != nil {
 			return errors.Wrap(err, "failed to start io pipe copy")
 		}
 	}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/exec_state.go b/vendor/github.com/containerd/containerd/linux/proc/exec_state.go
similarity index 93%
rename from vendor/github.com/containerd/containerd/linux/shim/exec_state.go
rename to vendor/github.com/containerd/containerd/linux/proc/exec_state.go
index 4a4aaa2..3c3c265 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/exec_state.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/exec_state.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
@@ -16,11 +16,11 @@
 func (s *execCreatedState) transition(name string) error {
 	switch name {
 	case "running":
-		s.p.processState = &execRunningState{p: s.p}
+		s.p.State = &execRunningState{p: s.p}
 	case "stopped":
-		s.p.processState = &execStoppedState{p: s.p}
+		s.p.State = &execStoppedState{p: s.p}
 	case "deleted":
-		s.p.processState = &deletedState{}
+		s.p.State = &deletedState{}
 	default:
 		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
 	}
@@ -77,7 +77,7 @@
 func (s *execRunningState) transition(name string) error {
 	switch name {
 	case "stopped":
-		s.p.processState = &execStoppedState{p: s.p}
+		s.p.State = &execStoppedState{p: s.p}
 	default:
 		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
 	}
@@ -130,7 +130,7 @@
 func (s *execStoppedState) transition(name string) error {
 	switch name {
 	case "deleted":
-		s.p.processState = &deletedState{}
+		s.p.State = &deletedState{}
 	default:
 		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
 	}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/init.go b/vendor/github.com/containerd/containerd/linux/proc/init.go
similarity index 69%
rename from vendor/github.com/containerd/containerd/linux/shim/init.go
rename to vendor/github.com/containerd/containerd/linux/proc/init.go
index 01c305b..f24f92f 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/init.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/init.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
@@ -15,14 +15,13 @@
 	"time"
 
 	"github.com/containerd/console"
-	"github.com/containerd/containerd/identifiers"
-	"github.com/containerd/containerd/linux/runcopts"
-	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 	"github.com/containerd/typeurl"
+	google_protobuf "github.com/gogo/protobuf/types"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )
@@ -30,7 +29,8 @@
 // InitPidFile name of the file that contains the init pid
 const InitPidFile = "init.pid"
 
-type initProcess struct {
+// Init represents an initial process for a container
+type Init struct {
 	wg sync.WaitGroup
 	initState
 
@@ -47,7 +47,7 @@
 	id       string
 	bundle   string
 	console  console.Console
-	platform platform
+	platform Platform
 	io       runc.IO
 	runtime  *runc.Runc
 	status   int
@@ -55,28 +55,42 @@
 	pid      int
 	closers  []io.Closer
 	stdin    io.Closer
-	stdio    stdio
+	stdio    Stdio
 	rootfs   string
 	IoUID    int
 	IoGID    int
 }
 
-func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskRequest) (*initProcess, error) {
+// NewRunc returns a new runc instance for a process
+func NewRunc(root, path, namespace, runtime, criu string, systemd bool) *runc.Runc {
+	if root == "" {
+		root = RuncRoot
+	}
+	return &runc.Runc{
+		Command:       runtime,
+		Log:           filepath.Join(path, "log.json"),
+		LogFormat:     runc.JSON,
+		PdeathSignal:  syscall.SIGKILL,
+		Root:          filepath.Join(root, namespace),
+		Criu:          criu,
+		SystemdCgroup: systemd,
+	}
+}
+
+// New returns a new init process
+func New(context context.Context, path, workDir, runtimeRoot, namespace, criu string, systemdCgroup bool, platform Platform, r *CreateConfig) (*Init, error) {
 	var success bool
 
-	if err := identifiers.Validate(r.ID); err != nil {
-		return nil, errors.Wrapf(err, "invalid task id")
-	}
-	var options runcopts.CreateOptions
+	var options runctypes.CreateOptions
 	if r.Options != nil {
 		v, err := typeurl.UnmarshalAny(r.Options)
 		if err != nil {
 			return nil, err
 		}
-		options = *v.(*runcopts.CreateOptions)
+		options = *v.(*runctypes.CreateOptions)
 	}
 
-	rootfs := filepath.Join(s.config.Path, "rootfs")
+	rootfs := filepath.Join(path, "rootfs")
 	// count the number of successful mounts so we can undo
 	// what was actually done rather than what should have been
 	// done.
@@ -98,32 +112,20 @@
 			return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m)
 		}
 	}
-	root := s.config.RuntimeRoot
-	if root == "" {
-		root = RuncRoot
-	}
-	runtime := &runc.Runc{
-		Command:       r.Runtime,
-		Log:           filepath.Join(s.config.Path, "log.json"),
-		LogFormat:     runc.JSON,
-		PdeathSignal:  syscall.SIGKILL,
-		Root:          filepath.Join(root, s.config.Namespace),
-		Criu:          s.config.Criu,
-		SystemdCgroup: s.config.SystemdCgroup,
-	}
-	p := &initProcess{
+	runtime := NewRunc(runtimeRoot, path, namespace, r.Runtime, criu, systemdCgroup)
+	p := &Init{
 		id:       r.ID,
 		bundle:   r.Bundle,
 		runtime:  runtime,
-		platform: s.platform,
-		stdio: stdio{
-			stdin:    r.Stdin,
-			stdout:   r.Stdout,
-			stderr:   r.Stderr,
-			terminal: r.Terminal,
+		platform: platform,
+		stdio: Stdio{
+			Stdin:    r.Stdin,
+			Stdout:   r.Stdout,
+			Stderr:   r.Stderr,
+			Terminal: r.Terminal,
 		},
 		rootfs:    rootfs,
-		workDir:   s.config.WorkDir,
+		workDir:   workDir,
 		status:    0,
 		waitBlock: make(chan struct{}),
 		IoUID:     int(options.IoUid),
@@ -148,7 +150,7 @@
 			return nil, errors.Wrap(err, "failed to create OCI runtime io pipes")
 		}
 	}
-	pidFile := filepath.Join(s.config.Path, InitPidFile)
+	pidFile := filepath.Join(path, InitPidFile)
 	if r.Checkpoint != "" {
 		opts := &runc.RestoreOpts{
 			CheckpointOpts: runc.CheckpointOpts{
@@ -195,7 +197,7 @@
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to retrieve console master")
 		}
-		console, err = s.platform.copyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, &copyWaitGroup)
+		console, err = platform.CopyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, &copyWaitGroup)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to start console copy")
 		}
@@ -216,31 +218,37 @@
 	return p, nil
 }
 
-func (p *initProcess) Wait() {
+// Wait for the process to exit
+func (p *Init) Wait() {
 	<-p.waitBlock
 }
 
-func (p *initProcess) ID() string {
+// ID of the process
+func (p *Init) ID() string {
 	return p.id
 }
 
-func (p *initProcess) Pid() int {
+// Pid of the process
+func (p *Init) Pid() int {
 	return p.pid
 }
 
-func (p *initProcess) ExitStatus() int {
+// ExitStatus of the process
+func (p *Init) ExitStatus() int {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	return p.status
 }
 
-func (p *initProcess) ExitedAt() time.Time {
+// ExitedAt at time when the process exited
+func (p *Init) ExitedAt() time.Time {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	return p.exited
 }
 
-func (p *initProcess) Status(ctx context.Context) (string, error) {
+// Status of the process
+func (p *Init) Status(ctx context.Context) (string, error) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	c, err := p.runtime.State(ctx, p.id)
@@ -253,20 +261,20 @@
 	return c.Status, nil
 }
 
-func (p *initProcess) start(context context.Context) error {
+func (p *Init) start(context context.Context) error {
 	err := p.runtime.Start(context, p.id)
 	return p.runtimeError(err, "OCI runtime start failed")
 }
 
-func (p *initProcess) setExited(status int) {
+func (p *Init) setExited(status int) {
 	p.exited = time.Now()
 	p.status = status
-	p.platform.shutdownConsole(context.Background(), p.console)
+	p.platform.ShutdownConsole(context.Background(), p.console)
 	close(p.waitBlock)
 }
 
-func (p *initProcess) delete(context context.Context) error {
-	p.killAll(context)
+func (p *Init) delete(context context.Context) error {
+	p.KillAll(context)
 	p.wg.Wait()
 	err := p.runtime.Delete(context, p.id, nil)
 	// ignore errors if a runtime has already deleted the process
@@ -296,49 +304,82 @@
 	return err
 }
 
-func (p *initProcess) resize(ws console.WinSize) error {
+func (p *Init) resize(ws console.WinSize) error {
 	if p.console == nil {
 		return nil
 	}
 	return p.console.Resize(ws)
 }
 
-func (p *initProcess) pause(context context.Context) error {
+func (p *Init) pause(context context.Context) error {
 	err := p.runtime.Pause(context, p.id)
 	return p.runtimeError(err, "OCI runtime pause failed")
 }
 
-func (p *initProcess) resume(context context.Context) error {
+func (p *Init) resume(context context.Context) error {
 	err := p.runtime.Resume(context, p.id)
 	return p.runtimeError(err, "OCI runtime resume failed")
 }
 
-func (p *initProcess) kill(context context.Context, signal uint32, all bool) error {
+func (p *Init) kill(context context.Context, signal uint32, all bool) error {
 	err := p.runtime.Kill(context, p.id, int(signal), &runc.KillOpts{
 		All: all,
 	})
 	return checkKillError(err)
 }
 
-func (p *initProcess) killAll(context context.Context) error {
+// KillAll processes belonging to the init process
+func (p *Init) KillAll(context context.Context) error {
 	err := p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runc.KillOpts{
 		All: true,
 	})
 	return p.runtimeError(err, "OCI runtime killall failed")
 }
 
-func (p *initProcess) Stdin() io.Closer {
+// Stdin of the process
+func (p *Init) Stdin() io.Closer {
 	return p.stdin
 }
 
-func (p *initProcess) checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
-	var options runcopts.CheckpointOptions
+// Runtime returns the OCI runtime configured for the init process
+func (p *Init) Runtime() *runc.Runc {
+	return p.runtime
+}
+
+// Exec returns a new exec'd process
+func (p *Init) Exec(context context.Context, path string, r *ExecConfig) (Process, error) {
+	// process exec request
+	var spec specs.Process
+	if err := json.Unmarshal(r.Spec.Value, &spec); err != nil {
+		return nil, err
+	}
+	spec.Terminal = r.Terminal
+
+	e := &execProcess{
+		id:     r.ID,
+		path:   path,
+		parent: p,
+		spec:   spec,
+		stdio: Stdio{
+			Stdin:    r.Stdin,
+			Stdout:   r.Stdout,
+			Stderr:   r.Stderr,
+			Terminal: r.Terminal,
+		},
+		waitBlock: make(chan struct{}),
+	}
+	e.State = &execCreatedState{p: e}
+	return e, nil
+}
+
+func (p *Init) checkpoint(context context.Context, r *CheckpointConfig) error {
+	var options runctypes.CheckpointOptions
 	if r.Options != nil {
 		v, err := typeurl.UnmarshalAny(r.Options)
 		if err != nil {
 			return err
 		}
-		options = *v.(*runcopts.CheckpointOptions)
+		options = *v.(*runctypes.CheckpointOptions)
 	}
 	var actions []runc.CheckpointAction
 	if !options.Exit {
@@ -364,19 +405,20 @@
 	return nil
 }
 
-func (p *initProcess) update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (p *Init) update(context context.Context, r *google_protobuf.Any) error {
 	var resources specs.LinuxResources
-	if err := json.Unmarshal(r.Resources.Value, &resources); err != nil {
+	if err := json.Unmarshal(r.Value, &resources); err != nil {
 		return err
 	}
 	return p.runtime.Update(context, p.id, &resources)
 }
 
-func (p *initProcess) Stdio() stdio {
+// Stdio of the process
+func (p *Init) Stdio() Stdio {
 	return p.stdio
 }
 
-func (p *initProcess) runtimeError(rErr error, msg string) error {
+func (p *Init) runtimeError(rErr error, msg string) error {
 	if rErr == nil {
 		return nil
 	}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/init_state.go b/vendor/github.com/containerd/containerd/linux/proc/init_state.go
similarity index 87%
rename from vendor/github.com/containerd/containerd/linux/shim/init_state.go
rename to vendor/github.com/containerd/containerd/linux/proc/init_state.go
index da7e15b..b5b398e 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/init_state.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/init_state.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
@@ -9,23 +9,23 @@
 
 	"github.com/containerd/console"
 	"github.com/containerd/containerd/errdefs"
-	shimapi "github.com/containerd/containerd/linux/shim/v1"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
+	google_protobuf "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 )
 
 type initState interface {
-	processState
+	State
 
 	Pause(context.Context) error
 	Resume(context.Context) error
-	Update(context.Context, *shimapi.UpdateTaskRequest) error
-	Checkpoint(context.Context, *shimapi.CheckpointTaskRequest) error
+	Update(context.Context, *google_protobuf.Any) error
+	Checkpoint(context.Context, *CheckpointConfig) error
 }
 
 type createdState struct {
-	p *initProcess
+	p *Init
 }
 
 func (s *createdState) transition(name string) error {
@@ -56,14 +56,14 @@
 	return errors.Errorf("cannot resume task in created state")
 }
 
-func (s *createdState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (s *createdState) Update(context context.Context, r *google_protobuf.Any) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
 	return s.p.update(context, r)
 }
 
-func (s *createdState) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
+func (s *createdState) Checkpoint(context context.Context, r *CheckpointConfig) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
@@ -114,7 +114,7 @@
 }
 
 type createdCheckpointState struct {
-	p    *initProcess
+	p    *Init
 	opts *runc.RestoreOpts
 }
 
@@ -146,14 +146,14 @@
 	return errors.Errorf("cannot resume task in created state")
 }
 
-func (s *createdCheckpointState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (s *createdCheckpointState) Update(context context.Context, r *google_protobuf.Any) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
 	return s.p.update(context, r)
 }
 
-func (s *createdCheckpointState) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
+func (s *createdCheckpointState) Checkpoint(context context.Context, r *CheckpointConfig) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
@@ -175,17 +175,17 @@
 		return p.runtimeError(err, "OCI runtime restore failed")
 	}
 	sio := p.stdio
-	if sio.stdin != "" {
-		sc, err := fifo.OpenFifo(ctx, sio.stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+	if sio.Stdin != "" {
+		sc, err := fifo.OpenFifo(ctx, sio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
 		if err != nil {
-			return errors.Wrapf(err, "failed to open stdin fifo %s", sio.stdin)
+			return errors.Wrapf(err, "failed to open stdin fifo %s", sio.Stdin)
 		}
 		p.stdin = sc
 		p.closers = append(p.closers, sc)
 	}
 	var copyWaitGroup sync.WaitGroup
-	if !sio.isNull() {
-		if err := copyPipes(ctx, p.io, sio.stdin, sio.stdout, sio.stderr, &p.wg, &copyWaitGroup); err != nil {
+	if !sio.IsNull() {
+		if err := copyPipes(ctx, p.io, sio.Stdin, sio.Stdout, sio.Stderr, &p.wg, &copyWaitGroup); err != nil {
 			return errors.Wrap(err, "failed to start io pipe copy")
 		}
 	}
@@ -228,7 +228,7 @@
 }
 
 type runningState struct {
-	p *initProcess
+	p *Init
 }
 
 func (s *runningState) transition(name string) error {
@@ -259,14 +259,14 @@
 	return errors.Errorf("cannot resume a running process")
 }
 
-func (s *runningState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (s *runningState) Update(context context.Context, r *google_protobuf.Any) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
 	return s.p.update(context, r)
 }
 
-func (s *runningState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+func (s *runningState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
@@ -313,7 +313,7 @@
 }
 
 type pausedState struct {
-	p *initProcess
+	p *Init
 }
 
 func (s *pausedState) transition(name string) error {
@@ -345,14 +345,14 @@
 	return s.transition("running")
 }
 
-func (s *pausedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (s *pausedState) Update(context context.Context, r *google_protobuf.Any) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
 	return s.p.update(context, r)
 }
 
-func (s *pausedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+func (s *pausedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
@@ -400,7 +400,7 @@
 }
 
 type stoppedState struct {
-	p *initProcess
+	p *Init
 }
 
 func (s *stoppedState) transition(name string) error {
@@ -427,14 +427,14 @@
 	return errors.Errorf("cannot resume a stopped container")
 }
 
-func (s *stoppedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+func (s *stoppedState) Update(context context.Context, r *google_protobuf.Any) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
 	return errors.Errorf("cannot update a stopped container")
 }
 
-func (s *stoppedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+func (s *stoppedState) Checkpoint(ctx context.Context, r *CheckpointConfig) error {
 	s.p.mu.Lock()
 	defer s.p.mu.Unlock()
 
diff --git a/vendor/github.com/containerd/containerd/linux/shim/io.go b/vendor/github.com/containerd/containerd/linux/proc/io.go
similarity index 66%
rename from vendor/github.com/containerd/containerd/linux/shim/io.go
rename to vendor/github.com/containerd/containerd/linux/proc/io.go
index 49ba8e0..e78b383 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/io.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/io.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
@@ -9,44 +9,10 @@
 	"sync"
 	"syscall"
 
-	"github.com/containerd/console"
 	"github.com/containerd/fifo"
 	runc "github.com/containerd/go-runc"
 )
 
-func copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
-	if stdin != "" {
-		in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
-		if err != nil {
-			return err
-		}
-		cwg.Add(1)
-		go func() {
-			cwg.Done()
-			io.Copy(console, in)
-		}()
-	}
-	outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0)
-	if err != nil {
-		return err
-	}
-	outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0)
-	if err != nil {
-		return err
-	}
-	wg.Add(1)
-	cwg.Add(1)
-	go func() {
-		cwg.Done()
-		io.Copy(outw, console)
-		console.Close()
-		outr.Close()
-		outw.Close()
-		wg.Done()
-	}()
-	return nil
-}
-
 func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
 	for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){
 		stdout: func(wc io.WriteCloser, rc io.Closer) {
diff --git a/vendor/github.com/containerd/containerd/linux/shim/process.go b/vendor/github.com/containerd/containerd/linux/proc/process.go
similarity index 61%
rename from vendor/github.com/containerd/containerd/linux/shim/process.go
rename to vendor/github.com/containerd/containerd/linux/proc/process.go
index f0b4692..c0d33ad 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/process.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/process.go
@@ -1,30 +1,36 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"context"
 	"io"
+	"sync"
 	"time"
 
 	"github.com/containerd/console"
 	"github.com/pkg/errors"
 )
 
-type stdio struct {
-	stdin    string
-	stdout   string
-	stderr   string
-	terminal bool
+// RuncRoot is the path to the root runc state directory
+const RuncRoot = "/run/containerd/runc"
+
+// Stdio of a process
+type Stdio struct {
+	Stdin    string
+	Stdout   string
+	Stderr   string
+	Terminal bool
 }
 
-func (s stdio) isNull() bool {
-	return s.stdin == "" && s.stdout == "" && s.stderr == ""
+// IsNull returns true if the stdio is not defined
+func (s Stdio) IsNull() bool {
+	return s.Stdin == "" && s.Stdout == "" && s.Stderr == ""
 }
 
-type process interface {
-	processState
-
+// Process on a linux system
+type Process interface {
+	State
 	// ID returns the id for the process
 	ID() string
 	// Pid returns the pid for the process
@@ -36,14 +42,15 @@
 	// Stdin returns the process STDIN
 	Stdin() io.Closer
 	// Stdio returns io information for the container
-	Stdio() stdio
+	Stdio() Stdio
 	// Status returns the process status
 	Status(context.Context) (string, error)
 	// Wait blocks until the process has exited
 	Wait()
 }
 
-type processState interface {
+// State of a process
+type State interface {
 	// Resize resizes the process console
 	Resize(ws console.WinSize) error
 	// Start execution of the process
@@ -71,3 +78,12 @@
 	}
 	panic(errors.Errorf("invalid state %v", v))
 }
+
+// Platform handles platform-specific behavior that may differs across
+// platform implementations
+type Platform interface {
+	CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string,
+		wg, cwg *sync.WaitGroup) (console.Console, error)
+	ShutdownConsole(ctx context.Context, console console.Console) error
+	Close() error
+}
diff --git a/vendor/github.com/containerd/containerd/linux/proc/types.go b/vendor/github.com/containerd/containerd/linux/proc/types.go
new file mode 100644
index 0000000..9055c25
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/proc/types.go
@@ -0,0 +1,37 @@
+package proc
+
+import (
+	containerd_types "github.com/containerd/containerd/api/types"
+	google_protobuf "github.com/gogo/protobuf/types"
+)
+
+// CreateConfig hold task creation configuration
+type CreateConfig struct {
+	ID               string
+	Bundle           string
+	Runtime          string
+	Rootfs           []*containerd_types.Mount
+	Terminal         bool
+	Stdin            string
+	Stdout           string
+	Stderr           string
+	Checkpoint       string
+	ParentCheckpoint string
+	Options          *google_protobuf.Any
+}
+
+// ExecConfig holds exec creation configuration
+type ExecConfig struct {
+	ID       string
+	Terminal bool
+	Stdin    string
+	Stdout   string
+	Stderr   string
+	Spec     *google_protobuf.Any
+}
+
+// CheckpointConfig holds task checkpoint configuration
+type CheckpointConfig struct {
+	Path    string
+	Options *google_protobuf.Any
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/utils.go b/vendor/github.com/containerd/containerd/linux/proc/utils.go
similarity index 92%
rename from vendor/github.com/containerd/containerd/linux/shim/utils.go
rename to vendor/github.com/containerd/containerd/linux/proc/utils.go
index 317f8da..1197957 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/utils.go
+++ b/vendor/github.com/containerd/containerd/linux/proc/utils.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package shim
+package proc
 
 import (
 	"encoding/json"
@@ -10,7 +10,6 @@
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
-	shimapi "github.com/containerd/containerd/linux/shim/v1"
 	runc "github.com/containerd/go-runc"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
@@ -81,6 +80,6 @@
 	return errors.Wrapf(err, "unknown error after kill")
 }
 
-func hasNoIO(r *shimapi.CreateTaskRequest) bool {
+func hasNoIO(r *CreateConfig) bool {
 	return r.Stdin == "" && r.Stdout == "" && r.Stderr == ""
 }
diff --git a/vendor/github.com/containerd/containerd/linux/process.go b/vendor/github.com/containerd/containerd/linux/process.go
index 0febff9..10acc69 100644
--- a/vendor/github.com/containerd/containerd/linux/process.go
+++ b/vendor/github.com/containerd/containerd/linux/process.go
@@ -5,6 +5,7 @@
 import (
 	"context"
 
+	eventstypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/api/types/task"
 	"github.com/containerd/containerd/errdefs"
 	shim "github.com/containerd/containerd/linux/shim/v1"
@@ -96,12 +97,17 @@
 
 // Start the process
 func (p *Process) Start(ctx context.Context) error {
-	_, err := p.t.shim.Start(ctx, &shim.StartRequest{
+	r, err := p.t.shim.Start(ctx, &shim.StartRequest{
 		ID: p.id,
 	})
 	if err != nil {
 		return errdefs.FromGRPC(err)
 	}
+	p.t.events.Publish(ctx, runtime.TaskExecStartedEventTopic, &eventstypes.TaskExecStarted{
+		ContainerID: p.t.id,
+		Pid:         r.Pid,
+		ExecID:      p.id,
+	})
 	return nil
 }
 
diff --git a/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go b/vendor/github.com/containerd/containerd/linux/runctypes/runc.pb.go
similarity index 81%
rename from vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go
rename to vendor/github.com/containerd/containerd/linux/runctypes/runc.pb.go
index 0415e23..00a27bf 100644
--- a/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go
+++ b/vendor/github.com/containerd/containerd/linux/runctypes/runc.pb.go
@@ -1,24 +1,25 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/linux/runcopts/runc.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/linux/runctypes/runc.proto
 
 /*
-	Package runcopts is a generated protocol buffer package.
+	Package runctypes is a generated protocol buffer package.
 
 	It is generated from these files:
-		github.com/containerd/containerd/linux/runcopts/runc.proto
+		github.com/containerd/containerd/linux/runctypes/runc.proto
 
 	It has these top-level messages:
 		RuncOptions
 		CreateOptions
 		CheckpointOptions
+		ProcessDetails
 */
-package runcopts
+package runctypes
 
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 
 import strings "strings"
 import reflect "reflect"
@@ -79,10 +80,19 @@
 func (*CheckpointOptions) ProtoMessage()               {}
 func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{2} }
 
+type ProcessDetails struct {
+	ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *ProcessDetails) Reset()                    { *m = ProcessDetails{} }
+func (*ProcessDetails) ProtoMessage()               {}
+func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{3} }
+
 func init() {
 	proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions")
 	proto.RegisterType((*CreateOptions)(nil), "containerd.linux.runc.CreateOptions")
 	proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions")
+	proto.RegisterType((*ProcessDetails)(nil), "containerd.linux.runc.ProcessDetails")
 }
 func (m *RuncOptions) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
@@ -334,24 +344,30 @@
 	return i, nil
 }
 
-func encodeFixed64Runc(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
+func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
 }
-func encodeFixed32Runc(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
+
+func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
 }
+
 func encodeVarintRunc(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -457,6 +473,16 @@
 	return n
 }
 
+func (m *ProcessDetails) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	return n
+}
+
 func sovRunc(x uint64) (n int) {
 	for {
 		n++
@@ -519,6 +545,16 @@
 	}, "")
 	return s
 }
+func (this *ProcessDetails) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ProcessDetails{`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func valueToStringRunc(v interface{}) string {
 	rv := reflect.ValueOf(v)
 	if rv.IsNil() {
@@ -1187,6 +1223,85 @@
 	}
 	return nil
 }
+func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRunc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRunc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRunc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func skipRunc(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
@@ -1293,40 +1408,43 @@
 )
 
 func init() {
-	proto.RegisterFile("github.com/containerd/containerd/linux/runcopts/runc.proto", fileDescriptorRunc)
+	proto.RegisterFile("github.com/containerd/containerd/linux/runctypes/runc.proto", fileDescriptorRunc)
 }
 
 var fileDescriptorRunc = []byte{
-	// 495 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0x31, 0x6f, 0xd3, 0x40,
-	0x14, 0xc7, 0x7b, 0xb4, 0x4d, 0x9c, 0x4b, 0x53, 0xe0, 0x20, 0xd2, 0x51, 0x84, 0x09, 0x11, 0x48,
-	0x61, 0x49, 0x24, 0x58, 0x10, 0x6c, 0x64, 0x60, 0x00, 0x4a, 0x65, 0xe8, 0xc2, 0x72, 0x72, 0xcf,
-	0x87, 0xf3, 0x94, 0xf8, 0xde, 0xe9, 0xee, 0x4c, 0x93, 0xad, 0x9f, 0x80, 0xcf, 0xd5, 0x91, 0x91,
-	0x91, 0xe6, 0x8b, 0x80, 0x7c, 0xb6, 0x0b, 0x2b, 0x2b, 0xdb, 0xff, 0xfd, 0xfe, 0xcf, 0x7e, 0x4f,
-	0xff, 0xd3, 0xa3, 0x2f, 0x73, 0xf0, 0x8b, 0xf2, 0x6c, 0x2a, 0xb1, 0x98, 0x49, 0xd4, 0x3e, 0x05,
-	0xad, 0x6c, 0xf6, 0xb7, 0x5c, 0x81, 0x2e, 0xd7, 0x33, 0x5b, 0x6a, 0x89, 0xc6, 0xbb, 0x20, 0xa6,
-	0xc6, 0xa2, 0x47, 0x36, 0xfc, 0xd3, 0x35, 0x0d, 0x5d, 0xd3, 0xca, 0x3c, 0xba, 0x9b, 0x63, 0x8e,
-	0xa1, 0x63, 0x56, 0xa9, 0xba, 0x79, 0xfc, 0x8d, 0xd0, 0x7e, 0x52, 0x6a, 0xf9, 0xc1, 0x78, 0x40,
-	0xed, 0x18, 0xa7, 0x5d, 0x5b, 0x6a, 0x0f, 0x85, 0xe2, 0x64, 0x44, 0x26, 0xbd, 0xa4, 0x2d, 0xd9,
-	0x23, 0x7a, 0xd0, 0x48, 0x61, 0x11, 0x3d, 0xbf, 0x11, 0xec, 0x7e, 0xc3, 0x12, 0x44, 0xcf, 0xee,
-	0xd3, 0x9e, 0xb4, 0x50, 0x0a, 0x93, 0xfa, 0x05, 0xdf, 0x0d, 0x7e, 0x54, 0x81, 0x93, 0xd4, 0x2f,
-	0xd8, 0x13, 0x7a, 0xe8, 0x36, 0xce, 0xab, 0x22, 0x13, 0x32, 0xb7, 0x58, 0x1a, 0xbe, 0x37, 0x22,
-	0x93, 0x28, 0x19, 0x34, 0x74, 0x1e, 0xe0, 0xf8, 0x62, 0x97, 0x0e, 0xe6, 0x56, 0xa5, 0x5e, 0xb5,
-	0x2b, 0x8d, 0xe9, 0x40, 0xa3, 0x30, 0xf0, 0x15, 0x7d, 0x3d, 0x99, 0x84, 0xef, 0xfa, 0x1a, 0x4f,
-	0x2a, 0x16, 0x26, 0xdf, 0xa3, 0x11, 0x1a, 0xa5, 0x85, 0x97, 0x26, 0x2c, 0x16, 0x25, 0xdd, 0xaa,
-	0xfe, 0x24, 0x0d, 0x7b, 0x46, 0x87, 0x6a, 0xed, 0x95, 0xd5, 0xe9, 0x4a, 0x94, 0x1a, 0xd6, 0xc2,
-	0xa1, 0x5c, 0x2a, 0xef, 0xc2, 0x82, 0x51, 0x72, 0xa7, 0x35, 0x4f, 0x35, 0xac, 0x3f, 0xd6, 0x16,
-	0x3b, 0xa2, 0x91, 0x57, 0xb6, 0x00, 0x9d, 0xae, 0x9a, 0x2d, 0xaf, 0x6b, 0xf6, 0x80, 0xd2, 0x2f,
-	0xb0, 0x52, 0x62, 0x85, 0x72, 0xe9, 0xf8, 0x7e, 0x70, 0x7b, 0x15, 0x79, 0x57, 0x01, 0xf6, 0x94,
-	0xde, 0x52, 0x85, 0xf1, 0x1b, 0xa1, 0xd3, 0x42, 0x39, 0x93, 0x4a, 0xe5, 0x78, 0x67, 0xb4, 0x3b,
-	0xe9, 0x25, 0x37, 0x03, 0x3f, 0xbe, 0xc6, 0x55, 0xa2, 0x75, 0x12, 0x4e, 0x14, 0x98, 0x29, 0xde,
-	0xad, 0x13, 0x6d, 0xd8, 0x7b, 0xcc, 0x14, 0x7b, 0x4c, 0x0f, 0x35, 0x0a, 0xad, 0xce, 0xc5, 0x52,
-	0x6d, 0x2c, 0xe8, 0x9c, 0x47, 0x61, 0xe0, 0x81, 0xc6, 0x63, 0x75, 0xfe, 0xb6, 0x66, 0xec, 0x21,
-	0xed, 0xbb, 0x05, 0x14, 0x6d, 0xae, 0xbd, 0xf0, 0x1f, 0x5a, 0xa1, 0x3a, 0x54, 0x36, 0xa4, 0x1d,
-	0x40, 0x51, 0x42, 0xc6, 0xe9, 0x88, 0x4c, 0x06, 0xc9, 0x3e, 0xe0, 0x29, 0x64, 0x0d, 0xce, 0x21,
-	0xe3, 0xfd, 0x16, 0xbf, 0x81, 0x6c, 0xfc, 0x8b, 0xd0, 0xdb, 0xf3, 0x85, 0x92, 0x4b, 0x83, 0xa0,
-	0x7d, 0xfb, 0x0c, 0x8c, 0xee, 0xa9, 0x35, 0xb4, 0xe9, 0x07, 0xfd, 0xbf, 0xc6, 0xfe, 0x3a, 0xb9,
-	0xbc, 0x8a, 0x77, 0x7e, 0x5c, 0xc5, 0x3b, 0x17, 0xdb, 0x98, 0x5c, 0x6e, 0x63, 0xf2, 0x7d, 0x1b,
-	0x93, 0x9f, 0xdb, 0x98, 0x7c, 0x7e, 0xf1, 0x8f, 0x87, 0xf9, 0xaa, 0x15, 0x67, 0x9d, 0x70, 0x70,
-	0xcf, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xbb, 0xf0, 0x6c, 0xdb, 0x03, 0x00, 0x00,
+	// 540 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0xc1, 0x6e, 0xd3, 0x40,
+	0x10, 0x86, 0x6b, 0xda, 0x26, 0xce, 0xa4, 0x29, 0xb0, 0x50, 0xc9, 0x14, 0x91, 0x86, 0x00, 0x52,
+	0xb8, 0xa4, 0x12, 0x88, 0x0b, 0xbd, 0xb5, 0x45, 0xa8, 0x02, 0x4a, 0x65, 0x5a, 0x09, 0x71, 0x59,
+	0xb9, 0xeb, 0x21, 0x59, 0x25, 0xde, 0x59, 0xed, 0xae, 0xa9, 0x73, 0xeb, 0x13, 0xf0, 0x5c, 0x3d,
+	0x72, 0xe4, 0x84, 0x68, 0x5e, 0x04, 0xe4, 0x75, 0x1c, 0xb8, 0x72, 0xe5, 0xf6, 0xcf, 0xf7, 0x8f,
+	0x3d, 0xa3, 0x7f, 0x35, 0xb0, 0x37, 0x92, 0x6e, 0x9c, 0x9f, 0x0f, 0x05, 0x65, 0xbb, 0x82, 0x94,
+	0x4b, 0xa4, 0x42, 0x93, 0xfe, 0x2d, 0xa7, 0x52, 0xe5, 0xc5, 0xae, 0xc9, 0x95, 0x70, 0x33, 0x8d,
+	0xd6, 0xab, 0xa1, 0x36, 0xe4, 0x88, 0x6d, 0xfd, 0x69, 0x1b, 0xfa, 0xb6, 0x61, 0x69, 0x6e, 0xdf,
+	0x1d, 0xd1, 0x88, 0x7c, 0xc7, 0x6e, 0xa9, 0xaa, 0xe6, 0xfe, 0xd7, 0x00, 0xda, 0x71, 0xae, 0xc4,
+	0x7b, 0xed, 0x24, 0x29, 0xcb, 0x22, 0x68, 0x9a, 0x5c, 0x39, 0x99, 0x61, 0x14, 0xf4, 0x82, 0x41,
+	0x2b, 0xae, 0x4b, 0xf6, 0x10, 0x36, 0x16, 0x92, 0x1b, 0x22, 0x17, 0xdd, 0xf0, 0x76, 0x7b, 0xc1,
+	0x62, 0x22, 0xc7, 0xee, 0x43, 0x4b, 0x18, 0x99, 0x73, 0x9d, 0xb8, 0x71, 0xb4, 0xea, 0xfd, 0xb0,
+	0x04, 0x27, 0x89, 0x1b, 0xb3, 0x27, 0xb0, 0x69, 0x67, 0xd6, 0x61, 0x96, 0x72, 0x31, 0x32, 0x94,
+	0xeb, 0x68, 0xad, 0x17, 0x0c, 0xc2, 0xb8, 0xb3, 0xa0, 0x07, 0x1e, 0xf6, 0x2f, 0x57, 0xa1, 0x73,
+	0x60, 0x30, 0x71, 0x58, 0xaf, 0xd4, 0x87, 0x8e, 0x22, 0xae, 0xe5, 0x17, 0x72, 0xd5, 0xe4, 0xc0,
+	0x7f, 0xd7, 0x56, 0x74, 0x52, 0x32, 0x3f, 0xf9, 0x1e, 0x84, 0xa4, 0x51, 0x71, 0x27, 0xb4, 0x5f,
+	0x2c, 0x8c, 0x9b, 0x65, 0x7d, 0x2a, 0x34, 0x7b, 0x06, 0x5b, 0x58, 0x38, 0x34, 0x2a, 0x99, 0xf2,
+	0x5c, 0xc9, 0x82, 0x5b, 0x12, 0x13, 0x74, 0xd6, 0x2f, 0x18, 0xc6, 0x77, 0x6a, 0xf3, 0x4c, 0xc9,
+	0xe2, 0x43, 0x65, 0xb1, 0x6d, 0x08, 0x1d, 0x9a, 0x4c, 0xaa, 0x64, 0xba, 0xd8, 0x72, 0x59, 0xb3,
+	0x07, 0x00, 0x9f, 0xe5, 0x14, 0xf9, 0x94, 0xc4, 0xc4, 0x46, 0xeb, 0xde, 0x6d, 0x95, 0xe4, 0x6d,
+	0x09, 0xd8, 0x53, 0xb8, 0x85, 0x99, 0x76, 0x33, 0xae, 0x92, 0x0c, 0xad, 0x4e, 0x04, 0xda, 0xa8,
+	0xd1, 0x5b, 0x1d, 0xb4, 0xe2, 0x9b, 0x9e, 0x1f, 0x2f, 0x71, 0x99, 0x68, 0x95, 0x84, 0xe5, 0x19,
+	0xa5, 0x18, 0x35, 0xab, 0x44, 0x17, 0xec, 0x1d, 0xa5, 0xc8, 0x1e, 0xc3, 0xa6, 0x22, 0xae, 0xf0,
+	0x82, 0x4f, 0x70, 0x66, 0xa4, 0x1a, 0x45, 0xa1, 0x1f, 0xb8, 0xa1, 0xe8, 0x18, 0x2f, 0xde, 0x54,
+	0x8c, 0xed, 0x40, 0xdb, 0x8e, 0x65, 0x56, 0xe7, 0xda, 0xf2, 0xff, 0x81, 0x12, 0x55, 0xa1, 0xb2,
+	0x2d, 0x68, 0x48, 0xe2, 0xb9, 0x4c, 0x23, 0xe8, 0x05, 0x83, 0x4e, 0xbc, 0x2e, 0xe9, 0x4c, 0xa6,
+	0x0b, 0x3c, 0x92, 0x69, 0xd4, 0xae, 0xf1, 0x6b, 0x99, 0xf6, 0x7f, 0x05, 0x70, 0xfb, 0x60, 0x8c,
+	0x62, 0xa2, 0x49, 0x2a, 0x57, 0x3f, 0x03, 0x83, 0x35, 0x2c, 0x64, 0x9d, 0xbe, 0xd7, 0xff, 0x6b,
+	0xec, 0xfd, 0x17, 0xb0, 0x79, 0x62, 0x48, 0xa0, 0xb5, 0x87, 0xe8, 0x12, 0x39, 0xb5, 0xec, 0x11,
+	0x34, 0xb1, 0x40, 0xc1, 0x65, 0x5a, 0xdd, 0xc5, 0x3e, 0xcc, 0x7f, 0xec, 0x34, 0x5e, 0x15, 0x28,
+	0x8e, 0x0e, 0xe3, 0x46, 0x69, 0x1d, 0xa5, 0xfb, 0xa7, 0x57, 0xd7, 0xdd, 0x95, 0xef, 0xd7, 0xdd,
+	0x95, 0xcb, 0x79, 0x37, 0xb8, 0x9a, 0x77, 0x83, 0x6f, 0xf3, 0x6e, 0xf0, 0x73, 0xde, 0x0d, 0x3e,
+	0xbd, 0xfc, 0xd7, 0x83, 0xde, 0x5b, 0xaa, 0x8f, 0x2b, 0xe7, 0x0d, 0x7f, 0xab, 0xcf, 0x7f, 0x07,
+	0x00, 0x00, 0xff, 0xff, 0xb1, 0xca, 0x85, 0x39, 0x17, 0x04, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto b/vendor/github.com/containerd/containerd/linux/runctypes/runc.proto
similarity index 87%
rename from vendor/github.com/containerd/containerd/linux/runcopts/runc.proto
rename to vendor/github.com/containerd/containerd/linux/runctypes/runc.proto
index 3d10dc9..a73b1ca 100644
--- a/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto
+++ b/vendor/github.com/containerd/containerd/linux/runctypes/runc.proto
@@ -2,9 +2,9 @@
 
 package containerd.linux.runc;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 
-option go_package = "github.com/containerd/containerd/linux/runcopts;runcopts";
+option go_package = "github.com/containerd/containerd/linux/runctypes;runctypes";
 
 message RuncOptions {
 	string runtime = 1;
@@ -36,3 +36,7 @@
 	repeated string empty_namespaces = 6;
 	string cgroups_mode = 7;
 }
+
+message ProcessDetails {
+	string exec_id = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go
index 44219e4..82ed4f4 100644
--- a/vendor/github.com/containerd/containerd/linux/runtime.go
+++ b/vendor/github.com/containerd/containerd/linux/runtime.go
@@ -11,17 +11,18 @@
 	"time"
 
 	"github.com/boltdb/bolt"
-	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	eventstypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/api/types"
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events/exchange"
 	"github.com/containerd/containerd/identifiers"
-	"github.com/containerd/containerd/linux/runcopts"
-	client "github.com/containerd/containerd/linux/shim"
+	"github.com/containerd/containerd/linux/proc"
+	"github.com/containerd/containerd/linux/runctypes"
 	shim "github.com/containerd/containerd/linux/shim/v1"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/plugin"
@@ -30,7 +31,7 @@
 	"github.com/containerd/containerd/sys"
 	runc "github.com/containerd/go-runc"
 	"github.com/containerd/typeurl"
-	google_protobuf "github.com/golang/protobuf/ptypes/empty"
+	ptypes "github.com/gogo/protobuf/types"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -39,7 +40,7 @@
 
 var (
 	pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux")
-	empty    = &google_protobuf.Empty{}
+	empty    = &ptypes.Empty{}
 )
 
 const (
@@ -78,17 +79,6 @@
 	NoShim bool `toml:"no_shim"`
 	// Debug enable debug on the shim
 	ShimDebug bool `toml:"shim_debug"`
-	// ShimNoMountNS prevents the runtime from putting shims into their own mount namespace.
-	//
-	// Putting the shim in its own mount namespace ensure that any mounts made
-	// by it in order to get the task rootfs ready will be undone regardless
-	// on how the shim dies.
-	//
-	// NOTE: This should only be used in kernel older than 3.18 to avoid shims
-	// from causing a DoS in their parent namespace due to having a copy of
-	// mounts previously there which would prevent unlink, rename and remove
-	// operations on those mountpoints.
-	ShimNoMountNS bool `toml:"shim_no_newns"`
 }
 
 // New returns a configured runtime
@@ -193,7 +183,7 @@
 			if err != nil {
 				return nil, err
 			}
-			cgroup = v.(*runcopts.CreateOptions).ShimCgroup
+			cgroup = v.(*runctypes.CreateOptions).ShimCgroup
 		}
 		exitHandler := func() {
 			log.G(ctx).WithField("id", id).Info("shim reaped")
@@ -226,8 +216,7 @@
 				}).Warn("failed to clen up after killed shim")
 			}
 		}
-		shimopt = ShimRemote(r.config.Shim, r.address, cgroup,
-			r.config.ShimNoMountNS, r.config.ShimDebug, exitHandler)
+		shimopt = ShimRemote(r.config.Shim, r.address, cgroup, r.config.ShimDebug, exitHandler)
 	}
 
 	s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts)
@@ -242,14 +231,14 @@
 		}
 	}()
 
-	runtime := r.config.Runtime
+	rt := r.config.Runtime
 	if ropts != nil && ropts.Runtime != "" {
-		runtime = ropts.Runtime
+		rt = ropts.Runtime
 	}
 	sopts := &shim.CreateTaskRequest{
 		ID:         id,
 		Bundle:     bundle.path,
-		Runtime:    runtime,
+		Runtime:    rt,
 		Stdin:      opts.IO.Stdin,
 		Stdout:     opts.IO.Stdout,
 		Stderr:     opts.IO.Stderr,
@@ -268,7 +257,8 @@
 	if err != nil {
 		return nil, errdefs.FromGRPC(err)
 	}
-	t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor)
+	t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor, r.events,
+		proc.NewRunc(ropts.RuntimeRoot, sopts.Bundle, namespace, rt, ropts.CriuPath, ropts.SystemdCgroup))
 	if err != nil {
 		return nil, err
 	}
@@ -285,6 +275,20 @@
 			return nil, err
 		}
 	}
+	r.events.Publish(ctx, runtime.TaskCreateEventTopic, &eventstypes.TaskCreate{
+		ContainerID: sopts.ID,
+		Bundle:      sopts.Bundle,
+		Rootfs:      sopts.Rootfs,
+		IO: &eventstypes.TaskIO{
+			Stdin:    sopts.Stdin,
+			Stdout:   sopts.Stdout,
+			Stderr:   sopts.Stderr,
+			Terminal: sopts.Terminal,
+		},
+		Checkpoint: sopts.Checkpoint,
+		Pid:        uint32(t.pid),
+	})
+
 	return t, nil
 }
 
@@ -322,6 +326,12 @@
 	if err := bundle.Delete(); err != nil {
 		log.G(ctx).WithError(err).Error("failed to delete bundle")
 	}
+	r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{
+		ContainerID: lc.id,
+		ExitStatus:  rsp.ExitStatus,
+		ExitedAt:    rsp.ExitedAt,
+		Pid:         rsp.Pid,
+	})
 	return &runtime.Exit{
 		Status:    rsp.ExitStatus,
 		Timestamp: rsp.ExitedAt,
@@ -376,7 +386,8 @@
 			filepath.Join(r.state, ns, id),
 			filepath.Join(r.root, ns, id),
 		)
-		pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, client.InitPidFile))
+		ctx = namespaces.WithNamespace(ctx, ns)
+		pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, proc.InitPidFile))
 		s, err := bundle.NewShimClient(ctx, ns, ShimConnect(), nil)
 		if err != nil {
 			log.G(ctx).WithError(err).WithFields(logrus.Fields{
@@ -390,8 +401,15 @@
 			}
 			continue
 		}
+		ropts, err := r.getRuncOptions(ctx, id)
+		if err != nil {
+			log.G(ctx).WithError(err).WithField("id", id).
+				Error("get runtime options")
+			continue
+		}
 
-		t, err := newTask(id, ns, pid, s, r.monitor)
+		t, err := newTask(id, ns, pid, s, r.monitor, r.events,
+			proc.NewRunc(ropts.RuntimeRoot, bundle.path, ns, ropts.Runtime, ropts.CriuPath, ropts.SystemdCgroup))
 		if err != nil {
 			log.G(ctx).WithError(err).Error("loading task type")
 			continue
@@ -423,7 +441,7 @@
 
 	// Notify Client
 	exitedAt := time.Now().UTC()
-	r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventsapi.TaskExit{
+	r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventstypes.TaskExit{
 		ContainerID: id,
 		ID:          id,
 		Pid:         uint32(pid),
@@ -435,7 +453,7 @@
 		log.G(ctx).WithError(err).Error("delete bundle")
 	}
 
-	r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventsapi.TaskDelete{
+	r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventstypes.TaskDelete{
 		ContainerID: id,
 		Pid:         uint32(pid),
 		ExitStatus:  128 + uint32(unix.SIGKILL),
@@ -457,7 +475,7 @@
 	}); err != nil {
 		log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
 	}
-	if err := unix.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
+	if err := mount.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
 		log.G(ctx).WithError(err).WithFields(logrus.Fields{
 			"path": bundle.path,
 			"id":   id,
@@ -474,7 +492,7 @@
 
 	var (
 		cmd  = r.config.Runtime
-		root = client.RuncRoot
+		root = proc.RuncRoot
 	)
 	if ropts != nil {
 		if ropts.Runtime != "" {
@@ -493,7 +511,7 @@
 	}, nil
 }
 
-func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runcopts.RuncOptions, error) {
+func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runctypes.RuncOptions, error) {
 	var container containers.Container
 
 	if err := r.db.View(func(tx *bolt.Tx) error {
@@ -510,12 +528,12 @@
 		if err != nil {
 			return nil, err
 		}
-		ropts, ok := v.(*runcopts.RuncOptions)
+		ropts, ok := v.(*runctypes.RuncOptions)
 		if !ok {
 			return nil, errors.New("invalid runtime options format")
 		}
 
 		return ropts, nil
 	}
-	return nil, nil
+	return &runctypes.RuncOptions{}, nil
 }
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client.go b/vendor/github.com/containerd/containerd/linux/shim/client/client.go
index 1cfe766..1fb949e 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client.go
@@ -4,7 +4,6 @@
 
 import (
 	"context"
-	"fmt"
 	"io"
 	"net"
 	"os"
@@ -18,6 +17,7 @@
 
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"github.com/stevvooe/ttrpc"
 
 	"github.com/containerd/containerd/events"
 	"github.com/containerd/containerd/linux/shim"
@@ -25,18 +25,17 @@
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/reaper"
 	"github.com/containerd/containerd/sys"
-	google_protobuf "github.com/golang/protobuf/ptypes/empty"
-	"google.golang.org/grpc"
+	ptypes "github.com/gogo/protobuf/types"
 )
 
-var empty = &google_protobuf.Empty{}
+var empty = &ptypes.Empty{}
 
 // Opt is an option for a shim client configuration
-type Opt func(context.Context, shim.Config) (shimapi.ShimClient, io.Closer, error)
+type Opt func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error)
 
 // WithStart executes a new shim process
-func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) Opt {
-	return func(ctx context.Context, config shim.Config) (_ shimapi.ShimClient, _ io.Closer, err error) {
+func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHandler func()) Opt {
+	return func(ctx context.Context, config shim.Config) (_ shimapi.ShimService, _ io.Closer, err error) {
 		socket, err := newSocket(address)
 		if err != nil {
 			return nil, nil, err
@@ -48,7 +47,7 @@
 		}
 		defer f.Close()
 
-		cmd := newCommand(binary, daemonAddress, nonewns, debug, config, f)
+		cmd := newCommand(binary, daemonAddress, debug, config, f)
 		ec, err := reaper.Default.Start(cmd)
 		if err != nil {
 			return nil, nil, errors.Wrapf(err, "failed to start shim")
@@ -88,11 +87,16 @@
 	}
 }
 
-func newCommand(binary, daemonAddress string, nonewns, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
+func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
+	selfExe, err := os.Executable()
+	if err != nil {
+		panic(err)
+	}
 	args := []string{
 		"-namespace", config.Namespace,
 		"-workdir", config.WorkDir,
 		"-address", daemonAddress,
+		"-containerd-binary", selfExe,
 	}
 
 	if config.Criu != "" {
@@ -113,7 +117,7 @@
 	// make sure the shim can be re-parented to system init
 	// and is cloned in a new mount namespace because the overlay/filesystems
 	// will be mounted by the shim
-	cmd.SysProcAttr = getSysProcAttr(nonewns)
+	cmd.SysProcAttr = getSysProcAttr()
 	cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
 	if debug {
 		cmd.Stdout = os.Stdout
@@ -134,24 +138,8 @@
 	return l.(*net.UnixListener), nil
 }
 
-func connect(address string, d func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) {
-	gopts := []grpc.DialOption{
-		grpc.WithBlock(),
-		grpc.WithInsecure(),
-		grpc.WithTimeout(100 * time.Second),
-		grpc.WithDialer(d),
-		grpc.FailOnNonTempDialError(true),
-	}
-	conn, err := grpc.Dial(dialAddress(address), gopts...)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to dial %q", address)
-	}
-	return conn, nil
-}
-
-func dialer(address string, timeout time.Duration) (net.Conn, error) {
-	address = strings.TrimPrefix(address, "unix://")
-	return net.DialTimeout("unix", address, timeout)
+func connect(address string, d func(string, time.Duration) (net.Conn, error)) (net.Conn, error) {
+	return d(address, 100*time.Second)
 }
 
 func annonDialer(address string, timeout time.Duration) (net.Conn, error) {
@@ -159,24 +147,20 @@
 	return net.DialTimeout("unix", "\x00"+address, timeout)
 }
 
-func dialAddress(address string) string {
-	return fmt.Sprintf("unix://%s", address)
-}
-
 // WithConnect connects to an existing shim
 func WithConnect(address string) Opt {
-	return func(ctx context.Context, config shim.Config) (shimapi.ShimClient, io.Closer, error) {
+	return func(ctx context.Context, config shim.Config) (shimapi.ShimService, io.Closer, error) {
 		conn, err := connect(address, annonDialer)
 		if err != nil {
 			return nil, nil, err
 		}
-		return shimapi.NewShimClient(conn), conn, nil
+		return shimapi.NewShimClient(ttrpc.NewClient(conn)), conn, nil
 	}
 }
 
 // WithLocal uses an in process shim
-func WithLocal(publisher events.Publisher) func(context.Context, shim.Config) (shimapi.ShimClient, io.Closer, error) {
-	return func(ctx context.Context, config shim.Config) (shimapi.ShimClient, io.Closer, error) {
+func WithLocal(publisher events.Publisher) func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error) {
+	return func(ctx context.Context, config shim.Config) (shimapi.ShimService, io.Closer, error) {
 		service, err := shim.NewService(config, publisher)
 		if err != nil {
 			return nil, nil, err
@@ -192,15 +176,15 @@
 		return nil, err
 	}
 	return &Client{
-		ShimClient: s,
-		c:          c,
-		exitCh:     make(chan struct{}),
+		ShimService: s,
+		c:           c,
+		exitCh:      make(chan struct{}),
 	}, nil
 }
 
 // Client is a shim client containing the connection to a shim
 type Client struct {
-	shimapi.ShimClient
+	shimapi.ShimService
 
 	c        io.Closer
 	exitCh   chan struct{}
@@ -212,10 +196,9 @@
 func (c *Client) IsAlive(ctx context.Context) (bool, error) {
 	_, err := c.ShimInfo(ctx, empty)
 	if err != nil {
-		if err != grpc.ErrServerStopped {
-			return false, err
-		}
-		return false, nil
+		// TODO(stevvooe): There are some error conditions that need to be
+		// handle with unix sockets existence to give the right answer here.
+		return false, err
 	}
 	return true, nil
 }
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
index 03ebba0..3125541 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
@@ -10,14 +10,10 @@
 	"github.com/pkg/errors"
 )
 
-func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
-	attr := syscall.SysProcAttr{
+func getSysProcAttr() *syscall.SysProcAttr {
+	return &syscall.SysProcAttr{
 		Setpgid: true,
 	}
-	if !nonewns {
-		attr.Cloneflags = syscall.CLONE_NEWNS
-	}
-	return &attr
 }
 
 func setCgroup(cgroupPath string, cmd *exec.Cmd) error {
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
index b34cf4d..0a24ce4 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
@@ -7,7 +7,7 @@
 	"syscall"
 )
 
-func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
+func getSysProcAttr() *syscall.SysProcAttr {
 	return &syscall.SysProcAttr{
 		Setpgid: true,
 	}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/local.go b/vendor/github.com/containerd/containerd/linux/shim/local.go
index 5e5634d..6e21926 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/local.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/local.go
@@ -3,17 +3,16 @@
 package shim
 
 import (
+	"context"
 	"path/filepath"
 
 	shimapi "github.com/containerd/containerd/linux/shim/v1"
-	google_protobuf "github.com/golang/protobuf/ptypes/empty"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-	"google.golang.org/grpc"
+	"github.com/containerd/containerd/mount"
+	ptypes "github.com/gogo/protobuf/types"
 )
 
 // NewLocal returns a shim client implementation for issue commands to a shim
-func NewLocal(s *Service) shimapi.ShimClient {
+func NewLocal(s *Service) shimapi.ShimService {
 	return &local{
 		s: s,
 	}
@@ -23,70 +22,70 @@
 	s *Service
 }
 
-func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest, opts ...grpc.CallOption) (*shimapi.CreateTaskResponse, error) {
+func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) {
 	return c.s.Create(ctx, in)
 }
 
-func (c *local) Start(ctx context.Context, in *shimapi.StartRequest, opts ...grpc.CallOption) (*shimapi.StartResponse, error) {
+func (c *local) Start(ctx context.Context, in *shimapi.StartRequest) (*shimapi.StartResponse, error) {
 	return c.s.Start(ctx, in)
 }
 
-func (c *local) Delete(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
+func (c *local) Delete(ctx context.Context, in *ptypes.Empty) (*shimapi.DeleteResponse, error) {
 	// make sure we unmount the containers rootfs for this local
-	if err := unix.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
+	if err := mount.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
 		return nil, err
 	}
 	return c.s.Delete(ctx, in)
 }
 
-func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
+func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) {
 	return c.s.DeleteProcess(ctx, in)
 }
 
-func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest) (*ptypes.Empty, error) {
 	return c.s.Exec(ctx, in)
 }
 
-func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest) (*ptypes.Empty, error) {
 	return c.s.ResizePty(ctx, in)
 }
 
-func (c *local) State(ctx context.Context, in *shimapi.StateRequest, opts ...grpc.CallOption) (*shimapi.StateResponse, error) {
+func (c *local) State(ctx context.Context, in *shimapi.StateRequest) (*shimapi.StateResponse, error) {
 	return c.s.State(ctx, in)
 }
 
-func (c *local) Pause(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) Pause(ctx context.Context, in *ptypes.Empty) (*ptypes.Empty, error) {
 	return c.s.Pause(ctx, in)
 }
 
-func (c *local) Resume(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) Resume(ctx context.Context, in *ptypes.Empty) (*ptypes.Empty, error) {
 	return c.s.Resume(ctx, in)
 }
 
-func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest) (*ptypes.Empty, error) {
 	return c.s.Kill(ctx, in)
 }
 
-func (c *local) ListPids(ctx context.Context, in *shimapi.ListPidsRequest, opts ...grpc.CallOption) (*shimapi.ListPidsResponse, error) {
+func (c *local) ListPids(ctx context.Context, in *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) {
 	return c.s.ListPids(ctx, in)
 }
 
-func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest) (*ptypes.Empty, error) {
 	return c.s.CloseIO(ctx, in)
 }
 
-func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) {
 	return c.s.Checkpoint(ctx, in)
 }
 
-func (c *local) ShimInfo(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.ShimInfoResponse, error) {
+func (c *local) ShimInfo(ctx context.Context, in *ptypes.Empty) (*shimapi.ShimInfoResponse, error) {
 	return c.s.ShimInfo(ctx, in)
 }
 
-func (c *local) Update(ctx context.Context, in *shimapi.UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+func (c *local) Update(ctx context.Context, in *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) {
 	return c.s.Update(ctx, in)
 }
 
-func (c *local) Wait(ctx context.Context, in *shimapi.WaitRequest, opts ...grpc.CallOption) (*shimapi.WaitResponse, error) {
+func (c *local) Wait(ctx context.Context, in *shimapi.WaitRequest) (*shimapi.WaitResponse, error) {
 	return c.s.Wait(ctx, in)
 }
diff --git a/vendor/github.com/containerd/containerd/linux/shim/service.go b/vendor/github.com/containerd/containerd/linux/shim/service.go
index 7b5c5e1..1150d1c 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/service.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/service.go
@@ -3,34 +3,33 @@
 package shim
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"sync"
 
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-
 	"github.com/containerd/console"
-	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	eventstypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/api/types/task"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/linux/proc"
+	"github.com/containerd/containerd/linux/runctypes"
 	shimapi "github.com/containerd/containerd/linux/shim/v1"
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/reaper"
 	"github.com/containerd/containerd/runtime"
 	runc "github.com/containerd/go-runc"
-	google_protobuf "github.com/golang/protobuf/ptypes/empty"
+	"github.com/containerd/typeurl"
+	ptypes "github.com/gogo/protobuf/types"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
-var empty = &google_protobuf.Empty{}
-
-// RuncRoot is the path to the root runc state directory
-const RuncRoot = "/run/containerd/runc"
+var empty = &ptypes.Empty{}
 
 // Config contains shim specific configuration
 type Config struct {
@@ -47,16 +46,16 @@
 	if config.Namespace == "" {
 		return nil, fmt.Errorf("shim namespace cannot be empty")
 	}
-	context := namespaces.WithNamespace(context.Background(), config.Namespace)
-	context = log.WithLogger(context, logrus.WithFields(logrus.Fields{
+	ctx := namespaces.WithNamespace(context.Background(), config.Namespace)
+	ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{
 		"namespace": config.Namespace,
 		"path":      config.Path,
 		"pid":       os.Getpid(),
 	}))
 	s := &Service{
 		config:    config,
-		context:   context,
-		processes: make(map[string]process),
+		context:   ctx,
+		processes: make(map[string]proc.Process),
 		events:    make(chan interface{}, 128),
 		ec:        reaper.Default.Subscribe(),
 	}
@@ -68,23 +67,15 @@
 	return s, nil
 }
 
-// platform handles platform-specific behavior that may differs across
-// platform implementations
-type platform interface {
-	copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error)
-	shutdownConsole(ctx context.Context, console console.Console) error
-	close() error
-}
-
 // Service is the shim implementation of a remote shim over GRPC
 type Service struct {
 	mu sync.Mutex
 
 	config    Config
 	context   context.Context
-	processes map[string]process
+	processes map[string]proc.Process
 	events    chan interface{}
-	platform  platform
+	platform  proc.Platform
 	ec        chan runc.Exit
 
 	// Filled by Create()
@@ -96,7 +87,29 @@
 func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	process, err := s.newInitProcess(ctx, r)
+	process, err := proc.New(
+		ctx,
+		s.config.Path,
+		s.config.WorkDir,
+		s.config.RuntimeRoot,
+		s.config.Namespace,
+		s.config.Criu,
+		s.config.SystemdCgroup,
+		s.platform,
+		&proc.CreateConfig{
+			ID:               r.ID,
+			Bundle:           r.Bundle,
+			Runtime:          r.Runtime,
+			Rootfs:           r.Rootfs,
+			Terminal:         r.Terminal,
+			Stdin:            r.Stdin,
+			Stdout:           r.Stdout,
+			Stderr:           r.Stderr,
+			Checkpoint:       r.Checkpoint,
+			ParentCheckpoint: r.ParentCheckpoint,
+			Options:          r.Options,
+		},
+	)
 	if err != nil {
 		return nil, errdefs.ToGRPC(err)
 	}
@@ -105,19 +118,6 @@
 	s.bundle = r.Bundle
 	pid := process.Pid()
 	s.processes[r.ID] = process
-	s.events <- &eventsapi.TaskCreate{
-		ContainerID: r.ID,
-		Bundle:      r.Bundle,
-		Rootfs:      r.Rootfs,
-		IO: &eventsapi.TaskIO{
-			Stdin:    r.Stdin,
-			Stdout:   r.Stdout,
-			Stderr:   r.Stderr,
-			Terminal: r.Terminal,
-		},
-		Checkpoint: r.Checkpoint,
-		Pid:        uint32(pid),
-	}
 	return &shimapi.CreateTaskResponse{
 		Pid: uint32(pid),
 	}, nil
@@ -134,19 +134,6 @@
 	if err := p.Start(ctx); err != nil {
 		return nil, err
 	}
-	if r.ID == s.id {
-		s.events <- &eventsapi.TaskStart{
-			ContainerID: s.id,
-			Pid:         uint32(p.Pid()),
-		}
-	} else {
-		pid := p.Pid()
-		s.events <- &eventsapi.TaskExecStarted{
-			ContainerID: s.id,
-			ExecID:      r.ID,
-			Pid:         uint32(pid),
-		}
-	}
 	return &shimapi.StartResponse{
 		ID:  p.ID(),
 		Pid: uint32(p.Pid()),
@@ -154,25 +141,18 @@
 }
 
 // Delete the initial process and container
-func (s *Service) Delete(ctx context.Context, r *google_protobuf.Empty) (*shimapi.DeleteResponse, error) {
+func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	p := s.processes[s.id]
 	if p == nil {
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
-
 	if err := p.Delete(ctx); err != nil {
 		return nil, err
 	}
 	delete(s.processes, s.id)
-	s.platform.close()
-	s.events <- &eventsapi.TaskDelete{
-		ContainerID: s.id,
-		ExitStatus:  uint32(p.ExitStatus()),
-		ExitedAt:    p.ExitedAt(),
-		Pid:         uint32(p.Pid()),
-	}
+	s.platform.Close()
 	return &shimapi.DeleteResponse{
 		ExitStatus: uint32(p.ExitStatus()),
 		ExitedAt:   p.ExitedAt(),
@@ -185,7 +165,7 @@
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if r.ID == s.id {
-		return nil, grpc.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess")
+		return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess")
 	}
 	p := s.processes[r.ID]
 	if p == nil {
@@ -203,7 +183,7 @@
 }
 
 // Exec an additional process inside the container
-func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*google_protobuf.Empty, error) {
+func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -216,21 +196,23 @@
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
 
-	process, err := newExecProcess(ctx, s.config.Path, r, p.(*initProcess), r.ID)
+	process, err := p.(*proc.Init).Exec(ctx, s.config.Path, &proc.ExecConfig{
+		ID:       r.ID,
+		Terminal: r.Terminal,
+		Stdin:    r.Stdin,
+		Stdout:   r.Stdout,
+		Stderr:   r.Stderr,
+		Spec:     r.Spec,
+	})
 	if err != nil {
 		return nil, errdefs.ToGRPC(err)
 	}
 	s.processes[r.ID] = process
-
-	s.events <- &eventsapi.TaskExecAdded{
-		ContainerID: s.id,
-		ExecID:      r.ID,
-	}
 	return empty, nil
 }
 
 // ResizePty of a process
-func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*google_protobuf.Empty, error) {
+func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if r.ID == "" {
@@ -281,51 +263,45 @@
 		Bundle:     s.bundle,
 		Pid:        uint32(p.Pid()),
 		Status:     status,
-		Stdin:      sio.stdin,
-		Stdout:     sio.stdout,
-		Stderr:     sio.stderr,
-		Terminal:   sio.terminal,
+		Stdin:      sio.Stdin,
+		Stdout:     sio.Stdout,
+		Stderr:     sio.Stderr,
+		Terminal:   sio.Terminal,
 		ExitStatus: uint32(p.ExitStatus()),
 		ExitedAt:   p.ExitedAt(),
 	}, nil
 }
 
 // Pause the container
-func (s *Service) Pause(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
+func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	p := s.processes[s.id]
 	if p == nil {
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
-	if err := p.(*initProcess).Pause(ctx); err != nil {
+	if err := p.(*proc.Init).Pause(ctx); err != nil {
 		return nil, err
 	}
-	s.events <- &eventsapi.TaskPaused{
-		ContainerID: s.id,
-	}
 	return empty, nil
 }
 
 // Resume the container
-func (s *Service) Resume(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
+func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	p := s.processes[s.id]
 	if p == nil {
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
-	if err := p.(*initProcess).Resume(ctx); err != nil {
+	if err := p.(*proc.Init).Resume(ctx); err != nil {
 		return nil, err
 	}
-	s.events <- &eventsapi.TaskResumed{
-		ContainerID: s.id,
-	}
 	return empty, nil
 }
 
 // Kill a process with the provided signal
-func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_protobuf.Empty, error) {
+func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if r.ID == "" {
@@ -357,9 +333,23 @@
 	}
 	var processes []*task.ProcessInfo
 	for _, pid := range pids {
-		processes = append(processes, &task.ProcessInfo{
+		pInfo := task.ProcessInfo{
 			Pid: pid,
-		})
+		}
+		for _, p := range s.processes {
+			if p.Pid() == int(pid) {
+				d := &runctypes.ProcessDetails{
+					ExecID: p.ID(),
+				}
+				a, err := typeurl.MarshalAny(d)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to marshal process %d info", pid)
+				}
+				pInfo.Info = a
+				break
+			}
+		}
+		processes = append(processes, &pInfo)
 	}
 	return &shimapi.ListPidsResponse{
 		Processes: processes,
@@ -367,7 +357,7 @@
 }
 
 // CloseIO of a process
-func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*google_protobuf.Empty, error) {
+func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	p := s.processes[r.ID]
@@ -383,38 +373,38 @@
 }
 
 // Checkpoint the container
-func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*google_protobuf.Empty, error) {
+func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	p := s.processes[s.id]
 	if p == nil {
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
-	if err := p.(*initProcess).Checkpoint(ctx, r); err != nil {
+	if err := p.(*proc.Init).Checkpoint(ctx, &proc.CheckpointConfig{
+		Path:    r.Path,
+		Options: r.Options,
+	}); err != nil {
 		return nil, errdefs.ToGRPC(err)
 	}
-	s.events <- &eventsapi.TaskCheckpointed{
-		ContainerID: s.id,
-	}
 	return empty, nil
 }
 
 // ShimInfo returns shim information such as the shim's pid
-func (s *Service) ShimInfo(ctx context.Context, r *google_protobuf.Empty) (*shimapi.ShimInfoResponse, error) {
+func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) {
 	return &shimapi.ShimInfoResponse{
 		ShimPid: uint32(os.Getpid()),
 	}, nil
 }
 
 // Update a running container
-func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*google_protobuf.Empty, error) {
+func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	p := s.processes[s.id]
 	if p == nil {
 		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
-	if err := p.(*initProcess).Update(ctx, r); err != nil {
+	if err := p.(*proc.Init).Update(ctx, r.Resources); err != nil {
 		return nil, errdefs.ToGRPC(err)
 	}
 	return empty, nil
@@ -447,15 +437,15 @@
 	defer s.mu.Unlock()
 	for _, p := range s.processes {
 		if p.Pid() == e.Pid {
-			if ip, ok := p.(*initProcess); ok {
+			if ip, ok := p.(*proc.Init); ok {
 				// Ensure all children are killed
-				if err := ip.killAll(s.context); err != nil {
+				if err := ip.KillAll(s.context); err != nil {
 					log.G(s.context).WithError(err).WithField("id", ip.ID()).
 						Error("failed to kill init's children")
 				}
 			}
 			p.SetExited(e.Status)
-			s.events <- &eventsapi.TaskExit{
+			s.events <- &eventstypes.TaskExit{
 				ContainerID: s.id,
 				ID:          p.ID(),
 				Pid:         uint32(e.Pid),
@@ -475,7 +465,7 @@
 		return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created")
 	}
 
-	ps, err := p.(*initProcess).runtime.Ps(ctx, id)
+	ps, err := p.(*proc.Init).Runtime().Ps(ctx, id)
 	if err != nil {
 		return nil, err
 	}
@@ -489,32 +479,32 @@
 func (s *Service) forward(publisher events.Publisher) {
 	for e := range s.events {
 		if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil {
-			logrus.WithError(err).Error("post event")
+			log.G(s.context).WithError(err).Error("post event")
 		}
 	}
 }
 
 func getTopic(ctx context.Context, e interface{}) string {
 	switch e.(type) {
-	case *eventsapi.TaskCreate:
+	case *eventstypes.TaskCreate:
 		return runtime.TaskCreateEventTopic
-	case *eventsapi.TaskStart:
+	case *eventstypes.TaskStart:
 		return runtime.TaskStartEventTopic
-	case *eventsapi.TaskOOM:
+	case *eventstypes.TaskOOM:
 		return runtime.TaskOOMEventTopic
-	case *eventsapi.TaskExit:
+	case *eventstypes.TaskExit:
 		return runtime.TaskExitEventTopic
-	case *eventsapi.TaskDelete:
+	case *eventstypes.TaskDelete:
 		return runtime.TaskDeleteEventTopic
-	case *eventsapi.TaskExecAdded:
+	case *eventstypes.TaskExecAdded:
 		return runtime.TaskExecAddedEventTopic
-	case *eventsapi.TaskExecStarted:
+	case *eventstypes.TaskExecStarted:
 		return runtime.TaskExecStartedEventTopic
-	case *eventsapi.TaskPaused:
+	case *eventstypes.TaskPaused:
 		return runtime.TaskPausedEventTopic
-	case *eventsapi.TaskResumed:
+	case *eventstypes.TaskResumed:
 		return runtime.TaskResumedEventTopic
-	case *eventsapi.TaskCheckpointed:
+	case *eventstypes.TaskCheckpointed:
 		return runtime.TaskCheckpointedEventTopic
 	default:
 		logrus.Warnf("no topic for type %#v", e)
diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_linux.go b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go
index 1d078ba..bbe9d18 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/service_linux.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go
@@ -1,6 +1,7 @@
 package shim
 
 import (
+	"context"
 	"io"
 	"sync"
 	"syscall"
@@ -8,14 +9,13 @@
 	"github.com/containerd/console"
 	"github.com/containerd/fifo"
 	"github.com/pkg/errors"
-	"golang.org/x/net/context"
 )
 
 type linuxPlatform struct {
 	epoller *console.Epoller
 }
 
-func (p *linuxPlatform) copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
+func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
 	if p.epoller == nil {
 		return nil, errors.New("uninitialized epoller")
 	}
@@ -58,7 +58,7 @@
 	return epollConsole, nil
 }
 
-func (p *linuxPlatform) shutdownConsole(ctx context.Context, cons console.Console) error {
+func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error {
 	if p.epoller == nil {
 		return errors.New("uninitialized epoller")
 	}
@@ -69,7 +69,7 @@
 	return epollConsole.Shutdown(p.epoller.CloseConsole)
 }
 
-func (p *linuxPlatform) close() error {
+func (p *linuxPlatform) Close() error {
 	return p.epoller.Close()
 }
 
diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_unix.go b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go
index c00b853..d4419e5 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/service_unix.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go
@@ -3,19 +3,19 @@
 package shim
 
 import (
+	"context"
 	"io"
 	"sync"
 	"syscall"
 
 	"github.com/containerd/console"
 	"github.com/containerd/fifo"
-	"golang.org/x/net/context"
 )
 
 type unixPlatform struct {
 }
 
-func (p *unixPlatform) copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
+func (p *unixPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
 	if stdin != "" {
 		in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
 		if err != nil {
@@ -48,11 +48,11 @@
 	return console, nil
 }
 
-func (p *unixPlatform) shutdownConsole(ctx context.Context, cons console.Console) error {
+func (p *unixPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error {
 	return nil
 }
 
-func (p *unixPlatform) close() error {
+func (p *unixPlatform) Close() error {
 	return nil
 }
 
diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go
index 831d091..fd4e32e 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/linux/shim/v1/shim.proto
-// DO NOT EDIT!
 
 /*
 	Package shim is a generated protocol buffer package.
@@ -36,24 +35,23 @@
 import fmt "fmt"
 import math "math"
 import google_protobuf "github.com/gogo/protobuf/types"
-import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
-import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/gogo/protobuf/types"
 import containerd_types "github.com/containerd/containerd/api/types"
 import containerd_v1_types "github.com/containerd/containerd/api/types/task"
 
 import time "time"
 
-import (
-	context "golang.org/x/net/context"
-	grpc "google.golang.org/grpc"
-)
-
 import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
 
 import strings "strings"
 import reflect "reflect"
 
+import context "context"
+import github_com_stevvooe_ttrpc "github.com/stevvooe/ttrpc"
+
 import io "io"
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -283,578 +281,6 @@
 	proto.RegisterType((*WaitRequest)(nil), "containerd.runtime.linux.shim.v1.WaitRequest")
 	proto.RegisterType((*WaitResponse)(nil), "containerd.runtime.linux.shim.v1.WaitResponse")
 }
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for Shim service
-
-type ShimClient interface {
-	// State returns shim and task state information.
-	State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
-	Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error)
-	Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error)
-	Delete(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*DeleteResponse, error)
-	DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error)
-	ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error)
-	Pause(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	Resume(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	// ShimInfo returns information about the shim.
-	ShimInfo(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ShimInfoResponse, error)
-	Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
-	Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error)
-}
-
-type shimClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewShimClient(cc *grpc.ClientConn) ShimClient {
-	return &shimClient{cc}
-}
-
-func (c *shimClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) {
-	out := new(StateResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/State", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) {
-	out := new(CreateTaskResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Create", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
-	out := new(StartResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Start", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Delete(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*DeleteResponse, error) {
-	out := new(DeleteResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Delete", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) {
-	out := new(DeleteResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/DeleteProcess", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) {
-	out := new(ListPidsResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ListPids", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Pause(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Pause", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Resume(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Resume", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Checkpoint", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Kill", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Exec", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ResizePty", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/CloseIO", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) ShimInfo(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ShimInfoResponse, error) {
-	out := new(ShimInfoResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ShimInfo", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
-	out := new(google_protobuf1.Empty)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Update", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *shimClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) {
-	out := new(WaitResponse)
-	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Wait", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Server API for Shim service
-
-type ShimServer interface {
-	// State returns shim and task state information.
-	State(context.Context, *StateRequest) (*StateResponse, error)
-	Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
-	Start(context.Context, *StartRequest) (*StartResponse, error)
-	Delete(context.Context, *google_protobuf1.Empty) (*DeleteResponse, error)
-	DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error)
-	ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
-	Pause(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
-	Resume(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
-	Checkpoint(context.Context, *CheckpointTaskRequest) (*google_protobuf1.Empty, error)
-	Kill(context.Context, *KillRequest) (*google_protobuf1.Empty, error)
-	Exec(context.Context, *ExecProcessRequest) (*google_protobuf1.Empty, error)
-	ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf1.Empty, error)
-	CloseIO(context.Context, *CloseIORequest) (*google_protobuf1.Empty, error)
-	// ShimInfo returns information about the shim.
-	ShimInfo(context.Context, *google_protobuf1.Empty) (*ShimInfoResponse, error)
-	Update(context.Context, *UpdateTaskRequest) (*google_protobuf1.Empty, error)
-	Wait(context.Context, *WaitRequest) (*WaitResponse, error)
-}
-
-func RegisterShimServer(s *grpc.Server, srv ShimServer) {
-	s.RegisterService(&_Shim_serviceDesc, srv)
-}
-
-func _Shim_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(StateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).State(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/State",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).State(ctx, req.(*StateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CreateTaskRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Create(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Create",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Create(ctx, req.(*CreateTaskRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(StartRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Start(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Start",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Start(ctx, req.(*StartRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(google_protobuf1.Empty)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Delete(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Delete",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Delete(ctx, req.(*google_protobuf1.Empty))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteProcessRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).DeleteProcess(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/DeleteProcess",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).DeleteProcess(ctx, req.(*DeleteProcessRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ListPidsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).ListPids(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ListPids",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).ListPids(ctx, req.(*ListPidsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(google_protobuf1.Empty)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Pause(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Pause",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Pause(ctx, req.(*google_protobuf1.Empty))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(google_protobuf1.Empty)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Resume(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Resume",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Resume(ctx, req.(*google_protobuf1.Empty))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CheckpointTaskRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Checkpoint(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Checkpoint",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Checkpoint(ctx, req.(*CheckpointTaskRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(KillRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Kill(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Kill",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Kill(ctx, req.(*KillRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ExecProcessRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Exec(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Exec",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Exec(ctx, req.(*ExecProcessRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ResizePtyRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).ResizePty(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ResizePty",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).ResizePty(ctx, req.(*ResizePtyRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CloseIORequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).CloseIO(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/CloseIO",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).CloseIO(ctx, req.(*CloseIORequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_ShimInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(google_protobuf1.Empty)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).ShimInfo(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ShimInfo",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).ShimInfo(ctx, req.(*google_protobuf1.Empty))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(UpdateTaskRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Update(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Update",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Update(ctx, req.(*UpdateTaskRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Shim_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(WaitRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ShimServer).Wait(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Wait",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ShimServer).Wait(ctx, req.(*WaitRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Shim_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "containerd.runtime.linux.shim.v1.Shim",
-	HandlerType: (*ShimServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "State",
-			Handler:    _Shim_State_Handler,
-		},
-		{
-			MethodName: "Create",
-			Handler:    _Shim_Create_Handler,
-		},
-		{
-			MethodName: "Start",
-			Handler:    _Shim_Start_Handler,
-		},
-		{
-			MethodName: "Delete",
-			Handler:    _Shim_Delete_Handler,
-		},
-		{
-			MethodName: "DeleteProcess",
-			Handler:    _Shim_DeleteProcess_Handler,
-		},
-		{
-			MethodName: "ListPids",
-			Handler:    _Shim_ListPids_Handler,
-		},
-		{
-			MethodName: "Pause",
-			Handler:    _Shim_Pause_Handler,
-		},
-		{
-			MethodName: "Resume",
-			Handler:    _Shim_Resume_Handler,
-		},
-		{
-			MethodName: "Checkpoint",
-			Handler:    _Shim_Checkpoint_Handler,
-		},
-		{
-			MethodName: "Kill",
-			Handler:    _Shim_Kill_Handler,
-		},
-		{
-			MethodName: "Exec",
-			Handler:    _Shim_Exec_Handler,
-		},
-		{
-			MethodName: "ResizePty",
-			Handler:    _Shim_ResizePty_Handler,
-		},
-		{
-			MethodName: "CloseIO",
-			Handler:    _Shim_CloseIO_Handler,
-		},
-		{
-			MethodName: "ShimInfo",
-			Handler:    _Shim_ShimInfo_Handler,
-		},
-		{
-			MethodName: "Update",
-			Handler:    _Shim_Update_Handler,
-		},
-		{
-			MethodName: "Wait",
-			Handler:    _Shim_Wait_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "github.com/containerd/containerd/linux/shim/v1/shim.proto",
-}
-
 func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -1575,24 +1001,6 @@
 	return i, nil
 }
 
-func encodeFixed64Shim(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Shim(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintShim(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -2169,6 +1577,280 @@
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
+
+type ShimService interface {
+	State(ctx context.Context, req *StateRequest) (*StateResponse, error)
+	Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error)
+	Start(ctx context.Context, req *StartRequest) (*StartResponse, error)
+	Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error)
+	DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error)
+	ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error)
+	Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
+	Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
+	Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error)
+	Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error)
+	Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error)
+	ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error)
+	CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error)
+	ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error)
+	Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error)
+	Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error)
+}
+
+func RegisterShimService(srv *github_com_stevvooe_ttrpc.Server, svc ShimService) {
+	srv.Register("containerd.runtime.linux.shim.v1.Shim", map[string]github_com_stevvooe_ttrpc.Method{
+		"State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req StateRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.State(ctx, &req)
+		},
+		"Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req CreateTaskRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Create(ctx, &req)
+		},
+		"Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req StartRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Start(ctx, &req)
+		},
+		"Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req google_protobuf1.Empty
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Delete(ctx, &req)
+		},
+		"DeleteProcess": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req DeleteProcessRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.DeleteProcess(ctx, &req)
+		},
+		"ListPids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req ListPidsRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.ListPids(ctx, &req)
+		},
+		"Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req google_protobuf1.Empty
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Pause(ctx, &req)
+		},
+		"Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req google_protobuf1.Empty
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Resume(ctx, &req)
+		},
+		"Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req CheckpointTaskRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Checkpoint(ctx, &req)
+		},
+		"Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req KillRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Kill(ctx, &req)
+		},
+		"Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req ExecProcessRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Exec(ctx, &req)
+		},
+		"ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req ResizePtyRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.ResizePty(ctx, &req)
+		},
+		"CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req CloseIORequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.CloseIO(ctx, &req)
+		},
+		"ShimInfo": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req google_protobuf1.Empty
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.ShimInfo(ctx, &req)
+		},
+		"Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req UpdateTaskRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Update(ctx, &req)
+		},
+		"Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
+			var req WaitRequest
+			if err := unmarshal(&req); err != nil {
+				return nil, err
+			}
+			return svc.Wait(ctx, &req)
+		},
+	})
+}
+
+type shimClient struct {
+	client *github_com_stevvooe_ttrpc.Client
+}
+
+func NewShimClient(client *github_com_stevvooe_ttrpc.Client) ShimService {
+	return &shimClient{
+		client: client,
+	}
+}
+
+func (c *shimClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) {
+	var resp StateResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "State", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) {
+	var resp CreateTaskResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Create", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) {
+	var resp StartResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Start", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Delete(ctx context.Context, req *google_protobuf1.Empty) (*DeleteResponse, error) {
+	var resp DeleteResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Delete", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) {
+	var resp DeleteResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "DeleteProcess", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) {
+	var resp ListPidsResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ListPids", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Pause(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Pause", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Resume(ctx context.Context, req *google_protobuf1.Empty) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Resume", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Checkpoint", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Kill(ctx context.Context, req *KillRequest) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Kill", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Exec(ctx context.Context, req *ExecProcessRequest) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Exec", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ResizePty", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) CloseIO(ctx context.Context, req *CloseIORequest) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "CloseIO", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) ShimInfo(ctx context.Context, req *google_protobuf1.Empty) (*ShimInfoResponse, error) {
+	var resp ShimInfoResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "ShimInfo", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Update(ctx context.Context, req *UpdateTaskRequest) (*google_protobuf1.Empty, error) {
+	var resp google_protobuf1.Empty
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Update", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+func (c *shimClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) {
+	var resp WaitResponse
+	if err := c.client.Call(ctx, "containerd.runtime.linux.shim.v1.Shim", "Wait", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
 func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -4674,76 +4356,76 @@
 }
 
 var fileDescriptorShim = []byte{
-	// 1131 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4f, 0x4f, 0x1b, 0x47,
-	0x14, 0x67, 0x8d, 0xff, 0x3e, 0xc7, 0x14, 0xa6, 0x84, 0x6e, 0x1c, 0xc9, 0x58, 0x2b, 0x35, 0xa2,
-	0xaa, 0xb2, 0x2e, 0xa6, 0x4a, 0x9a, 0x56, 0x8a, 0x04, 0x24, 0xaa, 0x50, 0x8b, 0x82, 0x16, 0xd2,
-	0x54, 0xad, 0x2a, 0xb4, 0x78, 0x07, 0x7b, 0x84, 0xbd, 0xb3, 0xd9, 0x99, 0xa5, 0xd0, 0x53, 0x4f,
-	0x3d, 0xf7, 0xe3, 0xf4, 0x23, 0x70, 0xc8, 0xa1, 0xc7, 0x9e, 0xd2, 0xc6, 0xf7, 0x7e, 0x87, 0x68,
-	0xfe, 0x18, 0xaf, 0x6d, 0x36, 0xbb, 0xe6, 0x82, 0xf7, 0xcd, 0xfc, 0xde, 0xcc, 0x9b, 0xf7, 0xfb,
-	0xcd, 0x7b, 0x03, 0x3c, 0xe9, 0x12, 0xde, 0x8b, 0x4e, 0xec, 0x0e, 0x1d, 0xb4, 0x3a, 0xd4, 0xe7,
-	0x2e, 0xf1, 0x71, 0xe8, 0xc5, 0x3f, 0xfb, 0xc4, 0x8f, 0x2e, 0x5a, 0xac, 0x47, 0x06, 0xad, 0xf3,
-	0x4d, 0xf9, 0x6b, 0x07, 0x21, 0xe5, 0x14, 0x35, 0xc7, 0x20, 0x3b, 0x8c, 0x7c, 0x4e, 0x06, 0xd8,
-	0x96, 0x60, 0x5b, 0x82, 0xce, 0x37, 0xeb, 0xf7, 0xba, 0x94, 0x76, 0xfb, 0xb8, 0x25, 0xf1, 0x27,
-	0xd1, 0x69, 0xcb, 0xf5, 0x2f, 0x95, 0x73, 0xfd, 0xfe, 0xf4, 0x14, 0x1e, 0x04, 0x7c, 0x34, 0xb9,
-	0xda, 0xa5, 0x5d, 0x2a, 0x3f, 0x5b, 0xe2, 0x4b, 0x8f, 0xae, 0x4f, 0xbb, 0x88, 0x1d, 0x19, 0x77,
-	0x07, 0x81, 0x06, 0x3c, 0x4a, 0x3d, 0x8b, 0x1b, 0x90, 0x16, 0xbf, 0x0c, 0x30, 0x6b, 0x0d, 0x68,
-	0xe4, 0x73, 0xed, 0xf7, 0xf5, 0x1c, 0x7e, 0xdc, 0x65, 0x67, 0xf2, 0x8f, 0xf2, 0xb5, 0xfe, 0xcf,
-	0xc1, 0xca, 0x6e, 0x88, 0x5d, 0x8e, 0x8f, 0x5c, 0x76, 0xe6, 0xe0, 0xd7, 0x11, 0x66, 0x1c, 0xad,
-	0x41, 0x8e, 0x78, 0xa6, 0xd1, 0x34, 0x36, 0x2a, 0x3b, 0xc5, 0xe1, 0xdb, 0xf5, 0xdc, 0xde, 0x33,
-	0x27, 0x47, 0x3c, 0xb4, 0x06, 0xc5, 0x93, 0xc8, 0xf7, 0xfa, 0xd8, 0xcc, 0x89, 0x39, 0x47, 0x5b,
-	0xc8, 0x84, 0x92, 0xce, 0xa0, 0xb9, 0x28, 0x27, 0x46, 0x26, 0x6a, 0x41, 0x31, 0xa4, 0x94, 0x9f,
-	0x32, 0x33, 0xdf, 0x5c, 0xdc, 0xa8, 0xb6, 0x3f, 0xb1, 0x63, 0x59, 0x97, 0x21, 0xd9, 0xfb, 0xe2,
-	0x28, 0x8e, 0x86, 0xa1, 0x3a, 0x94, 0x39, 0x0e, 0x07, 0xc4, 0x77, 0xfb, 0x66, 0xa1, 0x69, 0x6c,
-	0x94, 0x9d, 0x6b, 0x1b, 0xad, 0x42, 0x81, 0x71, 0x8f, 0xf8, 0x66, 0x51, 0x6e, 0xa2, 0x0c, 0x11,
-	0x14, 0xe3, 0x1e, 0x8d, 0xb8, 0x59, 0x52, 0x41, 0x29, 0x4b, 0x8f, 0xe3, 0x30, 0x34, 0xcb, 0xd7,
-	0xe3, 0x38, 0x0c, 0x51, 0x03, 0xa0, 0xd3, 0xc3, 0x9d, 0xb3, 0x80, 0x12, 0x9f, 0x9b, 0x15, 0x39,
-	0x17, 0x1b, 0x41, 0x9f, 0xc3, 0x4a, 0xe0, 0x86, 0xd8, 0xe7, 0xc7, 0x31, 0x18, 0x48, 0xd8, 0xb2,
-	0x9a, 0xd8, 0x1d, 0x83, 0x6d, 0x28, 0xd1, 0x80, 0x13, 0xea, 0x33, 0xb3, 0xda, 0x34, 0x36, 0xaa,
-	0xed, 0x55, 0x5b, 0xd1, 0x6c, 0x8f, 0x68, 0xb6, 0xb7, 0xfd, 0x4b, 0x67, 0x04, 0xb2, 0x1e, 0x00,
-	0x8a, 0xa7, 0x9b, 0x05, 0xd4, 0x67, 0x18, 0x2d, 0xc3, 0x62, 0xa0, 0x13, 0x5e, 0x73, 0xc4, 0xa7,
-	0xf5, 0x87, 0x01, 0x4b, 0xcf, 0x70, 0x1f, 0x73, 0x9c, 0x0c, 0x42, 0xeb, 0x50, 0xc5, 0x17, 0x84,
-	0x1f, 0x33, 0xee, 0xf2, 0x88, 0x49, 0x4e, 0x6a, 0x0e, 0x88, 0xa1, 0x43, 0x39, 0x82, 0xb6, 0xa1,
-	0x22, 0x2c, 0xec, 0x1d, 0xbb, 0x5c, 0x32, 0x53, 0x6d, 0xd7, 0x67, 0xe2, 0x3b, 0x1a, 0xc9, 0x70,
-	0xa7, 0x7c, 0xf5, 0x76, 0x7d, 0xe1, 0xcf, 0x7f, 0xd7, 0x0d, 0xa7, 0xac, 0xdc, 0xb6, 0xb9, 0x65,
-	0xc3, 0xaa, 0x8a, 0xe3, 0x20, 0xa4, 0x1d, 0xcc, 0x58, 0x8a, 0x44, 0xac, 0xbf, 0x0c, 0x40, 0xcf,
-	0x2f, 0x70, 0x27, 0x1b, 0x7c, 0x82, 0xee, 0x5c, 0x12, 0xdd, 0x8b, 0x37, 0xd3, 0x9d, 0x4f, 0xa0,
-	0xbb, 0x30, 0x41, 0xf7, 0x06, 0xe4, 0x59, 0x80, 0x3b, 0x52, 0x33, 0x49, 0xf4, 0x48, 0x84, 0x75,
-	0x17, 0x3e, 0x9e, 0x88, 0x5c, 0xe5, 0xdd, 0xfa, 0x11, 0x96, 0x1d, 0xcc, 0xc8, 0x6f, 0xf8, 0x80,
-	0x5f, 0xa6, 0x1d, 0x67, 0x15, 0x0a, 0xbf, 0x12, 0x8f, 0xf7, 0x34, 0x17, 0xca, 0x10, 0xa1, 0xf5,
-	0x30, 0xe9, 0xf6, 0x14, 0x07, 0x35, 0x47, 0x5b, 0xd6, 0x03, 0xb8, 0x23, 0x88, 0xc2, 0x69, 0x39,
-	0x7d, 0x93, 0x83, 0x9a, 0x06, 0x6a, 0x2d, 0xcc, 0x7b, 0x41, 0xb5, 0x76, 0x16, 0xc7, 0xda, 0xd9,
-	0x12, 0xe9, 0x92, 0xb2, 0x11, 0x69, 0x5c, 0x6a, 0xdf, 0x8f, 0x5f, 0xcc, 0xf3, 0x4d, 0x7d, 0x37,
-	0x95, 0x8e, 0x1c, 0x0d, 0x1d, 0x33, 0x52, 0xb8, 0x99, 0x91, 0x62, 0x02, 0x23, 0xa5, 0x09, 0x46,
-	0xe2, 0x9c, 0x97, 0xa7, 0x38, 0x9f, 0x92, 0x74, 0xe5, 0xc3, 0x92, 0x86, 0x5b, 0x49, 0xfa, 0x05,
-	0x54, 0xbf, 0x23, 0xfd, 0x7e, 0x86, 0x62, 0xc7, 0x48, 0x77, 0x24, 0xcc, 0x9a, 0xa3, 0x2d, 0x91,
-	0x4b, 0xb7, 0xdf, 0x97, 0xb9, 0x2c, 0x3b, 0xe2, 0xd3, 0x7a, 0x0a, 0x4b, 0xbb, 0x7d, 0xca, 0xf0,
-	0xde, 0x8b, 0x0c, 0xfa, 0x50, 0x09, 0x54, 0x5a, 0x57, 0x86, 0xf5, 0x19, 0x7c, 0xf4, 0x3d, 0x61,
-	0xfc, 0x80, 0x78, 0xa9, 0xd7, 0xcb, 0x81, 0xe5, 0x31, 0x54, 0x8b, 0xe1, 0x29, 0x54, 0x02, 0xa5,
-	0x59, 0xcc, 0x4c, 0x43, 0x96, 0xd9, 0xe6, 0x8d, 0x6c, 0x6a, 0x65, 0xef, 0xf9, 0xa7, 0xd4, 0x19,
-	0xbb, 0x58, 0x3f, 0xc3, 0xdd, 0x71, 0x45, 0x8b, 0xb7, 0x01, 0x04, 0xf9, 0xc0, 0xe5, 0x3d, 0x15,
-	0x86, 0x23, 0xbf, 0xe3, 0x05, 0x2f, 0x97, 0xa5, 0xe0, 0x3d, 0x84, 0xe5, 0xc3, 0x1e, 0x19, 0xc8,
-	0x3d, 0x47, 0x01, 0xdf, 0x83, 0xb2, 0x68, 0xb1, 0xc7, 0xe3, 0x72, 0x56, 0x12, 0xf6, 0x01, 0xf1,
-	0xac, 0x6f, 0x61, 0xe5, 0x65, 0xe0, 0x4d, 0xb5, 0xa3, 0x36, 0x54, 0x42, 0xcc, 0x68, 0x14, 0x76,
-	0xe4, 0x01, 0x93, 0x77, 0x1d, 0xc3, 0xf4, 0xdd, 0x0a, 0x79, 0x5a, 0x42, 0x9f, 0xc8, 0xab, 0x25,
-	0x70, 0x29, 0x57, 0x4b, 0x5f, 0xa1, 0xdc, 0xb8, 0x46, 0x7f, 0x0a, 0xd5, 0x57, 0x2e, 0x49, 0xdd,
-	0x21, 0x84, 0x3b, 0x0a, 0xa6, 0x37, 0x98, 0x92, 0xb8, 0xf1, 0x61, 0x89, 0xe7, 0x6e, 0x23, 0xf1,
-	0xf6, 0x9b, 0x2a, 0xe4, 0x45, 0xda, 0x51, 0x0f, 0x0a, 0xb2, 0x72, 0x20, 0xdb, 0x4e, 0x7b, 0xee,
-	0xd8, 0xf1, 0x5a, 0x54, 0x6f, 0x65, 0xc6, 0xeb, 0x63, 0x31, 0x28, 0xaa, 0xce, 0x86, 0xb6, 0xd2,
-	0x5d, 0x67, 0x9e, 0x1c, 0xf5, 0x2f, 0xe7, 0x73, 0xd2, 0x9b, 0xaa, 0xe3, 0x85, 0x3c, 0xe3, 0xf1,
-	0xae, 0xe5, 0x90, 0xf1, 0x78, 0x31, 0x59, 0x38, 0x50, 0x54, 0x7d, 0x10, 0xad, 0xcd, 0x70, 0xf1,
-	0x5c, 0xbc, 0xfd, 0xea, 0x5f, 0xa4, 0x2f, 0x39, 0xd5, 0xd1, 0x2f, 0xa1, 0x36, 0xd1, 0x5b, 0xd1,
-	0xa3, 0xac, 0x4b, 0x4c, 0x76, 0xd7, 0x5b, 0x6c, 0xfd, 0x1a, 0xca, 0xa3, 0x3a, 0x82, 0x36, 0xd3,
-	0xbd, 0xa7, 0xca, 0x53, 0xbd, 0x3d, 0x8f, 0x8b, 0xde, 0xf2, 0x31, 0x14, 0x0e, 0xdc, 0x88, 0x25,
-	0x27, 0x30, 0x61, 0x1c, 0x7d, 0x05, 0x45, 0x07, 0xb3, 0x68, 0x30, 0xbf, 0xe7, 0x2f, 0x00, 0xb1,
-	0xb7, 0xda, 0xe3, 0x0c, 0x12, 0xbb, 0xa9, 0x0e, 0x26, 0x2e, 0xbf, 0x0f, 0x79, 0xd1, 0x48, 0xd0,
-	0xc3, 0xf4, 0x85, 0x63, 0x0d, 0x27, 0x71, 0xb9, 0x23, 0xc8, 0x8b, 0xf7, 0x07, 0xca, 0x70, 0x15,
-	0x66, 0x5f, 0x58, 0x89, 0xab, 0xbe, 0x82, 0xca, 0xf5, 0xf3, 0x05, 0x65, 0xe0, 0x6d, 0xfa, 0xad,
-	0x93, 0xb8, 0xf0, 0x21, 0x94, 0x74, 0xd7, 0x43, 0x19, 0xf4, 0x37, 0xd9, 0x20, 0x13, 0x17, 0xfd,
-	0x01, 0xca, 0xa3, 0x76, 0x91, 0xc8, 0x76, 0x86, 0x43, 0xcc, 0xb4, 0x9c, 0x97, 0x50, 0x54, 0x7d,
-	0x25, 0x4b, 0x75, 0x9a, 0xe9, 0x40, 0x89, 0xe1, 0x62, 0xc8, 0x8b, 0xda, 0x9e, 0x45, 0x01, 0xb1,
-	0x56, 0x51, 0xb7, 0xb3, 0xc2, 0x55, 0xf4, 0x3b, 0xfb, 0x57, 0xef, 0x1a, 0x0b, 0xff, 0xbc, 0x6b,
-	0x2c, 0xfc, 0x3e, 0x6c, 0x18, 0x57, 0xc3, 0x86, 0xf1, 0xf7, 0xb0, 0x61, 0xfc, 0x37, 0x6c, 0x18,
-	0x3f, 0x6d, 0xcd, 0xf7, 0xff, 0xef, 0x37, 0xe2, 0xf7, 0xa4, 0x28, 0x4f, 0xb1, 0xf5, 0x3e, 0x00,
-	0x00, 0xff, 0xff, 0xad, 0x60, 0x08, 0x28, 0x3d, 0x0f, 0x00, 0x00,
+	// 1133 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xcd, 0x6e, 0xdb, 0x46,
+	0x10, 0x36, 0x69, 0xfd, 0x8e, 0x22, 0xd7, 0xde, 0x3a, 0x2e, 0xa3, 0x00, 0xb2, 0x40, 0xa0, 0x81,
+	0x8b, 0x22, 0x54, 0x2d, 0x17, 0x49, 0xd3, 0x02, 0x01, 0x6c, 0x27, 0x28, 0x8c, 0xd6, 0x88, 0x41,
+	0x3b, 0x4d, 0xd0, 0xa2, 0x30, 0x68, 0x71, 0x2d, 0x2d, 0x2c, 0x91, 0x0c, 0x77, 0xe9, 0xda, 0x3d,
+	0xf5, 0xd4, 0x73, 0x1f, 0xa7, 0x8f, 0xe0, 0x43, 0x0e, 0x3d, 0xf6, 0x94, 0x36, 0xba, 0xf7, 0x1d,
+	0x8a, 0xfd, 0x91, 0x49, 0x49, 0x66, 0x48, 0xf9, 0x62, 0x71, 0x76, 0xbf, 0xd9, 0x9d, 0x9d, 0xef,
+	0xdb, 0x99, 0x35, 0x3c, 0xe9, 0x11, 0xd6, 0x8f, 0x4e, 0xac, 0xae, 0x3f, 0x6c, 0x77, 0x7d, 0x8f,
+	0x39, 0xc4, 0xc3, 0xa1, 0x9b, 0xfc, 0x1c, 0x10, 0x2f, 0xba, 0x68, 0xd3, 0x3e, 0x19, 0xb6, 0xcf,
+	0x37, 0xc5, 0xaf, 0x15, 0x84, 0x3e, 0xf3, 0x51, 0x2b, 0x06, 0x59, 0x61, 0xe4, 0x31, 0x32, 0xc4,
+	0x96, 0x00, 0x5b, 0x02, 0x74, 0xbe, 0xd9, 0xb8, 0xd7, 0xf3, 0xfd, 0xde, 0x00, 0xb7, 0x05, 0xfe,
+	0x24, 0x3a, 0x6d, 0x3b, 0xde, 0xa5, 0x74, 0x6e, 0xdc, 0x9f, 0x9e, 0xc2, 0xc3, 0x80, 0x8d, 0x27,
+	0x57, 0x7b, 0x7e, 0xcf, 0x17, 0x9f, 0x6d, 0xfe, 0xa5, 0x46, 0xd7, 0xa7, 0x5d, 0xf8, 0x8e, 0x94,
+	0x39, 0xc3, 0x40, 0x01, 0x1e, 0x65, 0x9e, 0xc5, 0x09, 0x48, 0x9b, 0x5d, 0x06, 0x98, 0xb6, 0x87,
+	0x7e, 0xe4, 0x31, 0xe5, 0xf7, 0xf5, 0x1c, 0x7e, 0xcc, 0xa1, 0x67, 0xe2, 0x8f, 0xf4, 0x35, 0xff,
+	0xd3, 0x61, 0x65, 0x37, 0xc4, 0x0e, 0xc3, 0x47, 0x0e, 0x3d, 0xb3, 0xf1, 0x9b, 0x08, 0x53, 0x86,
+	0xd6, 0x40, 0x27, 0xae, 0xa1, 0xb5, 0xb4, 0x8d, 0xea, 0x4e, 0x69, 0xf4, 0x6e, 0x5d, 0xdf, 0x7b,
+	0x66, 0xeb, 0xc4, 0x45, 0x6b, 0x50, 0x3a, 0x89, 0x3c, 0x77, 0x80, 0x0d, 0x9d, 0xcf, 0xd9, 0xca,
+	0x42, 0x06, 0x94, 0x55, 0x06, 0x8d, 0x45, 0x31, 0x31, 0x36, 0x51, 0x1b, 0x4a, 0xa1, 0xef, 0xb3,
+	0x53, 0x6a, 0x14, 0x5a, 0x8b, 0x1b, 0xb5, 0xce, 0x27, 0x56, 0x22, 0xeb, 0x22, 0x24, 0x6b, 0x9f,
+	0x1f, 0xc5, 0x56, 0x30, 0xd4, 0x80, 0x0a, 0xc3, 0xe1, 0x90, 0x78, 0xce, 0xc0, 0x28, 0xb6, 0xb4,
+	0x8d, 0x8a, 0x7d, 0x6d, 0xa3, 0x55, 0x28, 0x52, 0xe6, 0x12, 0xcf, 0x28, 0x89, 0x4d, 0xa4, 0xc1,
+	0x83, 0xa2, 0xcc, 0xf5, 0x23, 0x66, 0x94, 0x65, 0x50, 0xd2, 0x52, 0xe3, 0x38, 0x0c, 0x8d, 0xca,
+	0xf5, 0x38, 0x0e, 0x43, 0xd4, 0x04, 0xe8, 0xf6, 0x71, 0xf7, 0x2c, 0xf0, 0x89, 0xc7, 0x8c, 0xaa,
+	0x98, 0x4b, 0x8c, 0xa0, 0xcf, 0x61, 0x25, 0x70, 0x42, 0xec, 0xb1, 0xe3, 0x04, 0x0c, 0x04, 0x6c,
+	0x59, 0x4e, 0xec, 0xc6, 0x60, 0x0b, 0xca, 0x7e, 0xc0, 0x88, 0xef, 0x51, 0xa3, 0xd6, 0xd2, 0x36,
+	0x6a, 0x9d, 0x55, 0x4b, 0xd2, 0x6c, 0x8d, 0x69, 0xb6, 0xb6, 0xbd, 0x4b, 0x7b, 0x0c, 0x32, 0x1f,
+	0x00, 0x4a, 0xa6, 0x9b, 0x06, 0xbe, 0x47, 0x31, 0x5a, 0x86, 0xc5, 0x40, 0x25, 0xbc, 0x6e, 0xf3,
+	0x4f, 0xf3, 0x77, 0x0d, 0x96, 0x9e, 0xe1, 0x01, 0x66, 0x38, 0x1d, 0x84, 0xd6, 0xa1, 0x86, 0x2f,
+	0x08, 0x3b, 0xa6, 0xcc, 0x61, 0x11, 0x15, 0x9c, 0xd4, 0x6d, 0xe0, 0x43, 0x87, 0x62, 0x04, 0x6d,
+	0x43, 0x95, 0x5b, 0xd8, 0x3d, 0x76, 0x98, 0x60, 0xa6, 0xd6, 0x69, 0xcc, 0xc4, 0x77, 0x34, 0x96,
+	0xe1, 0x4e, 0xe5, 0xea, 0xdd, 0xfa, 0xc2, 0x1f, 0xff, 0xac, 0x6b, 0x76, 0x45, 0xba, 0x6d, 0x33,
+	0xd3, 0x82, 0x55, 0x19, 0xc7, 0x41, 0xe8, 0x77, 0x31, 0xa5, 0x19, 0x12, 0x31, 0xff, 0xd4, 0x00,
+	0x3d, 0xbf, 0xc0, 0xdd, 0x7c, 0xf0, 0x09, 0xba, 0xf5, 0x34, 0xba, 0x17, 0x6f, 0xa6, 0xbb, 0x90,
+	0x42, 0x77, 0x71, 0x82, 0xee, 0x0d, 0x28, 0xd0, 0x00, 0x77, 0x85, 0x66, 0xd2, 0xe8, 0x11, 0x08,
+	0xf3, 0x2e, 0x7c, 0x3c, 0x11, 0xb9, 0xcc, 0xbb, 0xf9, 0x1a, 0x96, 0x6d, 0x4c, 0xc9, 0xaf, 0xf8,
+	0x80, 0x5d, 0x66, 0x1d, 0x67, 0x15, 0x8a, 0xbf, 0x10, 0x97, 0xf5, 0x15, 0x17, 0xd2, 0xe0, 0xa1,
+	0xf5, 0x31, 0xe9, 0xf5, 0x25, 0x07, 0x75, 0x5b, 0x59, 0xe6, 0x03, 0xb8, 0xc3, 0x89, 0xc2, 0x59,
+	0x39, 0x7d, 0xab, 0x43, 0x5d, 0x01, 0x95, 0x16, 0xe6, 0xbd, 0xa0, 0x4a, 0x3b, 0x8b, 0xb1, 0x76,
+	0xb6, 0x78, 0xba, 0x84, 0x6c, 0x78, 0x1a, 0x97, 0x3a, 0xf7, 0x93, 0x17, 0xf3, 0x7c, 0x53, 0xdd,
+	0x4d, 0xa9, 0x23, 0x5b, 0x41, 0x63, 0x46, 0x8a, 0x37, 0x33, 0x52, 0x4a, 0x61, 0xa4, 0x3c, 0xc1,
+	0x48, 0x92, 0xf3, 0xca, 0x14, 0xe7, 0x53, 0x92, 0xae, 0x7e, 0x58, 0xd2, 0x70, 0x2b, 0x49, 0xbf,
+	0x80, 0xda, 0x77, 0x64, 0x30, 0xc8, 0x51, 0xec, 0x28, 0xe9, 0x8d, 0x85, 0x59, 0xb7, 0x95, 0xc5,
+	0x73, 0xe9, 0x0c, 0x06, 0x22, 0x97, 0x15, 0x9b, 0x7f, 0x9a, 0x4f, 0x61, 0x69, 0x77, 0xe0, 0x53,
+	0xbc, 0xf7, 0x22, 0x87, 0x3e, 0x64, 0x02, 0xa5, 0xd6, 0xa5, 0x61, 0x7e, 0x06, 0x1f, 0x7d, 0x4f,
+	0x28, 0x3b, 0x20, 0x6e, 0xe6, 0xf5, 0xb2, 0x61, 0x39, 0x86, 0x2a, 0x31, 0x3c, 0x85, 0x6a, 0x20,
+	0x35, 0x8b, 0xa9, 0xa1, 0x89, 0x32, 0xdb, 0xba, 0x91, 0x4d, 0xa5, 0xec, 0x3d, 0xef, 0xd4, 0xb7,
+	0x63, 0x17, 0xf3, 0x27, 0xb8, 0x1b, 0x57, 0xb4, 0x64, 0x1b, 0x40, 0x50, 0x08, 0x1c, 0xd6, 0x97,
+	0x61, 0xd8, 0xe2, 0x3b, 0x59, 0xf0, 0xf4, 0x3c, 0x05, 0xef, 0x21, 0x2c, 0x1f, 0xf6, 0xc9, 0x50,
+	0xec, 0x39, 0x0e, 0xf8, 0x1e, 0x54, 0x78, 0x8b, 0x3d, 0x8e, 0xcb, 0x59, 0x99, 0xdb, 0x07, 0xc4,
+	0x35, 0xbf, 0x85, 0x95, 0x97, 0x81, 0x3b, 0xd5, 0x8e, 0x3a, 0x50, 0x0d, 0x31, 0xf5, 0xa3, 0xb0,
+	0x2b, 0x0e, 0x98, 0xbe, 0x6b, 0x0c, 0x53, 0x77, 0x2b, 0x64, 0x59, 0x09, 0x7d, 0x22, 0xae, 0x16,
+	0xc7, 0x65, 0x5c, 0x2d, 0x75, 0x85, 0xf4, 0xb8, 0x46, 0x7f, 0x0a, 0xb5, 0x57, 0x0e, 0xc9, 0xdc,
+	0x21, 0x84, 0x3b, 0x12, 0xa6, 0x36, 0x98, 0x92, 0xb8, 0xf6, 0x61, 0x89, 0xeb, 0xb7, 0x91, 0x78,
+	0xe7, 0x6d, 0x0d, 0x0a, 0x3c, 0xed, 0xa8, 0x0f, 0x45, 0x51, 0x39, 0x90, 0x65, 0x65, 0x3d, 0x77,
+	0xac, 0x64, 0x2d, 0x6a, 0xb4, 0x73, 0xe3, 0xd5, 0xb1, 0x28, 0x94, 0x64, 0x67, 0x43, 0x5b, 0xd9,
+	0xae, 0x33, 0x4f, 0x8e, 0xc6, 0x97, 0xf3, 0x39, 0xa9, 0x4d, 0xe5, 0xf1, 0x42, 0x96, 0xf3, 0x78,
+	0xd7, 0x72, 0xc8, 0x79, 0xbc, 0x84, 0x2c, 0x6c, 0x28, 0xc9, 0x3e, 0x88, 0xd6, 0x66, 0xb8, 0x78,
+	0xce, 0xdf, 0x7e, 0x8d, 0x2f, 0xb2, 0x97, 0x9c, 0xea, 0xe8, 0x97, 0x50, 0x9f, 0xe8, 0xad, 0xe8,
+	0x51, 0xde, 0x25, 0x26, 0xbb, 0xeb, 0x2d, 0xb6, 0x7e, 0x03, 0x95, 0x71, 0x1d, 0x41, 0x9b, 0xd9,
+	0xde, 0x53, 0xe5, 0xa9, 0xd1, 0x99, 0xc7, 0x45, 0x6d, 0xf9, 0x18, 0x8a, 0x07, 0x4e, 0x44, 0xd3,
+	0x13, 0x98, 0x32, 0x8e, 0xbe, 0x82, 0x92, 0x8d, 0x69, 0x34, 0x9c, 0xdf, 0xf3, 0x67, 0x80, 0xc4,
+	0x5b, 0xed, 0x71, 0x0e, 0x89, 0xdd, 0x54, 0x07, 0x53, 0x97, 0xdf, 0x87, 0x02, 0x6f, 0x24, 0xe8,
+	0x61, 0xf6, 0xc2, 0x89, 0x86, 0x93, 0xba, 0xdc, 0x11, 0x14, 0xf8, 0xfb, 0x03, 0xe5, 0xb8, 0x0a,
+	0xb3, 0x2f, 0xac, 0xd4, 0x55, 0x5f, 0x41, 0xf5, 0xfa, 0xf9, 0x82, 0x72, 0xf0, 0x36, 0xfd, 0xd6,
+	0x49, 0x5d, 0xf8, 0x10, 0xca, 0xaa, 0xeb, 0xa1, 0x1c, 0xfa, 0x9b, 0x6c, 0x90, 0xa9, 0x8b, 0xfe,
+	0x00, 0x95, 0x71, 0xbb, 0x48, 0x65, 0x3b, 0xc7, 0x21, 0x66, 0x5a, 0xce, 0x4b, 0x28, 0xc9, 0xbe,
+	0x92, 0xa7, 0x3a, 0xcd, 0x74, 0xa0, 0xd4, 0x70, 0x31, 0x14, 0x78, 0x6d, 0xcf, 0xa3, 0x80, 0x44,
+	0xab, 0x68, 0x58, 0x79, 0xe1, 0x32, 0xfa, 0x9d, 0xfd, 0xab, 0xf7, 0xcd, 0x85, 0xbf, 0xdf, 0x37,
+	0x17, 0x7e, 0x1b, 0x35, 0xb5, 0xab, 0x51, 0x53, 0xfb, 0x6b, 0xd4, 0xd4, 0xfe, 0x1d, 0x35, 0xb5,
+	0x1f, 0xb7, 0xe6, 0xfb, 0xff, 0xf7, 0x1b, 0xfe, 0xfb, 0x5a, 0x3f, 0x29, 0x89, 0x73, 0x6c, 0xfd,
+	0x1f, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xd0, 0xe6, 0x46, 0x3f, 0x0f, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto
index 8d8af95..6de8f13 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto
+++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto
@@ -4,7 +4,7 @@
 
 import "google/protobuf/any.proto";
 import "google/protobuf/empty.proto";
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/timestamp.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
 import "github.com/containerd/containerd/api/types/task/task.proto";
diff --git a/vendor/github.com/containerd/containerd/linux/task.go b/vendor/github.com/containerd/containerd/linux/task.go
index 268e91a..85327ca 100644
--- a/vendor/github.com/containerd/containerd/linux/task.go
+++ b/vendor/github.com/containerd/containerd/linux/task.go
@@ -4,30 +4,38 @@
 
 import (
 	"context"
+	"sync"
 
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 
 	"github.com/containerd/cgroups"
+	eventstypes "github.com/containerd/containerd/api/events"
 	"github.com/containerd/containerd/api/types/task"
 	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events/exchange"
+	"github.com/containerd/containerd/identifiers"
 	"github.com/containerd/containerd/linux/shim/client"
 	shim "github.com/containerd/containerd/linux/shim/v1"
 	"github.com/containerd/containerd/runtime"
+	runc "github.com/containerd/go-runc"
 	"github.com/gogo/protobuf/types"
 )
 
 // Task on a linux based system
 type Task struct {
+	mu        sync.Mutex
 	id        string
 	pid       int
 	shim      *client.Client
 	namespace string
 	cg        cgroups.Cgroup
 	monitor   runtime.TaskMonitor
+	events    *exchange.Exchange
+	runtime   *runc.Runc
 }
 
-func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime.TaskMonitor) (*Task, error) {
+func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime.TaskMonitor, events *exchange.Exchange, runtime *runc.Runc) (*Task, error) {
 	var (
 		err error
 		cg  cgroups.Cgroup
@@ -45,6 +53,8 @@
 		namespace: namespace,
 		cg:        cg,
 		monitor:   monitor,
+		events:    events,
+		runtime:   runtime,
 	}, nil
 }
 
@@ -64,7 +74,9 @@
 
 // Start the task
 func (t *Task) Start(ctx context.Context) error {
+	t.mu.Lock()
 	hasCgroup := t.cg != nil
+	t.mu.Unlock()
 	r, err := t.shim.Start(ctx, &shim.StartRequest{
 		ID: t.id,
 	})
@@ -77,11 +89,17 @@
 		if err != nil {
 			return err
 		}
+		t.mu.Lock()
 		t.cg = cg
+		t.mu.Unlock()
 		if err := t.monitor.Monitor(t); err != nil {
 			return err
 		}
 	}
+	t.events.Publish(ctx, runtime.TaskStartEventTopic, &eventstypes.TaskStart{
+		ContainerID: t.id,
+		Pid:         uint32(t.pid),
+	})
 	return nil
 }
 
@@ -123,11 +141,13 @@
 
 // Pause the task and all processes
 func (t *Task) Pause(ctx context.Context) error {
-	_, err := t.shim.Pause(ctx, empty)
-	if err != nil {
-		err = errdefs.FromGRPC(err)
+	if _, err := t.shim.Pause(ctx, empty); err != nil {
+		return errdefs.FromGRPC(err)
 	}
-	return err
+	t.events.Publish(ctx, runtime.TaskPausedEventTopic, &eventstypes.TaskPaused{
+		ContainerID: t.id,
+	})
+	return nil
 }
 
 // Resume the task and all processes
@@ -135,6 +155,9 @@
 	if _, err := t.shim.Resume(ctx, empty); err != nil {
 		return errdefs.FromGRPC(err)
 	}
+	t.events.Publish(ctx, runtime.TaskResumedEventTopic, &eventstypes.TaskResumed{
+		ContainerID: t.id,
+	})
 	return nil
 }
 
@@ -154,6 +177,9 @@
 
 // Exec creates a new process inside the task
 func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
+	if err := identifiers.Validate(id); err != nil {
+		return nil, errors.Wrapf(err, "invalid exec id")
+	}
 	request := &shim.ExecProcessRequest{
 		ID:       id,
 		Stdin:    opts.IO.Stdin,
@@ -182,7 +208,8 @@
 	var processList []runtime.ProcessInfo
 	for _, p := range resp.Processes {
 		processList = append(processList, runtime.ProcessInfo{
-			Pid: p.Pid,
+			Pid:  p.Pid,
+			Info: p.Info,
 		})
 	}
 	return processList, nil
@@ -222,6 +249,9 @@
 	if _, err := t.shim.Checkpoint(ctx, r); err != nil {
 		return errdefs.FromGRPC(err)
 	}
+	t.events.Publish(ctx, runtime.TaskCheckpointedEventTopic, &eventstypes.TaskCheckpointed{
+		ContainerID: t.id,
+	})
 	return nil
 }
 
@@ -261,6 +291,8 @@
 
 // Metrics returns runtime specific system level metric information for the task
 func (t *Task) Metrics(ctx context.Context) (interface{}, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
 	if t.cg == nil {
 		return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist")
 	}
@@ -273,6 +305,8 @@
 
 // Cgroup returns the underlying cgroup for a linux task
 func (t *Task) Cgroup() (cgroups.Cgroup, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
 	if t.cg == nil {
 		return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist")
 	}
diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go
index b6a66ba..9325f16 100644
--- a/vendor/github.com/containerd/containerd/metadata/buckets.go
+++ b/vendor/github.com/containerd/containerd/metadata/buckets.go
@@ -31,7 +31,6 @@
 	bucketKeyVersion          = []byte(schemaVersion)
 	bucketKeyDBVersion        = []byte("version")    // stores the version of the schema
 	bucketKeyObjectLabels     = []byte("labels")     // stores the labels for a namespace.
-	bucketKeyObjectIndexes    = []byte("indexes")    // reserved
 	bucketKeyObjectImages     = []byte("images")     // stores image objects
 	bucketKeyObjectContainers = []byte("containers") // stores container objects
 	bucketKeyObjectSnapshots  = []byte("snapshots")  // stores snapshot references
diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go
index c9d8ab6..32f339a 100644
--- a/vendor/github.com/containerd/containerd/metadata/containers.go
+++ b/vendor/github.com/containerd/containerd/metadata/containers.go
@@ -37,12 +37,12 @@
 
 	bkt := getContainerBucket(s.tx, namespace, id)
 	if bkt == nil {
-		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "bucket name %q:%q", namespace, id)
+		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
 	}
 
 	container := containers.Container{ID: id}
 	if err := readContainer(&container, bkt); err != nil {
-		return containers.Container{}, errors.Wrapf(err, "failed to read container %v", id)
+		return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id)
 	}
 
 	return container, nil
@@ -61,7 +61,7 @@
 
 	bkt := getContainersBucket(s.tx, namespace)
 	if bkt == nil {
-		return nil, nil
+		return nil, nil // empty store
 	}
 
 	var m []containers.Container
@@ -73,7 +73,7 @@
 		container := containers.Container{ID: string(k)}
 
 		if err := readContainer(&container, cbkt); err != nil {
-			return errors.Wrap(err, "failed to read container")
+			return errors.Wrapf(err, "failed to read container %q", string(k))
 		}
 
 		if filter.Match(adaptContainer(container)) {
@@ -105,7 +105,7 @@
 	cbkt, err := bkt.CreateBucket([]byte(container.ID))
 	if err != nil {
 		if err == bolt.ErrBucketExists {
-			err = errors.Wrapf(errdefs.ErrAlreadyExists, "content %q", container.ID)
+			err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID)
 		}
 		return containers.Container{}, err
 	}
@@ -113,7 +113,7 @@
 	container.CreatedAt = time.Now().UTC()
 	container.UpdatedAt = container.CreatedAt
 	if err := writeContainer(cbkt, &container); err != nil {
-		return containers.Container{}, errors.Wrap(err, "failed to write container")
+		return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
 	}
 
 	return container, nil
@@ -131,7 +131,7 @@
 
 	bkt := getContainersBucket(s.tx, namespace)
 	if bkt == nil {
-		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
+		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
 	}
 
 	cbkt := bkt.Bucket([]byte(container.ID))
@@ -141,7 +141,7 @@
 
 	var updated containers.Container
 	if err := readContainer(&updated, cbkt); err != nil {
-		return updated, errors.Wrapf(err, "failed to read container from bucket")
+		return updated, errors.Wrapf(err, "failed to read container %q", container.ID)
 	}
 	createdat := updated.CreatedAt
 	updated.ID = container.ID
@@ -211,7 +211,7 @@
 	updated.CreatedAt = createdat
 	updated.UpdatedAt = time.Now().UTC()
 	if err := writeContainer(cbkt, &updated); err != nil {
-		return containers.Container{}, errors.Wrap(err, "failed to write container")
+		return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
 	}
 
 	return updated, nil
@@ -225,7 +225,7 @@
 
 	bkt := getContainersBucket(s.tx, namespace)
 	if bkt == nil {
-		return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %v, bucket not present", id)
+		return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
 	}
 
 	if err := bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound {
@@ -236,7 +236,7 @@
 
 func validateContainer(container *containers.Container) error {
 	if err := identifiers.Validate(container.ID); err != nil {
-		return errors.Wrapf(err, "container.ID validation error")
+		return errors.Wrap(err, "container.ID")
 	}
 
 	for k := range container.Extensions {
diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go
index 0797345..c13f786 100644
--- a/vendor/github.com/containerd/containerd/metadata/content.go
+++ b/vendor/github.com/containerd/containerd/metadata/content.go
@@ -184,6 +184,9 @@
 		if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil {
 			return err
 		}
+		if err := removeContentLease(ctx, tx, dgst); err != nil {
+			return err
+		}
 
 		// Mark content store as dirty for triggering garbage collection
 		cs.db.dirtyL.Lock()
@@ -527,12 +530,14 @@
 	return bkt.Put(bucketKeySize, sizeEncoded)
 }
 
-func (cs *contentStore) garbageCollect(ctx context.Context) error {
-	lt1 := time.Now()
+func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) {
 	cs.l.Lock()
+	t1 := time.Now()
 	defer func() {
+		if err == nil {
+			d = time.Now().Sub(t1)
+		}
 		cs.l.Unlock()
-		log.G(ctx).WithField("t", time.Now().Sub(lt1)).Debugf("content garbage collected")
 	}()
 
 	seen := map[string]struct{}{}
@@ -567,10 +572,10 @@
 
 		return nil
 	}); err != nil {
-		return err
+		return 0, err
 	}
 
-	return cs.Store.Walk(ctx, func(info content.Info) error {
+	err = cs.Store.Walk(ctx, func(info content.Info) error {
 		if _, ok := seen[info.Digest.String()]; !ok {
 			if err := cs.Store.Delete(ctx, info.Digest); err != nil {
 				return err
@@ -579,4 +584,5 @@
 		}
 		return nil
 	})
+	return
 }
diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go
index 7c366eb..8be62a9 100644
--- a/vendor/github.com/containerd/containerd/metadata/db.go
+++ b/vendor/github.com/containerd/containerd/metadata/db.go
@@ -11,7 +11,7 @@
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/gc"
 	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	"github.com/pkg/errors"
 )
 
@@ -53,13 +53,14 @@
 	dirtySS map[string]struct{}
 	dirtyCS bool
 
-	// TODO: Keep track of stats such as pause time, number of collected objects, errors
-	lastCollection time.Time
+	// mutationCallbacks are called after each mutation with the flag
+	// set indicating whether any dirty flags are set
+	mutationCallbacks []func(bool)
 }
 
 // NewDB creates a new metadata database using the provided
 // bolt database, content store, and snapshotters.
-func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshot.Snapshotter) *DB {
+func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter) *DB {
 	m := &DB{
 		db:      db,
 		ss:      make(map[string]*snapshotter, len(ss)),
@@ -137,7 +138,7 @@
 				if err := m.migrate(tx); err != nil {
 					return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
 				}
-				log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("database migration to %s.%d finished", m.schema, m.version)
+				log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version)
 			}
 		}
 
@@ -170,7 +171,7 @@
 
 // Snapshotter returns a namespaced content store for
 // the requested snapshotter name proxied to a snapshotter.
-func (m *DB) Snapshotter(name string) snapshot.Snapshotter {
+func (m *DB) Snapshotter(name string) snapshots.Snapshotter {
 	sn, ok := m.ss[name]
 	if !ok {
 		return nil
@@ -183,29 +184,53 @@
 	return m.db.View(fn)
 }
 
-// Update runs a writable transation on the metadata store.
+// Update runs a writable transaction on the metadata store.
 func (m *DB) Update(fn func(*bolt.Tx) error) error {
 	m.wlock.RLock()
 	defer m.wlock.RUnlock()
-	return m.db.Update(fn)
+	err := m.db.Update(fn)
+	if err == nil {
+		m.dirtyL.Lock()
+		dirty := m.dirtyCS || len(m.dirtySS) > 0
+		for _, fn := range m.mutationCallbacks {
+			fn(dirty)
+		}
+		m.dirtyL.Unlock()
+	}
+
+	return err
+}
+
+// RegisterMutationCallback registers a function to be called after a
+// metadata mutation has been performed.
+//
+// The callback function receives an argument indicating whether a deletion
+// has occurred since the last garbage collection.
+func (m *DB) RegisterMutationCallback(fn func(bool)) {
+	m.dirtyL.Lock()
+	m.mutationCallbacks = append(m.mutationCallbacks, fn)
+	m.dirtyL.Unlock()
+}
+
+// GCStats holds the duration for the different phases of the garbage collector
+type GCStats struct {
+	MetaD     time.Duration
+	ContentD  time.Duration
+	SnapshotD map[string]time.Duration
 }
 
 // GarbageCollect starts garbage collection
-func (m *DB) GarbageCollect(ctx context.Context) error {
-	lt1 := time.Now()
+func (m *DB) GarbageCollect(ctx context.Context) (stats GCStats, err error) {
 	m.wlock.Lock()
-	defer func() {
-		m.wlock.Unlock()
-		log.G(ctx).WithField("d", time.Now().Sub(lt1)).Debug("metadata garbage collected")
-	}()
+	t1 := time.Now()
 
 	marked, err := m.getMarked(ctx)
 	if err != nil {
-		return err
+		m.wlock.Unlock()
+		return GCStats{}, err
 	}
 
 	m.dirtyL.Lock()
-	defer m.dirtyL.Unlock()
 
 	if err := m.db.Update(func(tx *bolt.Tx) error {
 		ctx, cancel := context.WithCancel(ctx)
@@ -232,26 +257,53 @@
 
 		return nil
 	}); err != nil {
-		return err
+		m.dirtyL.Unlock()
+		m.wlock.Unlock()
+		return GCStats{}, err
 	}
 
-	m.lastCollection = time.Now()
+	var wg sync.WaitGroup
 
 	if len(m.dirtySS) > 0 {
+		var sl sync.Mutex
+		stats.SnapshotD = map[string]time.Duration{}
+		wg.Add(len(m.dirtySS))
 		for snapshotterName := range m.dirtySS {
-			log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup")
-			go m.cleanupSnapshotter(snapshotterName)
+			log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup")
+			go func(snapshotterName string) {
+				st1 := time.Now()
+				m.cleanupSnapshotter(snapshotterName)
+
+				sl.Lock()
+				stats.SnapshotD[snapshotterName] = time.Now().Sub(st1)
+				sl.Unlock()
+
+				wg.Done()
+			}(snapshotterName)
 		}
 		m.dirtySS = map[string]struct{}{}
 	}
 
 	if m.dirtyCS {
-		log.G(ctx).Debug("scheduling content cleanup")
-		go m.cleanupContent()
+		wg.Add(1)
+		log.G(ctx).Debug("schedule content cleanup")
+		go func() {
+			ct1 := time.Now()
+			m.cleanupContent()
+			stats.ContentD = time.Now().Sub(ct1)
+			wg.Done()
+		}()
 		m.dirtyCS = false
 	}
 
-	return nil
+	m.dirtyL.Unlock()
+
+	stats.MetaD = time.Now().Sub(t1)
+	m.wlock.Unlock()
+
+	wg.Wait()
+
+	return
 }
 
 func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) {
@@ -302,27 +354,35 @@
 	return marked, nil
 }
 
-func (m *DB) cleanupSnapshotter(name string) {
+func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) {
 	ctx := context.Background()
 	sn, ok := m.ss[name]
 	if !ok {
-		return
+		return 0, nil
 	}
 
-	err := sn.garbageCollect(ctx)
+	d, err := sn.garbageCollect(ctx)
+	logger := log.G(ctx).WithField("snapshotter", name)
 	if err != nil {
-		log.G(ctx).WithError(err).WithField("snapshotter", name).Warn("garbage collection failed")
+		logger.WithError(err).Warn("snapshot garbage collection failed")
+	} else {
+		logger.WithField("d", d).Debugf("snapshot garbage collected")
 	}
+	return d, err
 }
 
-func (m *DB) cleanupContent() {
+func (m *DB) cleanupContent() (time.Duration, error) {
 	ctx := context.Background()
 	if m.cs == nil {
-		return
+		return 0, nil
 	}
 
-	err := m.cs.garbageCollect(ctx)
+	d, err := m.cs.garbageCollect(ctx)
 	if err != nil {
 		log.G(ctx).WithError(err).Warn("content garbage collection failed")
+	} else {
+		log.G(ctx).WithField("d", d).Debugf("content garbage collected")
 	}
+
+	return d, err
 }
diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go
index 7fe6f7d..186f350 100644
--- a/vendor/github.com/containerd/containerd/metadata/gc.go
+++ b/vendor/github.com/containerd/containerd/metadata/gc.go
@@ -301,7 +301,7 @@
 			cbkt = cbkt.Bucket(bucketKeyObjectBlob)
 		}
 		if cbkt != nil {
-			log.G(ctx).WithField("key", node.Key).Debug("delete content")
+			log.G(ctx).WithField("key", node.Key).Debug("remove content")
 			return cbkt.DeleteBucket([]byte(node.Key))
 		}
 	case ResourceSnapshot:
@@ -313,7 +313,7 @@
 			}
 			ssbkt := sbkt.Bucket([]byte(parts[0]))
 			if ssbkt != nil {
-				log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("delete snapshot")
+				log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot")
 				return ssbkt.DeleteBucket([]byte(parts[1]))
 			}
 		}
diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go
index 7e5e3c7..070439a 100644
--- a/vendor/github.com/containerd/containerd/metadata/images.go
+++ b/vendor/github.com/containerd/containerd/metadata/images.go
@@ -100,10 +100,6 @@
 		return images.Image{}, err
 	}
 
-	if image.Name == "" {
-		return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for create")
-	}
-
 	if err := validateImage(&image); err != nil {
 		return images.Image{}, err
 	}
@@ -177,7 +173,7 @@
 			updated = image
 		}
 
-		if err := validateImage(&image); err != nil {
+		if err := validateImage(&updated); err != nil {
 			return err
 		}
 
@@ -187,7 +183,7 @@
 	})
 }
 
-func (s *imageStore) Delete(ctx context.Context, name string) error {
+func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error {
 	namespace, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return err
diff --git a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go
index 006123d..eff0b20 100644
--- a/vendor/github.com/containerd/containerd/metadata/leases.go
+++ b/vendor/github.com/containerd/containerd/metadata/leases.go
@@ -55,7 +55,7 @@
 		if err == bolt.ErrBucketExists {
 			err = errdefs.ErrAlreadyExists
 		}
-		return Lease{}, err
+		return Lease{}, errors.Wrapf(err, "lease %q", lid)
 	}
 
 	t := time.Now().UTC()
@@ -155,7 +155,7 @@
 
 	namespace, ok := namespaces.Namespace(ctx)
 	if !ok {
-		panic("namespace must already be required")
+		panic("namespace must already be checked")
 	}
 
 	bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid))
@@ -176,6 +176,26 @@
 	return bkt.Put([]byte(key), nil)
 }
 
+func removeSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error {
+	lid, ok := leases.Lease(ctx)
+	if !ok {
+		return nil
+	}
+
+	namespace, ok := namespaces.Namespace(ctx)
+	if !ok {
+		panic("namespace must already be checked")
+	}
+
+	bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectSnapshots, []byte(snapshotter))
+	if bkt == nil {
+		// Key does not exist so we return nil
+		return nil
+	}
+
+	return bkt.Delete([]byte(key))
+}
+
 func addContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error {
 	lid, ok := leases.Lease(ctx)
 	if !ok {
@@ -199,3 +219,23 @@
 
 	return bkt.Put([]byte(dgst.String()), nil)
 }
+
+func removeContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error {
+	lid, ok := leases.Lease(ctx)
+	if !ok {
+		return nil
+	}
+
+	namespace, ok := namespaces.Namespace(ctx)
+	if !ok {
+		panic("namespace must already be checked")
+	}
+
+	bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectContent)
+	if bkt == nil {
+		// Key does not exist so we return nil
+		return nil
+	}
+
+	return bkt.Delete([]byte(dgst.String()))
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go
index 22ce3c8..6c34e49 100644
--- a/vendor/github.com/containerd/containerd/metadata/snapshot.go
+++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go
@@ -14,12 +14,12 @@
 	"github.com/containerd/containerd/metadata/boltutil"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/namespaces"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	"github.com/pkg/errors"
 )
 
 type snapshotter struct {
-	snapshot.Snapshotter
+	snapshots.Snapshotter
 	name string
 	db   *DB
 	l    sync.RWMutex
@@ -27,7 +27,7 @@
 
 // newSnapshotter returns a new Snapshotter which namespaces the given snapshot
 // using the provided name and database.
-func newSnapshotter(db *DB, name string, sn snapshot.Snapshotter) *snapshotter {
+func newSnapshotter(db *DB, name string, sn snapshots.Snapshotter) *snapshotter {
 	return &snapshotter{
 		Snapshotter: sn,
 		name:        name,
@@ -39,14 +39,6 @@
 	return fmt.Sprintf("%s/%d/%s", namespace, id, key)
 }
 
-func trimKey(key string) string {
-	parts := strings.SplitN(key, "/", 3)
-	if len(parts) < 3 {
-		return ""
-	}
-	return parts[2]
-}
-
 func getKey(tx *bolt.Tx, ns, name, key string) string {
 	bkt := getSnapshotterBucket(tx, ns, name)
 	if bkt == nil {
@@ -83,15 +75,15 @@
 	return id, nil
 }
 
-func (s *snapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
+func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
 	ns, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
-		return snapshot.Info{}, err
+		return snapshots.Info{}, err
 	}
 
 	var (
 		bkey  string
-		local = snapshot.Info{
+		local = snapshots.Info{
 			Name: key,
 		}
 	)
@@ -116,33 +108,33 @@
 
 		return nil
 	}); err != nil {
-		return snapshot.Info{}, err
+		return snapshots.Info{}, err
 	}
 
 	info, err := s.Snapshotter.Stat(ctx, bkey)
 	if err != nil {
-		return snapshot.Info{}, err
+		return snapshots.Info{}, err
 	}
 
 	return overlayInfo(info, local), nil
 }
 
-func (s *snapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) {
+func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
 	s.l.RLock()
 	defer s.l.RUnlock()
 
 	ns, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
-		return snapshot.Info{}, err
+		return snapshots.Info{}, err
 	}
 
 	if info.Name == "" {
-		return snapshot.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "")
+		return snapshots.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "")
 	}
 
 	var (
 		bkey  string
-		local = snapshot.Info{
+		local = snapshots.Info{
 			Name: info.Name,
 		}
 	)
@@ -203,18 +195,18 @@
 
 		return nil
 	}); err != nil {
-		return snapshot.Info{}, err
+		return snapshots.Info{}, err
 	}
 
 	info, err = s.Snapshotter.Stat(ctx, bkey)
 	if err != nil {
-		return snapshot.Info{}, err
+		return snapshots.Info{}, err
 	}
 
 	return overlayInfo(info, local), nil
 }
 
-func overlayInfo(info, overlay snapshot.Info) snapshot.Info {
+func overlayInfo(info, overlay snapshots.Info) snapshots.Info {
 	// Merge info
 	info.Name = overlay.Name
 	info.Created = overlay.Created
@@ -230,10 +222,10 @@
 	return info
 }
 
-func (s *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
+func (s *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
 	bkey, err := s.resolveKey(ctx, key)
 	if err != nil {
-		return snapshot.Usage{}, err
+		return snapshots.Usage{}, err
 	}
 	return s.Snapshotter.Usage(ctx, bkey)
 }
@@ -246,15 +238,15 @@
 	return s.Snapshotter.Mounts(ctx, bkey)
 }
 
-func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
+func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
 	return s.createSnapshot(ctx, key, parent, false, opts)
 }
 
-func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
+func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
 	return s.createSnapshot(ctx, key, parent, true, opts)
 }
 
-func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshot.Opt) ([]mount.Mount, error) {
+func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshots.Opt) ([]mount.Mount, error) {
 	s.l.RLock()
 	defer s.l.RUnlock()
 
@@ -263,7 +255,7 @@
 		return nil, err
 	}
 
-	var base snapshot.Info
+	var base snapshots.Info
 	for _, opt := range opts {
 		if err := opt(&base); err != nil {
 			return nil, err
@@ -284,10 +276,14 @@
 		bbkt, err := bkt.CreateBucket([]byte(key))
 		if err != nil {
 			if err == bolt.ErrBucketExists {
-				err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", key)
+				err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key)
 			}
 			return err
 		}
+		if err := addSnapshotLease(ctx, tx, s.name, key); err != nil {
+			return err
+		}
+
 		var bparent string
 		if parent != "" {
 			pbkt := bkt.Bucket([]byte(parent))
@@ -326,10 +322,6 @@
 			return err
 		}
 
-		if err := addSnapshotLease(ctx, tx, s.name, key); err != nil {
-			return err
-		}
-
 		// TODO: Consider doing this outside of transaction to lessen
 		// metadata lock time
 		if readonly {
@@ -344,7 +336,7 @@
 	return m, nil
 }
 
-func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error {
+func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
 	s.l.RLock()
 	defer s.l.RUnlock()
 
@@ -353,7 +345,7 @@
 		return err
 	}
 
-	var base snapshot.Info
+	var base snapshots.Info
 	for _, opt := range opts {
 		if err := opt(&base); err != nil {
 			return err
@@ -367,16 +359,20 @@
 	return update(ctx, s.db, func(tx *bolt.Tx) error {
 		bkt := getSnapshotterBucket(tx, ns, s.name)
 		if bkt == nil {
-			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+			return errors.Wrapf(errdefs.ErrNotFound,
+				"can not find snapshotter %q", s.name)
 		}
 
 		bbkt, err := bkt.CreateBucket([]byte(name))
 		if err != nil {
 			if err == bolt.ErrBucketExists {
-				err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", name)
+				err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", name)
 			}
 			return err
 		}
+		if err := addSnapshotLease(ctx, tx, s.name, name); err != nil {
+			return err
+		}
 
 		obkt := bkt.Bucket([]byte(key))
 		if obkt == nil {
@@ -425,9 +421,13 @@
 		if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil {
 			return err
 		}
+
 		if err := bkt.DeleteBucket([]byte(key)); err != nil {
 			return err
 		}
+		if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil {
+			return err
+		}
 
 		// TODO: Consider doing this outside of transaction to lessen
 		// metadata lock time
@@ -479,6 +479,9 @@
 		if err := bkt.DeleteBucket([]byte(key)); err != nil {
 			return err
 		}
+		if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil {
+			return err
+		}
 
 		// Mark snapshotter as dirty for triggering garbage collection
 		s.db.dirtyL.Lock()
@@ -491,10 +494,10 @@
 
 type infoPair struct {
 	bkey string
-	info snapshot.Info
+	info snapshots.Info
 }
 
-func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
+func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
 	ns, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return err
@@ -531,7 +534,7 @@
 
 					pair := infoPair{
 						bkey: string(sbkt.Get(bucketKeyName)),
-						info: snapshot.Info{
+						info: snapshots.Info{
 							Name:   string(k),
 							Parent: string(sbkt.Get(bucketKeyParent)),
 						},
@@ -584,7 +587,7 @@
 	return nil
 }
 
-func validateSnapshot(info *snapshot.Info) error {
+func validateSnapshot(info *snapshots.Info) error {
 	for k, v := range info.Labels {
 		if err := labels.Validate(k, v); err != nil {
 			return errors.Wrapf(err, "info.Labels")
@@ -594,13 +597,14 @@
 	return nil
 }
 
-func (s *snapshotter) garbageCollect(ctx context.Context) error {
-	logger := log.G(ctx).WithField("snapshotter", s.name)
-	lt1 := time.Now()
+func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err error) {
 	s.l.Lock()
+	t1 := time.Now()
 	defer func() {
+		if err == nil {
+			d = time.Now().Sub(t1)
+		}
 		s.l.Unlock()
-		logger.WithField("t", time.Now().Sub(lt1)).Debugf("garbage collected")
 	}()
 
 	seen := map[string]struct{}{}
@@ -644,27 +648,30 @@
 
 		return nil
 	}); err != nil {
-		return err
+		return 0, err
 	}
 
 	roots, err := s.walkTree(ctx, seen)
 	if err != nil {
-		return err
+		return 0, err
 	}
 
-	// TODO: Unlock before prune (once nodes are fully unavailable)
+	// TODO: Unlock before removal (once nodes are fully unavailable).
+	// This could be achieved through doing prune inside the lock
+	// and having a cleanup method which actually performs the
+	// deletions on the snapshotters which support it.
 
 	for _, node := range roots {
 		if err := s.pruneBranch(ctx, node); err != nil {
-			return err
+			return 0, err
 		}
 	}
 
-	return nil
+	return
 }
 
 type treeNode struct {
-	info     snapshot.Info
+	info     snapshots.Info
 	remove   bool
 	children []*treeNode
 }
@@ -673,7 +680,7 @@
 	roots := []*treeNode{}
 	nodes := map[string]*treeNode{}
 
-	if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshot.Info) error {
+	if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshots.Info) error {
 		_, isSeen := seen[info.Name]
 		node, ok := nodes[info.Name]
 		if !ok {
@@ -716,7 +723,7 @@
 			if !errdefs.IsFailedPrecondition(err) {
 				return err
 			}
-			logger.WithError(err).WithField("key", node.info.Name).Warnf("snapshot removal failed")
+			logger.WithError(err).WithField("key", node.info.Name).Warnf("failed to remove snapshot")
 		} else {
 			logger.WithField("key", node.info.Name).Debug("removed snapshot")
 		}
@@ -724,3 +731,8 @@
 
 	return nil
 }
+
+// Close closes s.Snapshotter but not db
+func (s *snapshotter) Close() error {
+	return s.Snapshotter.Close()
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go
index 474792d..de2e8bb 100644
--- a/vendor/github.com/containerd/containerd/mount/mount_linux.go
+++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go
@@ -2,7 +2,9 @@
 
 import (
 	"strings"
+	"time"
 
+	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
 
@@ -42,8 +44,27 @@
 }
 
 // Unmount the provided mount path with the flags
-func Unmount(mount string, flags int) error {
-	return unix.Unmount(mount, flags)
+func Unmount(target string, flags int) error {
+	if err := unmount(target, flags); err != nil && err != unix.EINVAL {
+		return err
+	}
+	return nil
+}
+
+func unmount(target string, flags int) error {
+	for i := 0; i < 50; i++ {
+		if err := unix.Unmount(target, flags); err != nil {
+			switch err {
+			case unix.EBUSY:
+				time.Sleep(50 * time.Millisecond)
+				continue
+			default:
+				return err
+			}
+		}
+		return nil
+	}
+	return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target)
 }
 
 // UnmountAll repeatedly unmounts the given mount point until there
@@ -51,7 +72,7 @@
 // useful for undoing a stack of mounts on the same mount point.
 func UnmountAll(mount string, flags int) error {
 	for {
-		if err := Unmount(mount, flags); err != nil {
+		if err := unmount(mount, flags); err != nil {
 			// EINVAL is returned if the target is not a
 			// mount point, indicating that we are
 			// done. It can also indicate a few other
diff --git a/vendor/github.com/containerd/containerd/services/namespaces/client.go b/vendor/github.com/containerd/containerd/namespaces.go
similarity index 67%
rename from vendor/github.com/containerd/containerd/services/namespaces/client.go
rename to vendor/github.com/containerd/containerd/namespaces.go
index fd59ec6..36fc50c 100644
--- a/vendor/github.com/containerd/containerd/services/namespaces/client.go
+++ b/vendor/github.com/containerd/containerd/namespaces.go
@@ -1,4 +1,4 @@
-package namespaces
+package containerd
 
 import (
 	"context"
@@ -10,16 +10,16 @@
 	"github.com/gogo/protobuf/types"
 )
 
-// NewStoreFromClient returns a new namespace store
-func NewStoreFromClient(client api.NamespacesClient) namespaces.Store {
-	return &remote{client: client}
+// NewNamespaceStoreFromClient returns a new namespace store
+func NewNamespaceStoreFromClient(client api.NamespacesClient) namespaces.Store {
+	return &remoteNamespaces{client: client}
 }
 
-type remote struct {
+type remoteNamespaces struct {
 	client api.NamespacesClient
 }
 
-func (r *remote) Create(ctx context.Context, namespace string, labels map[string]string) error {
+func (r *remoteNamespaces) Create(ctx context.Context, namespace string, labels map[string]string) error {
 	var req api.CreateNamespaceRequest
 
 	req.Namespace = api.Namespace{
@@ -35,7 +35,7 @@
 	return nil
 }
 
-func (r *remote) Labels(ctx context.Context, namespace string) (map[string]string, error) {
+func (r *remoteNamespaces) Labels(ctx context.Context, namespace string) (map[string]string, error) {
 	var req api.GetNamespaceRequest
 	req.Name = namespace
 
@@ -47,7 +47,7 @@
 	return resp.Namespace.Labels, nil
 }
 
-func (r *remote) SetLabel(ctx context.Context, namespace, key, value string) error {
+func (r *remoteNamespaces) SetLabel(ctx context.Context, namespace, key, value string) error {
 	var req api.UpdateNamespaceRequest
 
 	req.Namespace = api.Namespace{
@@ -67,7 +67,7 @@
 	return nil
 }
 
-func (r *remote) List(ctx context.Context) ([]string, error) {
+func (r *remoteNamespaces) List(ctx context.Context) ([]string, error) {
 	var req api.ListNamespacesRequest
 
 	resp, err := r.client.List(ctx, &req)
@@ -84,7 +84,7 @@
 	return namespaces, nil
 }
 
-func (r *remote) Delete(ctx context.Context, namespace string) error {
+func (r *remoteNamespaces) Delete(ctx context.Context, namespace string) error {
 	var req api.DeleteNamespaceRequest
 
 	req.Name = namespace
diff --git a/vendor/github.com/containerd/containerd/oci/client.go b/vendor/github.com/containerd/containerd/oci/client.go
new file mode 100644
index 0000000..d2cd355
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/oci/client.go
@@ -0,0 +1,22 @@
+package oci
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/snapshots"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Client interface used by SpecOpt
+type Client interface {
+	SnapshotService(snapshotterName string) snapshots.Snapshotter
+}
+
+// Image interface used by some SpecOpt to query image configuration
+type Image interface {
+	// Config descriptor for the image.
+	Config(ctx context.Context) (ocispec.Descriptor, error)
+	// ContentStore provides a content store which contains image blob data
+	ContentStore() content.Store
+}
diff --git a/vendor/github.com/containerd/containerd/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go
similarity index 75%
rename from vendor/github.com/containerd/containerd/spec.go
rename to vendor/github.com/containerd/containerd/oci/spec.go
index 850f470..558a357 100644
--- a/vendor/github.com/containerd/containerd/spec.go
+++ b/vendor/github.com/containerd/containerd/oci/spec.go
@@ -1,4 +1,4 @@
-package containerd
+package oci
 
 import (
 	"context"
@@ -9,7 +9,7 @@
 
 // GenerateSpec will generate a default spec from the provided image
 // for use as a containerd container
-func GenerateSpec(ctx context.Context, client *Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) {
+func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) {
 	s, err := createDefaultSpec(ctx, c.ID)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go
new file mode 100644
index 0000000..c940c7a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go
@@ -0,0 +1,35 @@
+package oci
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/containers"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// SpecOpts sets spec specific information to a newly generated OCI spec
+type SpecOpts func(context.Context, Client, *containers.Container, *specs.Spec) error
+
+// WithProcessArgs replaces the args on the generated spec
+func WithProcessArgs(args ...string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
+		s.Process.Args = args
+		return nil
+	}
+}
+
+// WithProcessCwd replaces the current working directory on the generated spec
+func WithProcessCwd(cwd string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
+		s.Process.Cwd = cwd
+		return nil
+	}
+}
+
+// WithHostname sets the container's hostname
+func WithHostname(name string) SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
+		s.Hostname = name
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
similarity index 66%
rename from vendor/github.com/containerd/containerd/spec_opts_unix.go
rename to vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
index 01d5121..865aff2 100644
--- a/vendor/github.com/containerd/containerd/spec_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
@@ -1,6 +1,6 @@
 // +build !windows
 
-package containerd
+package oci
 
 import (
 	"context"
@@ -12,16 +12,12 @@
 	"strconv"
 	"strings"
 
-	"golang.org/x/sys/unix"
-
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/fs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/namespaces"
-	"github.com/containerd/containerd/platforms"
-	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/runc/libcontainer/user"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -30,7 +26,7 @@
 
 // WithTTY sets the information on the spec as well as the environment variables for
 // using a TTY
-func WithTTY(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 	s.Process.Terminal = true
 	s.Process.Env = append(s.Process.Env, "TERM=xterm")
 	return nil
@@ -38,7 +34,7 @@
 
 // WithHostNamespace allows a task to run inside the host's linux namespace
 func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		for i, n := range s.Linux.Namespaces {
 			if n.Type == ns {
 				s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...)
@@ -52,7 +48,7 @@
 // WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the
 // spec, the existing namespace is replaced by the one provided.
 func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		for i, n := range s.Linux.Namespaces {
 			if n.Type == ns.Type {
 				before := s.Linux.Namespaces[:i]
@@ -68,13 +64,9 @@
 }
 
 // WithImageConfig configures the spec to from the configuration of an Image
-func WithImageConfig(i Image) SpecOpts {
-	return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
-		var (
-			image = i.(*image)
-			store = client.ContentStore()
-		)
-		ic, err := image.i.Config(ctx, store, platforms.Default())
+func WithImageConfig(image Image) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+		ic, err := image.Config(ctx)
 		if err != nil {
 			return err
 		}
@@ -84,7 +76,7 @@
 		)
 		switch ic.MediaType {
 		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
-			p, err := content.ReadBlob(ctx, store, ic.Digest)
+			p, err := content.ReadBlob(ctx, image.ContentStore(), ic.Digest)
 			if err != nil {
 				return err
 			}
@@ -96,6 +88,11 @@
 		default:
 			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
 		}
+
+		if s.Process == nil {
+			s.Process = &specs.Process{}
+		}
+
 		s.Process.Env = append(s.Process.Env, config.Env...)
 		cmd := config.Cmd
 		s.Process.Args = append(config.Entrypoint, cmd...)
@@ -103,7 +100,7 @@
 			parts := strings.Split(config.User, ":")
 			switch len(parts) {
 			case 1:
-				v, err := strconv.ParseUint(parts[0], 0, 10)
+				v, err := strconv.Atoi(parts[0])
 				if err != nil {
 					// if we cannot parse as a uint they try to see if it is a username
 					if err := WithUsername(config.User)(ctx, client, c, s); err != nil {
@@ -115,13 +112,13 @@
 					return err
 				}
 			case 2:
-				v, err := strconv.ParseUint(parts[0], 0, 10)
+				v, err := strconv.Atoi(parts[0])
 				if err != nil {
-					return err
+					return errors.Wrapf(err, "parse uid %s", parts[0])
 				}
 				uid := uint32(v)
-				if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
-					return err
+				if v, err = strconv.Atoi(parts[1]); err != nil {
+					return errors.Wrapf(err, "parse gid %s", parts[1])
 				}
 				gid := uint32(v)
 				s.Process.User.UID, s.Process.User.GID = uid, gid
@@ -140,7 +137,7 @@
 
 // WithRootFSPath specifies unmanaged rootfs path.
 func WithRootFSPath(path string) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		if s.Root == nil {
 			s.Root = &specs.Root{}
 		}
@@ -152,7 +149,7 @@
 
 // WithRootFSReadonly sets specs.Root.Readonly to true
 func WithRootFSReadonly() SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		if s.Root == nil {
 			s.Root = &specs.Root{}
 		}
@@ -161,22 +158,14 @@
 	}
 }
 
-// WithResources sets the provided resources on the spec for task updates
-func WithResources(resources *specs.LinuxResources) UpdateTaskOpts {
-	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
-		r.Resources = resources
-		return nil
-	}
-}
-
 // WithNoNewPrivileges sets no_new_privileges on the process for the container
-func WithNoNewPrivileges(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 	s.Process.NoNewPrivileges = true
 	return nil
 }
 
 // WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly
-func WithHostHostsFile(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 	s.Mounts = append(s.Mounts, specs.Mount{
 		Destination: "/etc/hosts",
 		Type:        "bind",
@@ -187,7 +176,7 @@
 }
 
 // WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly
-func WithHostResolvconf(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 	s.Mounts = append(s.Mounts, specs.Mount{
 		Destination: "/etc/resolv.conf",
 		Type:        "bind",
@@ -198,7 +187,7 @@
 }
 
 // WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly
-func WithHostLocaltime(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 	s.Mounts = append(s.Mounts, specs.Mount{
 		Destination: "/etc/localtime",
 		Type:        "bind",
@@ -211,7 +200,7 @@
 // WithUserNamespace sets the uid and gid mappings for the task
 // this can be called multiple times to add more mappings to the generated spec
 func WithUserNamespace(container, host, size uint32) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		var hasUserns bool
 		for _, ns := range s.Linux.Namespaces {
 			if ns.Type == specs.UserNamespace {
@@ -235,68 +224,9 @@
 	}
 }
 
-// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
-// filesystem to be used by a container with user namespaces
-func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
-	return withRemappedSnapshotBase(id, i, uid, gid, false)
-}
-
-// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only.
-func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
-	return withRemappedSnapshotBase(id, i, uid, gid, true)
-}
-
-func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
-	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
-		if err != nil {
-			return err
-		}
-
-		setSnapshotterIfEmpty(c)
-
-		var (
-			snapshotter = client.SnapshotService(c.Snapshotter)
-			parent      = identity.ChainID(diffIDs).String()
-			usernsID    = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
-		)
-		if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
-			if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
-				c.SnapshotKey = id
-				c.Image = i.Name()
-				return nil
-			} else if !errdefs.IsNotFound(err) {
-				return err
-			}
-		}
-		mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent)
-		if err != nil {
-			return err
-		}
-		if err := remapRootFS(mounts, uid, gid); err != nil {
-			snapshotter.Remove(ctx, usernsID)
-			return err
-		}
-		if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil {
-			return err
-		}
-		if readonly {
-			_, err = snapshotter.View(ctx, id, usernsID)
-		} else {
-			_, err = snapshotter.Prepare(ctx, id, usernsID)
-		}
-		if err != nil {
-			return err
-		}
-		c.SnapshotKey = id
-		c.Image = i.Name()
-		return nil
-	}
-}
-
 // WithCgroup sets the container's cgroup path
 func WithCgroup(path string) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		s.Linux.CgroupsPath = path
 		return nil
 	}
@@ -305,7 +235,7 @@
 // WithNamespacedCgroup uses the namespace set on the context to create a
 // root directory for containers in the cgroup with the id as the subcgroup
 func WithNamespacedCgroup() SpecOpts {
-	return func(ctx context.Context, _ *Client, c *containers.Container, s *specs.Spec) error {
+	return func(ctx context.Context, _ Client, c *containers.Container, s *specs.Spec) error {
 		namespace, err := namespaces.NamespaceRequired(ctx)
 		if err != nil {
 			return err
@@ -317,7 +247,7 @@
 
 // WithUIDGID allows the UID and GID for the Process to be set
 func WithUIDGID(uid, gid uint32) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		s.Process.User.UID = uid
 		s.Process.User.GID = gid
 		return nil
@@ -329,7 +259,7 @@
 // or uid is not found in /etc/passwd, it sets gid to be the same with
 // uid, and not returns error.
 func WithUserID(uid uint32) SpecOpts {
-	return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
+	return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
 		if c.Snapshotter == "" {
 			return errors.Errorf("no snapshotter set for container")
 		}
@@ -345,13 +275,19 @@
 		if err != nil {
 			return err
 		}
-		defer os.RemoveAll(root)
+		defer os.Remove(root)
 		for _, m := range mounts {
 			if err := m.Mount(root); err != nil {
 				return err
 			}
 		}
-		defer unix.Unmount(root, 0)
+		defer func() {
+			if uerr := mount.Unmount(root, 0); uerr != nil {
+				if err == nil {
+					err = uerr
+				}
+			}
+		}()
 		ppath, err := fs.RootPath(root, "/etc/passwd")
 		if err != nil {
 			return err
@@ -386,7 +322,7 @@
 // does not exist, or the username is not found in /etc/passwd,
 // it returns error.
 func WithUsername(username string) SpecOpts {
-	return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
+	return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
 		if c.Snapshotter == "" {
 			return errors.Errorf("no snapshotter set for container")
 		}
@@ -402,13 +338,19 @@
 		if err != nil {
 			return err
 		}
-		defer os.RemoveAll(root)
+		defer os.Remove(root)
 		for _, m := range mounts {
 			if err := m.Mount(root); err != nil {
 				return err
 			}
 		}
-		defer unix.Unmount(root, 0)
+		defer func() {
+			if uerr := mount.Unmount(root, 0); uerr != nil {
+				if err == nil {
+					err = uerr
+				}
+			}
+		}()
 		ppath, err := fs.RootPath(root, "/etc/passwd")
 		if err != nil {
 			return err
diff --git a/vendor/github.com/containerd/containerd/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
similarity index 65%
rename from vendor/github.com/containerd/containerd/spec_opts_windows.go
rename to vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
index 1fc5d5e..796ad55 100644
--- a/vendor/github.com/containerd/containerd/spec_opts_windows.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
@@ -1,6 +1,6 @@
 // +build windows
 
-package containerd
+package oci
 
 import (
 	"context"
@@ -10,19 +10,14 @@
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/platforms"
 	"github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
 // WithImageConfig configures the spec to from the configuration of an Image
-func WithImageConfig(i Image) SpecOpts {
-	return func(ctx context.Context, client *Client, _ *containers.Container, s *specs.Spec) error {
-		var (
-			image = i.(*image)
-			store = client.ContentStore()
-		)
-		ic, err := image.i.Config(ctx, store, platforms.Default())
+func WithImageConfig(image Image) SpecOpts {
+	return func(ctx context.Context, client Client, _ *containers.Container, s *specs.Spec) error {
+		ic, err := image.Config(ctx)
 		if err != nil {
 			return err
 		}
@@ -32,7 +27,7 @@
 		)
 		switch ic.MediaType {
 		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
-			p, err := content.ReadBlob(ctx, store, ic.Digest)
+			p, err := content.ReadBlob(ctx, image.ContentStore(), ic.Digest)
 			if err != nil {
 				return err
 			}
@@ -55,7 +50,7 @@
 // WithTTY sets the information on the spec as well as the environment variables for
 // using a TTY
 func WithTTY(width, height int) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *specs.Spec) error {
 		s.Process.Terminal = true
 		if s.Process.ConsoleSize == nil {
 			s.Process.ConsoleSize = &specs.Box{}
@@ -66,10 +61,10 @@
 	}
 }
 
-// WithResources sets the provided resources on the spec for task updates
-func WithResources(resources *specs.WindowsResources) UpdateTaskOpts {
-	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
-		r.Resources = resources
+// WithUsername sets the username on the process
+func WithUsername(username string) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+		s.Process.User.Username = username
 		return nil
 	}
 }
diff --git a/vendor/github.com/containerd/containerd/spec_unix.go b/vendor/github.com/containerd/containerd/oci/spec_unix.go
similarity index 76%
rename from vendor/github.com/containerd/containerd/spec_unix.go
rename to vendor/github.com/containerd/containerd/oci/spec_unix.go
index 957f90e..c8f3b37 100644
--- a/vendor/github.com/containerd/containerd/spec_unix.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_unix.go
@@ -1,17 +1,11 @@
 // +build !windows
 
-package containerd
+package oci
 
 import (
 	"context"
-	"io/ioutil"
-	"os"
 	"path/filepath"
-	"syscall"
 
-	"golang.org/x/sys/unix"
-
-	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/namespaces"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
@@ -142,8 +136,6 @@
 			},
 		},
 		Linux: &specs.Linux{
-			// TODO (AkihiroSuda): unmask /sys/firmware on Windows daemon for LCOW support?
-			// https://github.com/moby/moby/pull/33241/files#diff-a1f5051ce84e711a2ee688ab9ded5e74R215
 			MaskedPaths: []string{
 				"/proc/kcore",
 				"/proc/latency_stats",
@@ -175,32 +167,3 @@
 	}
 	return s, nil
 }
-
-func remapRootFS(mounts []mount.Mount, uid, gid uint32) error {
-	root, err := ioutil.TempDir("", "ctd-remap")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(root)
-	for _, m := range mounts {
-		if err := m.Mount(root); err != nil {
-			return err
-		}
-	}
-	defer unix.Unmount(root, 0)
-	return filepath.Walk(root, incrementFS(root, uid, gid))
-}
-
-func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
-	return func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		var (
-			stat = info.Sys().(*syscall.Stat_t)
-			u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc)
-		)
-		// be sure the lchown the path as to not de-reference the symlink to a host file
-		return os.Lchown(path, u, g)
-	}
-}
diff --git a/vendor/github.com/containerd/containerd/spec_windows.go b/vendor/github.com/containerd/containerd/oci/spec_windows.go
similarity index 96%
rename from vendor/github.com/containerd/containerd/spec_windows.go
rename to vendor/github.com/containerd/containerd/oci/spec_windows.go
index 16a58b4..64c2288 100644
--- a/vendor/github.com/containerd/containerd/spec_windows.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_windows.go
@@ -1,4 +1,4 @@
-package containerd
+package oci
 
 import (
 	"context"
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go
index 9bda46c..5746bf7 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin.go
@@ -54,6 +54,8 @@
 	MetadataPlugin Type = "io.containerd.metadata.v1"
 	// ContentPlugin implements a content store
 	ContentPlugin Type = "io.containerd.content.v1"
+	// GCPlugin implements garbage collection policy
+	GCPlugin Type = "io.containerd.gc.v1"
 )
 
 // Registration contains information for registering a plugin
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
index d910124..eee0e3f 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
@@ -1,4 +1,4 @@
-// +build go1.8,!windows,amd64
+// +build go1.8,!windows,amd64,!static_build
 
 package plugin
 
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go
index 21a4570..180917a 100644
--- a/vendor/github.com/containerd/containerd/plugin/plugin_other.go
+++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go
@@ -1,4 +1,4 @@
-// +build !go1.8 windows !amd64
+// +build !go1.8 windows !amd64 static_build
 
 package plugin
 
diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go
index e51367a..ad1a2a1 100644
--- a/vendor/github.com/containerd/containerd/process.go
+++ b/vendor/github.com/containerd/containerd/process.go
@@ -7,6 +7,7 @@
 	"time"
 
 	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/pkg/errors"
 )
@@ -28,7 +29,7 @@
 	// Resize changes the width and heigh of the process's terminal
 	Resize(ctx context.Context, w, h uint32) error
 	// IO returns the io set for the process
-	IO() IO
+	IO() cio.IO
 	// Status returns the executing status of the process
 	Status(context.Context) (Status, error)
 }
@@ -72,7 +73,7 @@
 	id   string
 	task *task
 	pid  uint32
-	io   IO
+	io   cio.IO
 }
 
 func (p *process) ID() string {
@@ -104,7 +105,7 @@
 func (p *process) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error {
 	var i KillInfo
 	for _, o := range opts {
-		if err := o(ctx, p, &i); err != nil {
+		if err := o(ctx, &i); err != nil {
 			return err
 		}
 	}
@@ -154,7 +155,7 @@
 	return errdefs.FromGRPC(err)
 }
 
-func (p *process) IO() IO {
+func (p *process) IO() cio.IO {
 	return p.io
 }
 
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go
index 74537b7..c94ceb4 100644
--- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/protobuf/google/rpc/code.proto
-// DO NOT EDIT!
 
 /*
 Package rpc is a generated protocol buffer package.
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go
index a61229d..46953e7 100644
--- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/protobuf/google/rpc/error_details.proto
-// DO NOT EDIT!
 
 package rpc
 
@@ -670,24 +669,6 @@
 	return i, nil
 }
 
-func encodeFixed64ErrorDetails(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32ErrorDetails(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintErrorDetails(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go
index 80927bf..fde1ca7 100644
--- a/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/protobuf/google/rpc/status.proto
-// DO NOT EDIT!
 
 package rpc
 
@@ -131,24 +130,6 @@
 	return i, nil
 }
 
-func encodeFixed64Status(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Status(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintStatus(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go
deleted file mode 100644
index b0736c3..0000000
--- a/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package plugin
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go
deleted file mode 100644
index 797b691..0000000
--- a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
-// DO NOT EDIT!
-
-/*
-Package plugin is a generated protocol buffer package.
-
-It is generated from these files:
-	github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
-
-It has these top-level messages:
-*/
-package plugin
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-
-var E_FieldpathAll = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.FileOptions)(nil),
-	ExtensionType: (*bool)(nil),
-	Field:         63300,
-	Name:          "containerd.plugin.fieldpath_all",
-	Tag:           "varint,63300,opt,name=fieldpath_all,json=fieldpathAll",
-	Filename:      "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
-}
-
-var E_Fieldpath = &proto.ExtensionDesc{
-	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
-	ExtensionType: (*bool)(nil),
-	Field:         64400,
-	Name:          "containerd.plugin.fieldpath",
-	Tag:           "varint,64400,opt,name=fieldpath",
-	Filename:      "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
-}
-
-func init() {
-	proto.RegisterExtension(E_FieldpathAll)
-	proto.RegisterExtension(E_Fieldpath)
-}
-
-func init() {
-	proto.RegisterFile("github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", fileDescriptorFieldpath)
-}
-
-var fileDescriptorFieldpath = []byte{
-	// 203 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9,
-	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
-	0x4a, 0x41, 0x66, 0x16, 0x14, 0xe5, 0x97, 0xe4, 0x27, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6,
-	0x67, 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0xe8, 0x81, 0x65, 0x84,
-	0x04, 0x11, 0x6a, 0xf5, 0x20, 0x4a, 0xa4, 0x14, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x11, 0x5a,
-	0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0xf2, 0x8b, 0x20, 0x9a, 0xac, 0x9c, 0xb9, 0x78,
-	0xe1, 0xe6, 0xc4, 0x27, 0xe6, 0xe4, 0x08, 0xc9, 0xe8, 0x41, 0xf4, 0xe8, 0xc1, 0xf4, 0xe8, 0xb9,
-	0x65, 0xe6, 0xa4, 0xfa, 0x17, 0x94, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x1c, 0x79, 0xc7, 0xac, 0xc0,
-	0xa8, 0xc1, 0x11, 0xc4, 0x03, 0xd7, 0xe4, 0x98, 0x93, 0x63, 0x65, 0xcf, 0xc5, 0x09, 0xe7, 0x0b,
-	0xc9, 0x63, 0x18, 0xe0, 0x9b, 0x5a, 0x5c, 0x9c, 0x98, 0x0e, 0x37, 0x63, 0xc2, 0x77, 0x88, 0x19,
-	0x08, 0x3d, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, 0x48, 0x8e,
-	0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x04, 0x04, 0x00,
-	0x00, 0xff, 0xff, 0xd6, 0x21, 0x2a, 0xb6, 0x17, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
deleted file mode 100644
index 0674dc6..0000000
--- a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
+++ /dev/null
@@ -1,40 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto2";
-package containerd.plugin;
-
-import "google/protobuf/descriptor.proto";
-
-extend google.protobuf.FileOptions {
-	optional bool fieldpath_all = 63300;
-}
-
-extend google.protobuf.MessageOptions {
-	optional bool fieldpath = 64400;
-}
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go b/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go
deleted file mode 100644
index 7a2af56..0000000
--- a/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package plugin
-
-import (
-	"github.com/gogo/protobuf/proto"
-	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
-)
-
-// FieldpathEnabled returns true if E_Fieldpath is enabled
-func FieldpathEnabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool {
-	return proto.GetBoolExtension(message.Options, E_Fieldpath, proto.GetBoolExtension(file.Options, E_FieldpathAll, false))
-}
diff --git a/vendor/github.com/containerd/containerd/reaper/reaper.go b/vendor/github.com/containerd/containerd/reaper/reaper.go
index d7dfbb2..9127fc5 100644
--- a/vendor/github.com/containerd/containerd/reaper/reaper.go
+++ b/vendor/github.com/containerd/containerd/reaper/reaper.go
@@ -15,7 +15,7 @@
 // ErrNoSuchProcess is returned when the process no longer exists
 var ErrNoSuchProcess = errors.New("no such process")
 
-const bufferSize = 2048
+const bufferSize = 1024
 
 // Reap should be called when the process receives an SIGCHLD.  Reap will reap
 // all exited processes and close their wait channels
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
index 46677e4..222cf83 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
@@ -2,6 +2,7 @@
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"net/http"
 	"path"
@@ -37,32 +38,60 @@
 		return nil, err
 	}
 
-	for _, u := range urls {
-		req, err := http.NewRequest(http.MethodGet, u, nil)
-		if err != nil {
-			return nil, err
-		}
+	return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
+		for _, u := range urls {
+			rc, err := r.open(ctx, u, desc.MediaType, offset)
+			if err != nil {
+				if errdefs.IsNotFound(err) {
+					continue // try one of the other urls.
+				}
 
-		req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
-		resp, err := r.doRequestWithRetries(ctx, req, nil)
-		if err != nil {
-			return nil, err
-		}
-
-		if resp.StatusCode > 299 {
-			resp.Body.Close()
-			if resp.StatusCode == http.StatusNotFound {
-				continue // try one of the other urls.
+				return nil, err
 			}
-			return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+
+			return rc, nil
 		}
 
-		return resp.Body, nil
+		return nil, errors.Wrapf(errdefs.ErrNotFound,
+			"could not fetch content descriptor %v (%v) from remote",
+			desc.Digest, desc.MediaType)
+
+	})
+}
+
+func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) {
+	req, err := http.NewRequest(http.MethodGet, u, nil)
+	if err != nil {
+		return nil, err
 	}
 
-	return nil, errors.Wrapf(errdefs.ErrNotFound,
-		"could not fetch content descriptor %v (%v) from remote",
-		desc.Digest, desc.MediaType)
+	req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))
+
+	if offset > 0 {
+		// TODO(stevvooe): Only set this header in response to the
+		// "Accept-Ranges: bytes" header.
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+	}
+
+	resp, err := r.doRequestWithRetries(ctx, req, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode > 299 {
+		// TODO(stevvooe): When doing a offset specific request, we should
+		// really distinguish between a 206 and a 200. In the case of 200, we
+		// can discard the bytes, hiding the seek behavior from the
+		// implementation.
+
+		resp.Body.Close()
+		if resp.StatusCode == http.StatusNotFound {
+			return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
+		}
+		return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+	}
+
+	return resp.Body, nil
 }
 
 // getV2URLPaths generates the candidate urls paths for the object based on the
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
new file mode 100644
index 0000000..f6de60a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
@@ -0,0 +1,128 @@
+package docker
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+type httpReadSeeker struct {
+	size   int64
+	offset int64
+	rc     io.ReadCloser
+	open   func(offset int64) (io.ReadCloser, error)
+	closed bool
+}
+
+func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) {
+	return &httpReadSeeker{
+		size: size,
+		open: open,
+	}, nil
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.closed {
+		return 0, io.EOF
+	}
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	hrs.offset += int64(n)
+	return
+}
+
+func (hrs *httpReadSeeker) Close() error {
+	if hrs.closed {
+		return nil
+	}
+	hrs.closed = true
+	if hrs.rc != nil {
+		return hrs.rc.Close()
+	}
+
+	return nil
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.closed {
+		return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed")
+	}
+
+	abs := hrs.offset
+	switch whence {
+	case io.SeekStart:
+		abs = offset
+	case io.SeekCurrent:
+		abs += offset
+	case io.SeekEnd:
+		if hrs.size == -1 {
+			return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end")
+		}
+		abs = hrs.size + offset
+	default:
+		return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence")
+	}
+
+	if abs < 0 {
+		return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset")
+	}
+
+	if abs != hrs.offset {
+		if hrs.rc != nil {
+			if err := hrs.rc.Close(); err != nil {
+				log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser")
+			}
+
+			hrs.rc = nil
+		}
+
+		hrs.offset = abs
+	}
+
+	return hrs.offset, nil
+}
+
+func (hrs *httpReadSeeker) reader() (io.Reader, error) {
+	if hrs.rc != nil {
+		return hrs.rc, nil
+	}
+
+	if hrs.size == -1 || hrs.offset < hrs.size {
+		// only try to reopen the body request if we are seeking to a value
+		// less than the actual size.
+		if hrs.open == nil {
+			return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open")
+		}
+
+		rc, err := hrs.open(hrs.offset)
+		if err != nil {
+			return nil, errors.Wrapf(err, "httpReaderSeeker: failed open")
+		}
+
+		if hrs.rc != nil {
+			if err := hrs.rc.Close(); err != nil {
+				log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser")
+			}
+		}
+		hrs.rc = rc
+	} else {
+		// There is an edge case here where offset == size of the content. If
+		// we seek, we will probably get an error for content that cannot be
+		// sought (?). In that case, we should err on committing the content,
+		// as the length is already satisified but we just return the empty
+		// reader instead.
+
+		hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
+	}
+
+	return hrs.rc, nil
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
index 24bd278..405480b 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
@@ -36,7 +36,7 @@
 	status, err := p.tracker.GetStatus(ref)
 	if err == nil {
 		if status.Offset == status.Total {
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v already exists", ref)
+			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref)
 		}
 		// TODO: Handle incomplete status
 	} else if !errdefs.IsNotFound(err) {
@@ -52,7 +52,11 @@
 	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
 		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
 		isManifest = true
-		existCheck = path.Join("manifests", desc.Digest.String())
+		if p.tag == "" {
+			existCheck = path.Join("manifests", desc.Digest.String())
+		} else {
+			existCheck = path.Join("manifests", p.tag)
+		}
 	default:
 		existCheck = path.Join("blobs", desc.Digest.String())
 	}
@@ -71,15 +75,26 @@
 		log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
 	} else {
 		if resp.StatusCode == http.StatusOK {
-			p.tracker.SetStatus(ref, Status{
-				Status: content.Status{
-					Ref: ref,
-					// TODO: Set updated time?
-				},
-			})
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
-		}
-		if resp.StatusCode != http.StatusNotFound {
+			var exists bool
+			if isManifest && p.tag != "" {
+				dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
+				if dgstHeader == desc.Digest {
+					exists = true
+				}
+			} else {
+				exists = true
+			}
+
+			if exists {
+				p.tracker.SetStatus(ref, Status{
+					Status: content.Status{
+						Ref: ref,
+						// TODO: Set updated time?
+					},
+				})
+				return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
+			}
+		} else if resp.StatusCode != http.StatusNotFound {
 			// TODO: log error
 			return nil, errors.Errorf("unexpected response: %s", resp.Status)
 		}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
index 7a11504..57a18b6 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
@@ -7,7 +7,6 @@
 	"io"
 	"io/ioutil"
 	"net/http"
-	"net/textproto"
 	"net/url"
 	"path"
 	"strconv"
@@ -298,7 +297,7 @@
 
 func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
 	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
-	log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("Do request")
+	log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request")
 	r.authorize(req)
 	resp, err := ctxhttp.Do(ctx, r.client, req)
 	if err != nil {
@@ -405,22 +404,6 @@
 	return &ireq, nil
 }
 
-func isManifestAccept(h http.Header) bool {
-	for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] {
-		switch ah {
-		case images.MediaTypeDockerSchema2Manifest:
-			fallthrough
-		case images.MediaTypeDockerSchema2ManifestList:
-			fallthrough
-		case ocispec.MediaTypeImageManifest:
-			fallthrough
-		case ocispec.MediaTypeImageIndex:
-			return true
-		}
-	}
-	return false
-}
-
 func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string) error {
 	realm, ok := params["realm"]
 	if !ok {
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
index 52f83d4..6b74cd6 100644
--- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
+++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
@@ -29,10 +29,6 @@
 
 const manifestSizeLimit = 8e6 // 8MB
 
-var (
-	mediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
-)
-
 type blobState struct {
 	diffID digest.Digest
 	empty  bool
@@ -87,6 +83,7 @@
 						{
 							MediaType: images.MediaTypeDockerSchema2LayerGzip,
 							Digest:    c.pulledManifest.FSLayers[i].BlobSum,
+							Size:      -1,
 						},
 					}, descs...)
 				}
@@ -213,10 +210,16 @@
 		ref   = remotes.MakeRefKey(ctx, desc)
 		calc  = newBlobStateCalculator()
 		retry = 16
+		size  = desc.Size
 	)
 
+	// size may be unknown, set to zero for content ingest
+	if size == -1 {
+		size = 0
+	}
+
 tryit:
-	cw, err := c.contentStore.Writer(ctx, ref, desc.Size, desc.Digest)
+	cw, err := c.contentStore.Writer(ctx, ref, size, desc.Digest)
 	if err != nil {
 		if errdefs.IsUnavailable(err) {
 			select {
@@ -277,7 +280,8 @@
 
 		eg.Go(func() error {
 			defer pw.Close()
-			return content.Copy(ctx, cw, io.TeeReader(rc, pw), desc.Size, desc.Digest)
+
+			return content.Copy(ctx, cw, io.TeeReader(rc, pw), size, desc.Digest)
 		})
 
 		if err := eg.Wait(); err != nil {
@@ -285,7 +289,7 @@
 		}
 	}
 
-	if desc.Size == 0 {
+	if desc.Size == -1 {
 		info, err := c.contentStore.Info(ctx, desc.Digest)
 		if err != nil {
 			return errors.Wrap(err, "failed to get blob info")
diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go
index e583391..ad4cd9f 100644
--- a/vendor/github.com/containerd/containerd/remotes/handlers.go
+++ b/vendor/github.com/containerd/containerd/remotes/handlers.go
@@ -114,6 +114,7 @@
 func commitOpts(desc ocispec.Descriptor, r io.Reader) (io.Reader, []content.Opt) {
 	var childrenF func(r io.Reader) ([]ocispec.Descriptor, error)
 
+	// TODO(AkihiroSuda): use images/oci.GetChildrenDescriptors?
 	switch desc.MediaType {
 	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
 		childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {
diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go
index a198c99..4051295 100644
--- a/vendor/github.com/containerd/containerd/rootfs/apply.go
+++ b/vendor/github.com/containerd/containerd/rootfs/apply.go
@@ -9,7 +9,7 @@
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -30,7 +30,7 @@
 // The returned result is a chain id digest representing all the applied layers.
 // Layers are applied in order they are given, making the first layer the
 // bottom-most layer in the layer chain.
-func ApplyLayers(ctx context.Context, layers []Layer, sn snapshot.Snapshotter, a diff.Differ) (digest.Digest, error) {
+func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Differ) (digest.Digest, error) {
 	var chain []digest.Digest
 	for _, layer := range layers {
 		if _, err := ApplyLayer(ctx, layer, chain, sn, a); err != nil {
@@ -46,7 +46,7 @@
 // ApplyLayer applies a single layer on top of the given provided layer chain,
 // using the provided snapshotter and applier. If the layer was unpacked true
 // is returned, if the layer already exists false is returned.
-func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshot.Snapshotter, a diff.Differ, opts ...snapshot.Opt) (bool, error) {
+func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Differ, opts ...snapshots.Opt) (bool, error) {
 	var (
 		parent  = identity.ChainID(chain)
 		chainID = identity.ChainID(append(chain, layer.Diff.Digest))
@@ -55,10 +55,10 @@
 
 	_, err := sn.Stat(ctx, chainID.String())
 	if err == nil {
-		log.G(ctx).Debugf("Extraction not needed, layer snapshot exists")
+		log.G(ctx).Debugf("Extraction not needed, layer snapshot %s exists", chainID)
 		return false, nil
 	} else if !errdefs.IsNotFound(err) {
-		return false, errors.Wrap(err, "failed to stat snapshot")
+		return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
 	}
 
 	key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
@@ -67,7 +67,7 @@
 	mounts, err := sn.Prepare(ctx, key, parent.String(), opts...)
 	if err != nil {
 		//TODO: If is snapshot exists error, retry
-		return false, errors.Wrap(err, "failed to prepare extraction layer")
+		return false, errors.Wrapf(err, "failed to prepare extraction snapshot %q", key)
 	}
 	defer func() {
 		if err != nil {
@@ -89,7 +89,7 @@
 
 	if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
+			return false, errors.Wrapf(err, "failed to commit snapshot %s", key)
 		}
 
 		// Destination already exists, cleanup key and return without error
diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go
index 035eb30..bab7a3c 100644
--- a/vendor/github.com/containerd/containerd/rootfs/diff.go
+++ b/vendor/github.com/containerd/containerd/rootfs/diff.go
@@ -5,7 +5,7 @@
 
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"golang.org/x/net/context"
 )
@@ -14,7 +14,7 @@
 // of the snapshot. A content ref is provided to track the progress of the
 // content creation and the provided snapshotter and mount differ are used
 // for calculating the diff. The descriptor for the layer diff is returned.
-func Diff(ctx context.Context, snapshotID string, sn snapshot.Snapshotter, d diff.Differ, opts ...diff.Opt) (ocispec.Descriptor, error) {
+func Diff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Differ, opts ...diff.Opt) (ocispec.Descriptor, error) {
 	info, err := sn.Stat(ctx, snapshotID)
 	if err != nil {
 		return ocispec.Descriptor{}, err
@@ -28,7 +28,7 @@
 	defer sn.Remove(ctx, lowerKey)
 
 	var upper []mount.Mount
-	if info.Kind == snapshot.KindActive {
+	if info.Kind == snapshots.KindActive {
 		upper, err = sn.Mounts(ctx, snapshotID)
 		if err != nil {
 			return ocispec.Descriptor{}, err
diff --git a/vendor/github.com/containerd/containerd/rootfs/init.go b/vendor/github.com/containerd/containerd/rootfs/init.go
index 271e6ce..4f32f11 100644
--- a/vendor/github.com/containerd/containerd/rootfs/init.go
+++ b/vendor/github.com/containerd/containerd/rootfs/init.go
@@ -8,7 +8,7 @@
 
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
@@ -26,7 +26,7 @@
 }
 
 // InitRootFS initializes the snapshot for use as a rootfs
-func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]mount.Mount, error) {
+func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshots.Snapshotter, mounter Mounter) ([]mount.Mount, error) {
 	_, err := snapshotter.Stat(ctx, name)
 	if err == nil {
 		return nil, errors.Errorf("rootfs already exists")
@@ -51,7 +51,7 @@
 	return snapshotter.Prepare(ctx, name, parentS)
 }
 
-func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshot.Snapshotter, mounter Mounter) (string, error) {
+func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshots.Snapshotter, mounter Mounter) (string, error) {
 	initS := fmt.Sprintf("%s %s", parent, initName)
 	if _, err := snapshotter.Stat(ctx, initS); err == nil {
 		return initS, nil
@@ -69,12 +69,12 @@
 	if err != nil {
 		return "", err
 	}
+
 	defer func() {
 		if err != nil {
-			// TODO: once implemented uncomment
-			//if rerr := snapshotter.Remove(ctx, td); rerr != nil {
-			//	log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, merr)
-			//}
+			if rerr := snapshotter.Remove(ctx, td); rerr != nil {
+				log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, rerr)
+			}
 		}
 	}()
 
diff --git a/vendor/github.com/containerd/containerd/runtime/task_list.go b/vendor/github.com/containerd/containerd/runtime/task_list.go
index 7c52265..05f34c3 100644
--- a/vendor/github.com/containerd/containerd/runtime/task_list.go
+++ b/vendor/github.com/containerd/containerd/runtime/task_list.go
@@ -49,6 +49,8 @@
 
 // GetAll tasks under a namespace
 func (l *TaskList) GetAll(ctx context.Context) ([]Task, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
 	namespace, err := namespaces.NamespaceRequired(ctx)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/containerd/containerd/server/config.go b/vendor/github.com/containerd/containerd/server/config.go
index 26af539..f056c7b 100644
--- a/vendor/github.com/containerd/containerd/server/config.go
+++ b/vendor/github.com/containerd/containerd/server/config.go
@@ -23,8 +23,8 @@
 	Metrics MetricsConfig `toml:"metrics"`
 	// Plugins provides plugin specific configuration for the initialization of a plugin
 	Plugins map[string]toml.Primitive `toml:"plugins"`
-	// Enable containerd as a subreaper
-	Subreaper bool `toml:"subreaper"`
+	// NoSubreaper disables containerd as a subreaper
+	NoSubreaper bool `toml:"no_subreaper"`
 	// OOMScore adjust the containerd's oom score
 	OOMScore int `toml:"oom_score"`
 	// Cgroup specifies cgroup information for the containerd daemon process
diff --git a/vendor/github.com/containerd/containerd/server/server.go b/vendor/github.com/containerd/containerd/server/server.go
index f9ca044..6af6df0 100644
--- a/vendor/github.com/containerd/containerd/server/server.go
+++ b/vendor/github.com/containerd/containerd/server/server.go
@@ -18,7 +18,7 @@
 	introspection "github.com/containerd/containerd/api/services/introspection/v1"
 	leasesapi "github.com/containerd/containerd/api/services/leases/v1"
 	namespaces "github.com/containerd/containerd/api/services/namespaces/v1"
-	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
+	snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
 	tasks "github.com/containerd/containerd/api/services/tasks/v1"
 	version "github.com/containerd/containerd/api/services/version/v1"
 	"github.com/containerd/containerd/content"
@@ -27,7 +27,7 @@
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/metadata"
 	"github.com/containerd/containerd/plugin"
-	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/containerd/snapshots"
 	metrics "github.com/docker/go-metrics"
 	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/pkg/errors"
@@ -39,12 +39,15 @@
 
 // New creates and initializes a new containerd server
 func New(ctx context.Context, config *Config) (*Server, error) {
-	if config.Root == "" {
+	switch {
+	case config.Root == "":
 		return nil, errors.New("root must be specified")
-	}
-	if config.State == "" {
+	case config.State == "":
 		return nil, errors.New("state must be specified")
+	case config.Root == config.State:
+		return nil, errors.New("root and state must be different paths")
 	}
+
 	if err := os.MkdirAll(config.Root, 0711); err != nil {
 		return nil, err
 	}
@@ -196,7 +199,7 @@
 				return nil, err
 			}
 
-			snapshotters := make(map[string]snapshot.Snapshotter)
+			snapshotters := make(map[string]snapshots.Snapshotter)
 			for name, sn := range snapshottersRaw {
 				sn, err := sn.Instance()
 				if err != nil {
@@ -204,7 +207,7 @@
 						Warnf("could not use snapshotter %v in metadata plugin", name)
 					continue
 				}
-				snapshotters[name] = sn.(snapshot.Snapshotter)
+				snapshotters[name] = sn.(snapshots.Snapshotter)
 			}
 
 			path := filepath.Join(ic.Root, "meta.db")
@@ -246,7 +249,7 @@
 		// No need to change the context
 	case version.VersionServer:
 		ctx = log.WithModule(ctx, "version")
-	case snapshotapi.SnapshotsServer:
+	case snapshotsapi.SnapshotsServer:
 		ctx = log.WithModule(ctx, "snapshot")
 	case diff.DiffServer:
 		ctx = log.WithModule(ctx, "diff")
diff --git a/vendor/github.com/containerd/containerd/server/server_linux.go b/vendor/github.com/containerd/containerd/server/server_linux.go
index 03244e9..f6f679c 100644
--- a/vendor/github.com/containerd/containerd/server/server_linux.go
+++ b/vendor/github.com/containerd/containerd/server/server_linux.go
@@ -12,7 +12,7 @@
 
 // apply sets config settings on the server process
 func apply(ctx context.Context, config *Config) error {
-	if config.Subreaper {
+	if !config.NoSubreaper {
 		log.G(ctx).Info("setting subreaper...")
 		if err := sys.SetSubreaper(1); err != nil {
 			return err
diff --git a/vendor/github.com/containerd/containerd/server/server_solaris.go b/vendor/github.com/containerd/containerd/server/server_solaris.go
index 71e1c09..3c39816 100644
--- a/vendor/github.com/containerd/containerd/server/server_solaris.go
+++ b/vendor/github.com/containerd/containerd/server/server_solaris.go
@@ -2,13 +2,6 @@
 
 import "context"
 
-const (
-	// DefaultAddress is the default unix socket address
-	DefaultAddress = "/var/run/containerd/containerd.sock"
-	// DefaultDebugAddress is the default unix socket address for pprof data
-	DefaultDebugAddress = "/var/run/containerd/debug.sock"
-)
-
 func apply(_ context.Context, _ *Config) error {
 	return nil
 }
diff --git a/vendor/github.com/containerd/containerd/server/server_unsupported.go b/vendor/github.com/containerd/containerd/server/server_unsupported.go
index f820e3f..4df599e 100644
--- a/vendor/github.com/containerd/containerd/server/server_unsupported.go
+++ b/vendor/github.com/containerd/containerd/server/server_unsupported.go
@@ -4,19 +4,6 @@
 
 import "context"
 
-const (
-	// DefaultRootDir is the default location used by containerd to store
-	// persistent data
-	DefaultRootDir = "/var/lib/containerd"
-	// DefaultStateDir is the default location used by containerd to store
-	// transient data
-	DefaultStateDir = "/run/containerd"
-	// DefaultAddress is the default unix socket address
-	DefaultAddress = "/run/containerd/containerd.sock"
-	// DefaultDebugAddress is the default unix socket address for pprof data
-	DefaultDebugAddress = "/run/containerd/debug.sock"
-)
-
 func apply(_ context.Context, _ *Config) error {
 	return nil
 }
diff --git a/vendor/github.com/containerd/containerd/server/server_windows.go b/vendor/github.com/containerd/containerd/server/server_windows.go
index b35e776..37b71df 100644
--- a/vendor/github.com/containerd/containerd/server/server_windows.go
+++ b/vendor/github.com/containerd/containerd/server/server_windows.go
@@ -4,24 +4,6 @@
 
 import (
 	"context"
-	"os"
-	"path/filepath"
-)
-
-var (
-	// DefaultRootDir is the default location used by containerd to store
-	// persistent data
-	DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
-	// DefaultStateDir is the default location used by containerd to store
-	// transient data
-	DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
-)
-
-const (
-	// DefaultAddress is the default winpipe address
-	DefaultAddress = `\\.\pipe\containerd-containerd`
-	// DefaultDebugAddress is the default winpipe address for pprof data
-	DefaultDebugAddress = `\\.\pipe\containerd-debug`
 )
 
 func apply(_ context.Context, _ *Config) error {
diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go
deleted file mode 100644
index 3784579..0000000
--- a/vendor/github.com/containerd/containerd/services/content/service.go
+++ /dev/null
@@ -1,454 +0,0 @@
-package content
-
-import (
-	"io"
-	"sync"
-
-	api "github.com/containerd/containerd/api/services/content/v1"
-	eventsapi "github.com/containerd/containerd/api/services/events/v1"
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/events"
-	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/metadata"
-	"github.com/containerd/containerd/plugin"
-	"github.com/golang/protobuf/ptypes/empty"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-)
-
-type service struct {
-	store     content.Store
-	publisher events.Publisher
-}
-
-var bufPool = sync.Pool{
-	New: func() interface{} {
-		return make([]byte, 1<<20)
-	},
-}
-
-var _ api.ContentServer = &service{}
-
-func init() {
-	plugin.Register(&plugin.Registration{
-		Type: plugin.GRPCPlugin,
-		ID:   "content",
-		Requires: []plugin.Type{
-			plugin.MetadataPlugin,
-		},
-		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
-			m, err := ic.Get(plugin.MetadataPlugin)
-			if err != nil {
-				return nil, err
-			}
-
-			s, err := NewService(m.(*metadata.DB).ContentStore(), ic.Events)
-			return s, err
-		},
-	})
-}
-
-// NewService returns the content GRPC server
-func NewService(cs content.Store, publisher events.Publisher) (api.ContentServer, error) {
-	return &service{
-		store:     cs,
-		publisher: publisher,
-	}, nil
-}
-
-func (s *service) Register(server *grpc.Server) error {
-	api.RegisterContentServer(server, s)
-	return nil
-}
-
-func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
-	if err := req.Digest.Validate(); err != nil {
-		return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
-	}
-
-	bi, err := s.store.Info(ctx, req.Digest)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &api.InfoResponse{
-		Info: infoToGRPC(bi),
-	}, nil
-}
-
-func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
-	if err := req.Info.Digest.Validate(); err != nil {
-		return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
-	}
-
-	info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &api.UpdateResponse{
-		Info: infoToGRPC(info),
-	}, nil
-}
-
-func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
-	var (
-		buffer    []api.Info
-		sendBlock = func(block []api.Info) error {
-			// send last block
-			return session.Send(&api.ListContentResponse{
-				Info: block,
-			})
-		}
-	)
-
-	if err := s.store.Walk(session.Context(), func(info content.Info) error {
-		buffer = append(buffer, api.Info{
-			Digest:    info.Digest,
-			Size_:     info.Size,
-			CreatedAt: info.CreatedAt,
-			Labels:    info.Labels,
-		})
-
-		if len(buffer) >= 100 {
-			if err := sendBlock(buffer); err != nil {
-				return err
-			}
-
-			buffer = buffer[:0]
-		}
-
-		return nil
-	}, req.Filters...); err != nil {
-		return err
-	}
-
-	if len(buffer) > 0 {
-		// send last block
-		if err := sendBlock(buffer); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) {
-	if err := req.Digest.Validate(); err != nil {
-		return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
-	}
-
-	if err := s.store.Delete(ctx, req.Digest); err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	if err := s.publisher.Publish(ctx, "/content/delete", &eventsapi.ContentDelete{
-		Digest: req.Digest,
-	}); err != nil {
-		return nil, err
-	}
-
-	return &empty.Empty{}, nil
-}
-
-func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
-	if err := req.Digest.Validate(); err != nil {
-		return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
-	}
-
-	oi, err := s.store.Info(session.Context(), req.Digest)
-	if err != nil {
-		return errdefs.ToGRPC(err)
-	}
-
-	ra, err := s.store.ReaderAt(session.Context(), req.Digest)
-	if err != nil {
-		return errdefs.ToGRPC(err)
-	}
-	defer ra.Close()
-
-	var (
-		offset = req.Offset
-		size   = req.Size_
-
-		// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
-		// little inefficient for work over a fast network. We can tune this later.
-		p = bufPool.Get().([]byte)
-	)
-	defer bufPool.Put(p)
-
-	if offset < 0 {
-		offset = 0
-	}
-
-	if size <= 0 {
-		size = oi.Size - offset
-	}
-
-	if offset+size > oi.Size {
-		return grpc.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
-	}
-
-	if _, err := io.CopyBuffer(
-		&readResponseWriter{session: session},
-		io.NewSectionReader(ra, offset, size), p); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// readResponseWriter is a writer that places the output into ReadContentRequest messages.
-//
-// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
-// into the buffer size.
-type readResponseWriter struct {
-	offset  int64
-	session api.Content_ReadServer
-}
-
-func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
-	if err := rw.session.Send(&api.ReadContentResponse{
-		Offset: rw.offset,
-		Data:   p,
-	}); err != nil {
-		return 0, err
-	}
-
-	rw.offset += int64(len(p))
-	return len(p), nil
-}
-
-func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
-	status, err := s.store.Status(ctx, req.Ref)
-	if err != nil {
-		return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
-	}
-
-	var resp api.StatusResponse
-	resp.Status = &api.Status{
-		StartedAt: status.StartedAt,
-		UpdatedAt: status.UpdatedAt,
-		Ref:       status.Ref,
-		Offset:    status.Offset,
-		Total:     status.Total,
-		Expected:  status.Expected,
-	}
-
-	return &resp, nil
-}
-
-func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
-	statuses, err := s.store.ListStatuses(ctx, req.Filters...)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	var resp api.ListStatusesResponse
-	for _, status := range statuses {
-		resp.Statuses = append(resp.Statuses, api.Status{
-			StartedAt: status.StartedAt,
-			UpdatedAt: status.UpdatedAt,
-			Ref:       status.Ref,
-			Offset:    status.Offset,
-			Total:     status.Total,
-			Expected:  status.Expected,
-		})
-	}
-
-	return &resp, nil
-}
-
-func (s *service) Write(session api.Content_WriteServer) (err error) {
-	var (
-		ctx      = session.Context()
-		msg      api.WriteContentResponse
-		req      *api.WriteContentRequest
-		ref      string
-		total    int64
-		expected digest.Digest
-	)
-
-	defer func(msg *api.WriteContentResponse) {
-		// pump through the last message if no error was encountered
-		if err != nil {
-			if grpc.Code(err) != codes.AlreadyExists {
-				// TODO(stevvooe): Really need a log line here to track which
-				// errors are actually causing failure on the server side. May want
-				// to configure the service with an interceptor to make this work
-				// identically across all GRPC methods.
-				//
-				// This is pretty noisy, so we can remove it but leave it for now.
-				log.G(ctx).WithError(err).Error("(*service).Write failed")
-			}
-
-			return
-		}
-
-		err = session.Send(msg)
-	}(&msg)
-
-	// handle the very first request!
-	req, err = session.Recv()
-	if err != nil {
-		return err
-	}
-
-	ref = req.Ref
-
-	if ref == "" {
-		return grpc.Errorf(codes.InvalidArgument, "first message must have a reference")
-	}
-
-	fields := logrus.Fields{
-		"ref": ref,
-	}
-	total = req.Total
-	expected = req.Expected
-	if total > 0 {
-		fields["total"] = total
-	}
-
-	if expected != "" {
-		fields["expected"] = expected
-	}
-
-	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
-
-	log.G(ctx).Debug("(*service).Write started")
-	// this action locks the writer for the session.
-	wr, err := s.store.Writer(ctx, ref, total, expected)
-	if err != nil {
-		return errdefs.ToGRPC(err)
-	}
-	defer wr.Close()
-
-	for {
-		msg.Action = req.Action
-		ws, err := wr.Status()
-		if err != nil {
-			return errdefs.ToGRPC(err)
-		}
-
-		msg.Offset = ws.Offset // always set the offset.
-
-		// NOTE(stevvooe): In general, there are two cases underwhich a remote
-		// writer is used.
-		//
-		// For pull, we almost always have this before fetching large content,
-		// through descriptors. We allow predeclaration of the expected size
-		// and digest.
-		//
-		// For push, it is more complex. If we want to cut through content into
-		// storage, we may have no expectation until we are done processing the
-		// content. The case here is the following:
-		//
-		// 	1. Start writing content.
-		// 	2. Compress inline.
-		// 	3. Validate digest and size (maybe).
-		//
-		// Supporting these two paths is quite awkward but it lets both API
-		// users use the same writer style for each with a minimum of overhead.
-		if req.Expected != "" {
-			if expected != "" && expected != req.Expected {
-				return grpc.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected)
-			}
-			expected = req.Expected
-
-			if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
-				if err := s.store.Abort(session.Context(), ref); err != nil {
-					log.G(ctx).WithError(err).Error("failed to abort write")
-				}
-
-				return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
-			}
-		}
-
-		if req.Total > 0 {
-			// Update the expected total. Typically, this could be seen at
-			// negotiation time or on a commit message.
-			if total > 0 && req.Total != total {
-				return grpc.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total)
-			}
-			total = req.Total
-		}
-
-		switch req.Action {
-		case api.WriteActionStat:
-			msg.Digest = wr.Digest()
-			msg.StartedAt = ws.StartedAt
-			msg.UpdatedAt = ws.UpdatedAt
-			msg.Total = total
-		case api.WriteActionWrite, api.WriteActionCommit:
-			if req.Offset > 0 {
-				// validate the offset if provided
-				if req.Offset != ws.Offset {
-					return grpc.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
-				}
-			}
-
-			if req.Offset == 0 && ws.Offset > 0 {
-				if err := wr.Truncate(req.Offset); err != nil {
-					return errors.Wrapf(err, "truncate failed")
-				}
-				msg.Offset = req.Offset
-			}
-
-			// issue the write if we actually have data.
-			if len(req.Data) > 0 {
-				// While this looks like we could use io.WriterAt here, because we
-				// maintain the offset as append only, we just issue the write.
-				n, err := wr.Write(req.Data)
-				if err != nil {
-					return err
-				}
-
-				if n != len(req.Data) {
-					// TODO(stevvooe): Perhaps, we can recover this by including it
-					// in the offset on the write return.
-					return grpc.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
-				}
-
-				msg.Offset += int64(n)
-			}
-
-			if req.Action == api.WriteActionCommit {
-				var opts []content.Opt
-				if req.Labels != nil {
-					opts = append(opts, content.WithLabels(req.Labels))
-				}
-				if err := wr.Commit(ctx, total, expected, opts...); err != nil {
-					return err
-				}
-			}
-
-			msg.Digest = wr.Digest()
-		}
-
-		if err := session.Send(&msg); err != nil {
-			return err
-		}
-
-		req, err = session.Recv()
-		if err != nil {
-			if err == io.EOF {
-				return nil
-			}
-
-			return err
-		}
-	}
-}
-
-func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) {
-	if err := s.store.Abort(ctx, req.Ref); err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &empty.Empty{}, nil
-}
diff --git a/vendor/github.com/containerd/containerd/services/diff/service.go b/vendor/github.com/containerd/containerd/services/diff/service.go
deleted file mode 100644
index 81e44dc..0000000
--- a/vendor/github.com/containerd/containerd/services/diff/service.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package diff
-
-import (
-	diffapi "github.com/containerd/containerd/api/services/diff/v1"
-	"github.com/containerd/containerd/api/types"
-	"github.com/containerd/containerd/diff"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/plugin"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-)
-
-type config struct {
-	// Order is the order of preference in which to try diff algorithms, the
-	// first differ which is supported is used.
-	// Note when multiple differs may be supported, this order will be
-	// respected for which is choosen. Each differ should return the same
-	// correct output, allowing any ordering to be used to prefer
-	// more optimimal implementations.
-	Order []string `toml:"default"`
-}
-
-func init() {
-	plugin.Register(&plugin.Registration{
-		Type: plugin.GRPCPlugin,
-		ID:   "diff",
-		Requires: []plugin.Type{
-			plugin.DiffPlugin,
-		},
-		Config: &config{
-			Order: []string{"walking"},
-		},
-		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
-			differs, err := ic.GetByType(plugin.DiffPlugin)
-			if err != nil {
-				return nil, err
-			}
-
-			orderedNames := ic.Config.(*config).Order
-			ordered := make([]diff.Differ, len(orderedNames))
-			for i, n := range orderedNames {
-				differp, ok := differs[n]
-				if !ok {
-					return nil, errors.Errorf("needed differ not loaded: %s", n)
-				}
-				differ, err := differp.Instance()
-				if err != nil {
-					return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n)
-				}
-
-				ordered[i] = differ.(diff.Differ)
-			}
-
-			return &service{
-				differs: ordered,
-			}, nil
-		},
-	})
-}
-
-type service struct {
-	differs []diff.Differ
-}
-
-func (s *service) Register(gs *grpc.Server) error {
-	diffapi.RegisterDiffServer(gs, s)
-	return nil
-}
-
-func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
-	var (
-		ocidesc ocispec.Descriptor
-		err     error
-		desc    = toDescriptor(er.Diff)
-		mounts  = toMounts(er.Mounts)
-	)
-
-	for _, differ := range s.differs {
-		ocidesc, err = differ.Apply(ctx, desc, mounts)
-		if !errdefs.IsNotImplemented(err) {
-			break
-		}
-	}
-
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &diffapi.ApplyResponse{
-		Applied: fromDescriptor(ocidesc),
-	}, nil
-
-}
-
-func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) {
-	var (
-		ocidesc ocispec.Descriptor
-		err     error
-		aMounts = toMounts(dr.Left)
-		bMounts = toMounts(dr.Right)
-	)
-
-	var opts []diff.Opt
-	if dr.MediaType != "" {
-		opts = append(opts, diff.WithMediaType(dr.MediaType))
-	}
-	if dr.Ref != "" {
-		opts = append(opts, diff.WithReference(dr.Ref))
-	}
-	if dr.Labels != nil {
-		opts = append(opts, diff.WithLabels(dr.Labels))
-	}
-
-	for _, differ := range s.differs {
-		ocidesc, err = differ.DiffMounts(ctx, aMounts, bMounts, opts...)
-		if !errdefs.IsNotImplemented(err) {
-			break
-		}
-	}
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &diffapi.DiffResponse{
-		Diff: fromDescriptor(ocidesc),
-	}, nil
-}
-
-func toMounts(apim []*types.Mount) []mount.Mount {
-	mounts := make([]mount.Mount, len(apim))
-	for i, m := range apim {
-		mounts[i] = mount.Mount{
-			Type:    m.Type,
-			Source:  m.Source,
-			Options: m.Options,
-		}
-	}
-	return mounts
-}
diff --git a/vendor/github.com/containerd/containerd/services/images/client.go b/vendor/github.com/containerd/containerd/services/images/client.go
deleted file mode 100644
index f746ddc..0000000
--- a/vendor/github.com/containerd/containerd/services/images/client.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package images
-
-import (
-	"context"
-
-	imagesapi "github.com/containerd/containerd/api/services/images/v1"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
-	ptypes "github.com/gogo/protobuf/types"
-)
-
-type remoteStore struct {
-	client imagesapi.ImagesClient
-}
-
-// NewStoreFromClient returns a new image store client
-func NewStoreFromClient(client imagesapi.ImagesClient) images.Store {
-	return &remoteStore{
-		client: client,
-	}
-}
-
-func (s *remoteStore) Get(ctx context.Context, name string) (images.Image, error) {
-	resp, err := s.client.Get(ctx, &imagesapi.GetImageRequest{
-		Name: name,
-	})
-	if err != nil {
-		return images.Image{}, errdefs.FromGRPC(err)
-	}
-
-	return imageFromProto(resp.Image), nil
-}
-
-func (s *remoteStore) List(ctx context.Context, filters ...string) ([]images.Image, error) {
-	resp, err := s.client.List(ctx, &imagesapi.ListImagesRequest{
-		Filters: filters,
-	})
-	if err != nil {
-		return nil, errdefs.FromGRPC(err)
-	}
-
-	return imagesFromProto(resp.Images), nil
-}
-
-func (s *remoteStore) Create(ctx context.Context, image images.Image) (images.Image, error) {
-	created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{
-		Image: imageToProto(&image),
-	})
-	if err != nil {
-		return images.Image{}, errdefs.FromGRPC(err)
-	}
-
-	return imageFromProto(&created.Image), nil
-}
-
-func (s *remoteStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) {
-	var updateMask *ptypes.FieldMask
-	if len(fieldpaths) > 0 {
-		updateMask = &ptypes.FieldMask{
-			Paths: fieldpaths,
-		}
-	}
-
-	updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{
-		Image:      imageToProto(&image),
-		UpdateMask: updateMask,
-	})
-	if err != nil {
-		return images.Image{}, errdefs.FromGRPC(err)
-	}
-
-	return imageFromProto(&updated.Image), nil
-}
-
-func (s *remoteStore) Delete(ctx context.Context, name string) error {
-	_, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{
-		Name: name,
-	})
-
-	return errdefs.FromGRPC(err)
-}
diff --git a/vendor/github.com/containerd/containerd/services/images/helpers.go b/vendor/github.com/containerd/containerd/services/images/helpers.go
deleted file mode 100644
index 374aefd..0000000
--- a/vendor/github.com/containerd/containerd/services/images/helpers.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package images
-
-import (
-	imagesapi "github.com/containerd/containerd/api/services/images/v1"
-	"github.com/containerd/containerd/api/types"
-	"github.com/containerd/containerd/images"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-func imagesToProto(images []images.Image) []imagesapi.Image {
-	var imagespb []imagesapi.Image
-
-	for _, image := range images {
-		imagespb = append(imagespb, imageToProto(&image))
-	}
-
-	return imagespb
-}
-
-func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
-	var images []images.Image
-
-	for _, image := range imagespb {
-		images = append(images, imageFromProto(&image))
-	}
-
-	return images
-}
-
-func imageToProto(image *images.Image) imagesapi.Image {
-	return imagesapi.Image{
-		Name:      image.Name,
-		Labels:    image.Labels,
-		Target:    descToProto(&image.Target),
-		CreatedAt: image.CreatedAt,
-		UpdatedAt: image.UpdatedAt,
-	}
-}
-
-func imageFromProto(imagepb *imagesapi.Image) images.Image {
-	return images.Image{
-		Name:      imagepb.Name,
-		Labels:    imagepb.Labels,
-		Target:    descFromProto(&imagepb.Target),
-		CreatedAt: imagepb.CreatedAt,
-		UpdatedAt: imagepb.UpdatedAt,
-	}
-}
-
-func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
-	return ocispec.Descriptor{
-		MediaType: desc.MediaType,
-		Size:      desc.Size_,
-		Digest:    desc.Digest,
-	}
-}
-
-func descToProto(desc *ocispec.Descriptor) types.Descriptor {
-	return types.Descriptor{
-		MediaType: desc.MediaType,
-		Size_:     desc.Size,
-		Digest:    desc.Digest,
-	}
-}
diff --git a/vendor/github.com/containerd/containerd/services/images/service.go b/vendor/github.com/containerd/containerd/services/images/service.go
deleted file mode 100644
index 3843df5..0000000
--- a/vendor/github.com/containerd/containerd/services/images/service.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package images
-
-import (
-	"github.com/boltdb/bolt"
-	eventsapi "github.com/containerd/containerd/api/services/events/v1"
-	imagesapi "github.com/containerd/containerd/api/services/images/v1"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/events"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/metadata"
-	"github.com/containerd/containerd/plugin"
-	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-func init() {
-	plugin.Register(&plugin.Registration{
-		Type: plugin.GRPCPlugin,
-		ID:   "images",
-		Requires: []plugin.Type{
-			plugin.MetadataPlugin,
-		},
-		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
-			m, err := ic.Get(plugin.MetadataPlugin)
-			if err != nil {
-				return nil, err
-			}
-			return NewService(m.(*metadata.DB), ic.Events), nil
-		},
-	})
-}
-
-type service struct {
-	db        *metadata.DB
-	publisher events.Publisher
-}
-
-// NewService returns the GRPC image server
-func NewService(db *metadata.DB, publisher events.Publisher) imagesapi.ImagesServer {
-	return &service{
-		db:        db,
-		publisher: publisher,
-	}
-}
-
-func (s *service) Register(server *grpc.Server) error {
-	imagesapi.RegisterImagesServer(server, s)
-	return nil
-}
-
-func (s *service) Get(ctx context.Context, req *imagesapi.GetImageRequest) (*imagesapi.GetImageResponse, error) {
-	var resp imagesapi.GetImageResponse
-
-	return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store images.Store) error {
-		image, err := store.Get(ctx, req.Name)
-		if err != nil {
-			return err
-		}
-		imagepb := imageToProto(&image)
-		resp.Image = &imagepb
-		return nil
-	}))
-}
-
-func (s *service) List(ctx context.Context, req *imagesapi.ListImagesRequest) (*imagesapi.ListImagesResponse, error) {
-	var resp imagesapi.ListImagesResponse
-
-	return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store images.Store) error {
-		images, err := store.List(ctx, req.Filters...)
-		if err != nil {
-			return err
-		}
-
-		resp.Images = imagesToProto(images)
-		return nil
-	}))
-}
-
-func (s *service) Create(ctx context.Context, req *imagesapi.CreateImageRequest) (*imagesapi.CreateImageResponse, error) {
-	if req.Image.Name == "" {
-		return nil, status.Errorf(codes.InvalidArgument, "Image.Name required")
-	}
-
-	var (
-		image = imageFromProto(&req.Image)
-		resp  imagesapi.CreateImageResponse
-	)
-	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
-		created, err := store.Create(ctx, image)
-		if err != nil {
-			return err
-		}
-
-		resp.Image = imageToProto(&created)
-		return nil
-	}); err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	if err := s.publisher.Publish(ctx, "/images/create", &eventsapi.ImageCreate{
-		Name:   resp.Image.Name,
-		Labels: resp.Image.Labels,
-	}); err != nil {
-		return nil, err
-	}
-
-	return &resp, nil
-
-}
-
-func (s *service) Update(ctx context.Context, req *imagesapi.UpdateImageRequest) (*imagesapi.UpdateImageResponse, error) {
-	if req.Image.Name == "" {
-		return nil, status.Errorf(codes.InvalidArgument, "Image.Name required")
-	}
-
-	var (
-		image = imageFromProto(&req.Image)
-		resp  imagesapi.UpdateImageResponse
-	)
-	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
-		var fieldpaths []string
-		if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 {
-			for _, path := range req.UpdateMask.Paths {
-				fieldpaths = append(fieldpaths, path)
-			}
-		}
-
-		updated, err := store.Update(ctx, image, fieldpaths...)
-		if err != nil {
-			return err
-		}
-
-		resp.Image = imageToProto(&updated)
-		return nil
-	}); err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	if err := s.publisher.Publish(ctx, "/images/update", &eventsapi.ImageUpdate{
-		Name:   resp.Image.Name,
-		Labels: resp.Image.Labels,
-	}); err != nil {
-		return nil, err
-	}
-
-	return &resp, nil
-}
-
-func (s *service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest) (*empty.Empty, error) {
-	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
-		return errdefs.ToGRPC(store.Delete(ctx, req.Name))
-	}); err != nil {
-		return nil, err
-	}
-
-	if err := s.publisher.Publish(ctx, "/images/delete", &eventsapi.ImageDelete{
-		Name: req.Name,
-	}); err != nil {
-		return nil, err
-	}
-
-	if err := s.db.GarbageCollect(ctx); err != nil {
-		return nil, errdefs.ToGRPC(errors.Wrap(err, "garbage collection failed"))
-	}
-
-	return &empty.Empty{}, nil
-}
-
-func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store images.Store) error) func(tx *bolt.Tx) error {
-	return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewImageStore(tx)) }
-}
-
-func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {
-	return s.db.View(s.withStore(ctx, fn))
-}
-
-func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {
-	return s.db.Update(s.withStore(ctx, fn))
-}
diff --git a/vendor/github.com/containerd/containerd/services/namespaces/service.go b/vendor/github.com/containerd/containerd/services/namespaces/service.go
deleted file mode 100644
index b795ab5..0000000
--- a/vendor/github.com/containerd/containerd/services/namespaces/service.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package namespaces
-
-import (
-	"strings"
-
-	"github.com/boltdb/bolt"
-	eventsapi "github.com/containerd/containerd/api/services/events/v1"
-	api "github.com/containerd/containerd/api/services/namespaces/v1"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/events"
-	"github.com/containerd/containerd/metadata"
-	"github.com/containerd/containerd/namespaces"
-	"github.com/containerd/containerd/plugin"
-	"github.com/golang/protobuf/ptypes/empty"
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-)
-
-func init() {
-	plugin.Register(&plugin.Registration{
-		Type: plugin.GRPCPlugin,
-		ID:   "namespaces",
-		Requires: []plugin.Type{
-			plugin.MetadataPlugin,
-		},
-		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
-			m, err := ic.Get(plugin.MetadataPlugin)
-			if err != nil {
-				return nil, err
-			}
-			return NewService(m.(*metadata.DB), ic.Events), nil
-		},
-	})
-}
-
-type service struct {
-	db        *metadata.DB
-	publisher events.Publisher
-}
-
-var _ api.NamespacesServer = &service{}
-
-// NewService returns the GRPC namespaces server
-func NewService(db *metadata.DB, publisher events.Publisher) api.NamespacesServer {
-	return &service{
-		db:        db,
-		publisher: publisher,
-	}
-}
-
-func (s *service) Register(server *grpc.Server) error {
-	api.RegisterNamespacesServer(server, s)
-	return nil
-}
-
-func (s *service) Get(ctx context.Context, req *api.GetNamespaceRequest) (*api.GetNamespaceResponse, error) {
-	var resp api.GetNamespaceResponse
-
-	return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error {
-		labels, err := store.Labels(ctx, req.Name)
-		if err != nil {
-			return errdefs.ToGRPC(err)
-		}
-
-		resp.Namespace = api.Namespace{
-			Name:   req.Name,
-			Labels: labels,
-		}
-
-		return nil
-	})
-}
-
-func (s *service) List(ctx context.Context, req *api.ListNamespacesRequest) (*api.ListNamespacesResponse, error) {
-	var resp api.ListNamespacesResponse
-
-	return &resp, s.withStoreView(ctx, func(ctx context.Context, store namespaces.Store) error {
-		namespaces, err := store.List(ctx)
-		if err != nil {
-			return err
-		}
-
-		for _, namespace := range namespaces {
-			labels, err := store.Labels(ctx, namespace)
-			if err != nil {
-				// In general, this should be unlikely, since we are holding a
-				// transaction to service this request.
-				return errdefs.ToGRPC(err)
-			}
-
-			resp.Namespaces = append(resp.Namespaces, api.Namespace{
-				Name:   namespace,
-				Labels: labels,
-			})
-		}
-
-		return nil
-	})
-}
-
-func (s *service) Create(ctx context.Context, req *api.CreateNamespaceRequest) (*api.CreateNamespaceResponse, error) {
-	var resp api.CreateNamespaceResponse
-
-	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error {
-		if err := store.Create(ctx, req.Namespace.Name, req.Namespace.Labels); err != nil {
-			return errdefs.ToGRPC(err)
-		}
-
-		for k, v := range req.Namespace.Labels {
-			if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil {
-				return err
-			}
-		}
-
-		resp.Namespace = req.Namespace
-		return nil
-	}); err != nil {
-		return &resp, err
-	}
-
-	if err := s.publisher.Publish(ctx, "/namespaces/create", &eventsapi.NamespaceCreate{
-		Name:   req.Namespace.Name,
-		Labels: req.Namespace.Labels,
-	}); err != nil {
-		return &resp, err
-	}
-
-	return &resp, nil
-
-}
-
-func (s *service) Update(ctx context.Context, req *api.UpdateNamespaceRequest) (*api.UpdateNamespaceResponse, error) {
-	var resp api.UpdateNamespaceResponse
-	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error {
-		if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 {
-			for _, path := range req.UpdateMask.Paths {
-				switch {
-				case strings.HasPrefix(path, "labels."):
-					key := strings.TrimPrefix(path, "labels.")
-					if err := store.SetLabel(ctx, req.Namespace.Name, key, req.Namespace.Labels[key]); err != nil {
-						return err
-					}
-				default:
-					return grpc.Errorf(codes.InvalidArgument, "cannot update %q field", path)
-				}
-			}
-		} else {
-			// clear out the existing labels and then set them to the incoming request.
-			// get current set of labels
-			labels, err := store.Labels(ctx, req.Namespace.Name)
-			if err != nil {
-				return errdefs.ToGRPC(err)
-			}
-
-			for k := range labels {
-				if err := store.SetLabel(ctx, req.Namespace.Name, k, ""); err != nil {
-					return err
-				}
-			}
-
-			for k, v := range req.Namespace.Labels {
-				if err := store.SetLabel(ctx, req.Namespace.Name, k, v); err != nil {
-					return err
-				}
-
-			}
-		}
-
-		return nil
-	}); err != nil {
-		return &resp, err
-	}
-
-	if err := s.publisher.Publish(ctx, "/namespaces/update", &eventsapi.NamespaceUpdate{
-		Name:   req.Namespace.Name,
-		Labels: req.Namespace.Labels,
-	}); err != nil {
-		return &resp, err
-	}
-
-	return &resp, nil
-}
-
-func (s *service) Delete(ctx context.Context, req *api.DeleteNamespaceRequest) (*empty.Empty, error) {
-	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store namespaces.Store) error {
-		return errdefs.ToGRPC(store.Delete(ctx, req.Name))
-	}); err != nil {
-		return &empty.Empty{}, err
-	}
-	// set the namespace in the context before publishing the event
-	ctx = namespaces.WithNamespace(ctx, req.Name)
-	if err := s.publisher.Publish(ctx, "/namespaces/delete", &eventsapi.NamespaceDelete{
-		Name: req.Name,
-	}); err != nil {
-		return &empty.Empty{}, err
-	}
-
-	return &empty.Empty{}, nil
-}
-
-func (s *service) withStore(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) func(tx *bolt.Tx) error {
-	return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewNamespaceStore(tx)) }
-}
-
-func (s *service) withStoreView(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error {
-	return s.db.View(s.withStore(ctx, fn))
-}
-
-func (s *service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store namespaces.Store) error) error {
-	return s.db.Update(s.withStore(ctx, fn))
-}
diff --git a/vendor/github.com/containerd/containerd/services/snapshot/client.go b/vendor/github.com/containerd/containerd/services/snapshot/client.go
deleted file mode 100644
index a9b9ffe..0000000
--- a/vendor/github.com/containerd/containerd/services/snapshot/client.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package snapshot
-
-import (
-	"context"
-	"io"
-
-	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
-	"github.com/containerd/containerd/api/types"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/snapshot"
-	protobuftypes "github.com/gogo/protobuf/types"
-)
-
-// NewSnapshotterFromClient returns a new Snapshotter which communicates
-// over a GRPC connection.
-func NewSnapshotterFromClient(client snapshotapi.SnapshotsClient, snapshotterName string) snapshot.Snapshotter {
-	return &remoteSnapshotter{
-		client:          client,
-		snapshotterName: snapshotterName,
-	}
-}
-
-type remoteSnapshotter struct {
-	client          snapshotapi.SnapshotsClient
-	snapshotterName string
-}
-
-func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
-	resp, err := r.client.Stat(ctx,
-		&snapshotapi.StatSnapshotRequest{
-			Snapshotter: r.snapshotterName,
-			Key:         key,
-		})
-	if err != nil {
-		return snapshot.Info{}, errdefs.FromGRPC(err)
-	}
-	return toInfo(resp.Info), nil
-}
-
-func (r *remoteSnapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) {
-	resp, err := r.client.Update(ctx,
-		&snapshotapi.UpdateSnapshotRequest{
-			Snapshotter: r.snapshotterName,
-			Info:        fromInfo(info),
-			UpdateMask: &protobuftypes.FieldMask{
-				Paths: fieldpaths,
-			},
-		})
-	if err != nil {
-		return snapshot.Info{}, errdefs.FromGRPC(err)
-	}
-	return toInfo(resp.Info), nil
-}
-
-func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
-	resp, err := r.client.Usage(ctx, &snapshotapi.UsageRequest{
-		Snapshotter: r.snapshotterName,
-		Key:         key,
-	})
-	if err != nil {
-		return snapshot.Usage{}, errdefs.FromGRPC(err)
-	}
-	return toUsage(resp), nil
-}
-
-func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
-	resp, err := r.client.Mounts(ctx, &snapshotapi.MountsRequest{
-		Snapshotter: r.snapshotterName,
-		Key:         key,
-	})
-	if err != nil {
-		return nil, errdefs.FromGRPC(err)
-	}
-	return toMounts(resp.Mounts), nil
-}
-
-func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
-	var local snapshot.Info
-	for _, opt := range opts {
-		if err := opt(&local); err != nil {
-			return nil, err
-		}
-	}
-	resp, err := r.client.Prepare(ctx, &snapshotapi.PrepareSnapshotRequest{
-		Snapshotter: r.snapshotterName,
-		Key:         key,
-		Parent:      parent,
-		Labels:      local.Labels,
-	})
-	if err != nil {
-		return nil, errdefs.FromGRPC(err)
-	}
-	return toMounts(resp.Mounts), nil
-}
-
-func (r *remoteSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
-	var local snapshot.Info
-	for _, opt := range opts {
-		if err := opt(&local); err != nil {
-			return nil, err
-		}
-	}
-	resp, err := r.client.View(ctx, &snapshotapi.ViewSnapshotRequest{
-		Snapshotter: r.snapshotterName,
-		Key:         key,
-		Parent:      parent,
-		Labels:      local.Labels,
-	})
-	if err != nil {
-		return nil, errdefs.FromGRPC(err)
-	}
-	return toMounts(resp.Mounts), nil
-}
-
-func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error {
-	var local snapshot.Info
-	for _, opt := range opts {
-		if err := opt(&local); err != nil {
-			return err
-		}
-	}
-	_, err := r.client.Commit(ctx, &snapshotapi.CommitSnapshotRequest{
-		Snapshotter: r.snapshotterName,
-		Name:        name,
-		Key:         key,
-		Labels:      local.Labels,
-	})
-	return errdefs.FromGRPC(err)
-}
-
-func (r *remoteSnapshotter) Remove(ctx context.Context, key string) error {
-	_, err := r.client.Remove(ctx, &snapshotapi.RemoveSnapshotRequest{
-		Snapshotter: r.snapshotterName,
-		Key:         key,
-	})
-	return errdefs.FromGRPC(err)
-}
-
-func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
-	sc, err := r.client.List(ctx, &snapshotapi.ListSnapshotsRequest{
-		Snapshotter: r.snapshotterName,
-	})
-	if err != nil {
-		return errdefs.FromGRPC(err)
-	}
-	for {
-		resp, err := sc.Recv()
-		if err != nil {
-			if err == io.EOF {
-				return nil
-			}
-			return errdefs.FromGRPC(err)
-		}
-		if resp == nil {
-			return nil
-		}
-		for _, info := range resp.Info {
-			if err := fn(ctx, toInfo(info)); err != nil {
-				return err
-			}
-		}
-	}
-}
-
-func toKind(kind snapshotapi.Kind) snapshot.Kind {
-	if kind == snapshotapi.KindActive {
-		return snapshot.KindActive
-	}
-	if kind == snapshotapi.KindView {
-		return snapshot.KindView
-	}
-	return snapshot.KindCommitted
-}
-
-func toInfo(info snapshotapi.Info) snapshot.Info {
-	return snapshot.Info{
-		Name:    info.Name,
-		Parent:  info.Parent,
-		Kind:    toKind(info.Kind),
-		Created: info.CreatedAt,
-		Updated: info.UpdatedAt,
-		Labels:  info.Labels,
-	}
-}
-
-func toUsage(resp *snapshotapi.UsageResponse) snapshot.Usage {
-	return snapshot.Usage{
-		Inodes: resp.Inodes,
-		Size:   resp.Size_,
-	}
-}
-
-func toMounts(mm []*types.Mount) []mount.Mount {
-	mounts := make([]mount.Mount, len(mm))
-	for i, m := range mm {
-		mounts[i] = mount.Mount{
-			Type:    m.Type,
-			Source:  m.Source,
-			Options: m.Options,
-		}
-	}
-	return mounts
-}
diff --git a/vendor/github.com/containerd/containerd/services/snapshot/service.go b/vendor/github.com/containerd/containerd/services/snapshot/service.go
deleted file mode 100644
index 716b4c4..0000000
--- a/vendor/github.com/containerd/containerd/services/snapshot/service.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package snapshot
-
-import (
-	gocontext "context"
-
-	eventsapi "github.com/containerd/containerd/api/services/events/v1"
-	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
-	"github.com/containerd/containerd/api/types"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/events"
-	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/metadata"
-	"github.com/containerd/containerd/mount"
-	"github.com/containerd/containerd/plugin"
-	"github.com/containerd/containerd/snapshot"
-	protoempty "github.com/golang/protobuf/ptypes/empty"
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-)
-
-func init() {
-	plugin.Register(&plugin.Registration{
-		Type: plugin.GRPCPlugin,
-		ID:   "snapshots",
-		Requires: []plugin.Type{
-			plugin.MetadataPlugin,
-		},
-		InitFn: newService,
-	})
-}
-
-var empty = &protoempty.Empty{}
-
-type service struct {
-	db        *metadata.DB
-	publisher events.Publisher
-}
-
-func newService(ic *plugin.InitContext) (interface{}, error) {
-	md, err := ic.Get(plugin.MetadataPlugin)
-	if err != nil {
-		return nil, err
-	}
-
-	return &service{
-		db:        md.(*metadata.DB),
-		publisher: ic.Events,
-	}, nil
-}
-
-func (s *service) getSnapshotter(name string) (snapshot.Snapshotter, error) {
-	if name == "" {
-		return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing")
-	}
-
-	sn := s.db.Snapshotter(name)
-	if sn == nil {
-		return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name)
-	}
-	return sn, nil
-}
-
-func (s *service) Register(gs *grpc.Server) error {
-	snapshotapi.RegisterSnapshotsServer(gs, s)
-	return nil
-}
-
-func (s *service) Prepare(ctx context.Context, pr *snapshotapi.PrepareSnapshotRequest) (*snapshotapi.PrepareSnapshotResponse, error) {
-	log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing snapshot")
-	sn, err := s.getSnapshotter(pr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	var opts []snapshot.Opt
-	if pr.Labels != nil {
-		opts = append(opts, snapshot.WithLabels(pr.Labels))
-	}
-	mounts, err := sn.Prepare(ctx, pr.Key, pr.Parent, opts...)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventsapi.SnapshotPrepare{
-		Key:    pr.Key,
-		Parent: pr.Parent,
-	}); err != nil {
-		return nil, err
-	}
-	return &snapshotapi.PrepareSnapshotResponse{
-		Mounts: fromMounts(mounts),
-	}, nil
-}
-
-func (s *service) View(ctx context.Context, pr *snapshotapi.ViewSnapshotRequest) (*snapshotapi.ViewSnapshotResponse, error) {
-	log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing view snapshot")
-	sn, err := s.getSnapshotter(pr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-	var opts []snapshot.Opt
-	if pr.Labels != nil {
-		opts = append(opts, snapshot.WithLabels(pr.Labels))
-	}
-	mounts, err := sn.View(ctx, pr.Key, pr.Parent, opts...)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-	return &snapshotapi.ViewSnapshotResponse{
-		Mounts: fromMounts(mounts),
-	}, nil
-}
-
-func (s *service) Mounts(ctx context.Context, mr *snapshotapi.MountsRequest) (*snapshotapi.MountsResponse, error) {
-	log.G(ctx).WithField("key", mr.Key).Debugf("Getting snapshot mounts")
-	sn, err := s.getSnapshotter(mr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	mounts, err := sn.Mounts(ctx, mr.Key)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-	return &snapshotapi.MountsResponse{
-		Mounts: fromMounts(mounts),
-	}, nil
-}
-
-func (s *service) Commit(ctx context.Context, cr *snapshotapi.CommitSnapshotRequest) (*protoempty.Empty, error) {
-	log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("Committing snapshot")
-	sn, err := s.getSnapshotter(cr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	var opts []snapshot.Opt
-	if cr.Labels != nil {
-		opts = append(opts, snapshot.WithLabels(cr.Labels))
-	}
-	if err := sn.Commit(ctx, cr.Name, cr.Key, opts...); err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	if err := s.publisher.Publish(ctx, "/snapshot/commit", &eventsapi.SnapshotCommit{
-		Key:  cr.Key,
-		Name: cr.Name,
-	}); err != nil {
-		return nil, err
-	}
-	return empty, nil
-}
-
-func (s *service) Remove(ctx context.Context, rr *snapshotapi.RemoveSnapshotRequest) (*protoempty.Empty, error) {
-	log.G(ctx).WithField("key", rr.Key).Debugf("Removing snapshot")
-	sn, err := s.getSnapshotter(rr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := sn.Remove(ctx, rr.Key); err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	if err := s.publisher.Publish(ctx, "/snapshot/remove", &eventsapi.SnapshotRemove{
-		Key: rr.Key,
-	}); err != nil {
-		return nil, err
-	}
-	return empty, nil
-}
-
-func (s *service) Stat(ctx context.Context, sr *snapshotapi.StatSnapshotRequest) (*snapshotapi.StatSnapshotResponse, error) {
-	log.G(ctx).WithField("key", sr.Key).Debugf("Statting snapshot")
-	sn, err := s.getSnapshotter(sr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	info, err := sn.Stat(ctx, sr.Key)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &snapshotapi.StatSnapshotResponse{Info: fromInfo(info)}, nil
-}
-
-func (s *service) Update(ctx context.Context, sr *snapshotapi.UpdateSnapshotRequest) (*snapshotapi.UpdateSnapshotResponse, error) {
-	log.G(ctx).WithField("key", sr.Info.Name).Debugf("Updating snapshot")
-	sn, err := s.getSnapshotter(sr.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	info, err := sn.Update(ctx, toInfo(sr.Info), sr.UpdateMask.GetPaths()...)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return &snapshotapi.UpdateSnapshotResponse{Info: fromInfo(info)}, nil
-}
-
-func (s *service) List(sr *snapshotapi.ListSnapshotsRequest, ss snapshotapi.Snapshots_ListServer) error {
-	sn, err := s.getSnapshotter(sr.Snapshotter)
-	if err != nil {
-		return err
-	}
-
-	var (
-		buffer    []snapshotapi.Info
-		sendBlock = func(block []snapshotapi.Info) error {
-			return ss.Send(&snapshotapi.ListSnapshotsResponse{
-				Info: block,
-			})
-		}
-	)
-	err = sn.Walk(ss.Context(), func(ctx gocontext.Context, info snapshot.Info) error {
-		buffer = append(buffer, fromInfo(info))
-
-		if len(buffer) >= 100 {
-			if err := sendBlock(buffer); err != nil {
-				return err
-			}
-
-			buffer = buffer[:0]
-		}
-
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-	if len(buffer) > 0 {
-		// Send remaining infos
-		if err := sendBlock(buffer); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (s *service) Usage(ctx context.Context, ur *snapshotapi.UsageRequest) (*snapshotapi.UsageResponse, error) {
-	sn, err := s.getSnapshotter(ur.Snapshotter)
-	if err != nil {
-		return nil, err
-	}
-
-	usage, err := sn.Usage(ctx, ur.Key)
-	if err != nil {
-		return nil, errdefs.ToGRPC(err)
-	}
-
-	return fromUsage(usage), nil
-}
-
-func fromKind(kind snapshot.Kind) snapshotapi.Kind {
-	if kind == snapshot.KindActive {
-		return snapshotapi.KindActive
-	}
-	if kind == snapshot.KindView {
-		return snapshotapi.KindView
-	}
-	return snapshotapi.KindCommitted
-}
-
-func fromInfo(info snapshot.Info) snapshotapi.Info {
-	return snapshotapi.Info{
-		Name:      info.Name,
-		Parent:    info.Parent,
-		Kind:      fromKind(info.Kind),
-		CreatedAt: info.Created,
-		UpdatedAt: info.Updated,
-		Labels:    info.Labels,
-	}
-}
-
-func fromUsage(usage snapshot.Usage) *snapshotapi.UsageResponse {
-	return &snapshotapi.UsageResponse{
-		Inodes: usage.Inodes,
-		Size_:  usage.Size,
-	}
-}
-
-func fromMounts(mounts []mount.Mount) []*types.Mount {
-	out := make([]*types.Mount, len(mounts))
-	for i, m := range mounts {
-		out[i] = &types.Mount{
-			Type:    m.Type,
-			Source:  m.Source,
-			Options: m.Options,
-		}
-	}
-	return out
-}
diff --git a/vendor/github.com/containerd/containerd/snapshot.go b/vendor/github.com/containerd/containerd/snapshot.go
new file mode 100644
index 0000000..85bdba1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshot.go
@@ -0,0 +1,229 @@
+package containerd
+
+import (
+	"context"
+	"io"
+
+	snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/snapshots"
+	protobuftypes "github.com/gogo/protobuf/types"
+)
+
+// NewSnapshotterFromClient returns a new Snapshotter which communicates
+// over a GRPC connection.
+func NewSnapshotterFromClient(client snapshotsapi.SnapshotsClient, snapshotterName string) snapshots.Snapshotter {
+	return &remoteSnapshotter{
+		client:          client,
+		snapshotterName: snapshotterName,
+	}
+}
+
+type remoteSnapshotter struct {
+	client          snapshotsapi.SnapshotsClient
+	snapshotterName string
+}
+
+func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
+	resp, err := r.client.Stat(ctx,
+		&snapshotsapi.StatSnapshotRequest{
+			Snapshotter: r.snapshotterName,
+			Key:         key,
+		})
+	if err != nil {
+		return snapshots.Info{}, errdefs.FromGRPC(err)
+	}
+	return toInfo(resp.Info), nil
+}
+
+func (r *remoteSnapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+	resp, err := r.client.Update(ctx,
+		&snapshotsapi.UpdateSnapshotRequest{
+			Snapshotter: r.snapshotterName,
+			Info:        fromInfo(info),
+			UpdateMask: &protobuftypes.FieldMask{
+				Paths: fieldpaths,
+			},
+		})
+	if err != nil {
+		return snapshots.Info{}, errdefs.FromGRPC(err)
+	}
+	return toInfo(resp.Info), nil
+}
+
+func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+	resp, err := r.client.Usage(ctx, &snapshotsapi.UsageRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+	})
+	if err != nil {
+		return snapshots.Usage{}, errdefs.FromGRPC(err)
+	}
+	return toUsage(resp), nil
+}
+
+func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+	resp, err := r.client.Mounts(ctx, &snapshotsapi.MountsRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return toMounts(resp.Mounts), nil
+}
+
+func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	var local snapshots.Info
+	for _, opt := range opts {
+		if err := opt(&local); err != nil {
+			return nil, err
+		}
+	}
+	resp, err := r.client.Prepare(ctx, &snapshotsapi.PrepareSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+		Parent:      parent,
+		Labels:      local.Labels,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return toMounts(resp.Mounts), nil
+}
+
+func (r *remoteSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	var local snapshots.Info
+	for _, opt := range opts {
+		if err := opt(&local); err != nil {
+			return nil, err
+		}
+	}
+	resp, err := r.client.View(ctx, &snapshotsapi.ViewSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+		Parent:      parent,
+		Labels:      local.Labels,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return toMounts(resp.Mounts), nil
+}
+
+func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+	var local snapshots.Info
+	for _, opt := range opts {
+		if err := opt(&local); err != nil {
+			return err
+		}
+	}
+	_, err := r.client.Commit(ctx, &snapshotsapi.CommitSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Name:        name,
+		Key:         key,
+		Labels:      local.Labels,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (r *remoteSnapshotter) Remove(ctx context.Context, key string) error {
+	_, err := r.client.Remove(ctx, &snapshotsapi.RemoveSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshots.Info) error) error {
+	sc, err := r.client.List(ctx, &snapshotsapi.ListSnapshotsRequest{
+		Snapshotter: r.snapshotterName,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	for {
+		resp, err := sc.Recv()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return errdefs.FromGRPC(err)
+		}
+		if resp == nil {
+			return nil
+		}
+		for _, info := range resp.Info {
+			if err := fn(ctx, toInfo(info)); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+func (r *remoteSnapshotter) Close() error {
+	return nil
+}
+
+func toKind(kind snapshotsapi.Kind) snapshots.Kind {
+	if kind == snapshotsapi.KindActive {
+		return snapshots.KindActive
+	}
+	if kind == snapshotsapi.KindView {
+		return snapshots.KindView
+	}
+	return snapshots.KindCommitted
+}
+
+func toInfo(info snapshotsapi.Info) snapshots.Info {
+	return snapshots.Info{
+		Name:    info.Name,
+		Parent:  info.Parent,
+		Kind:    toKind(info.Kind),
+		Created: info.CreatedAt,
+		Updated: info.UpdatedAt,
+		Labels:  info.Labels,
+	}
+}
+
+func toUsage(resp *snapshotsapi.UsageResponse) snapshots.Usage {
+	return snapshots.Usage{
+		Inodes: resp.Inodes,
+		Size:   resp.Size_,
+	}
+}
+
+func toMounts(mm []*types.Mount) []mount.Mount {
+	mounts := make([]mount.Mount, len(mm))
+	for i, m := range mm {
+		mounts[i] = mount.Mount{
+			Type:    m.Type,
+			Source:  m.Source,
+			Options: m.Options,
+		}
+	}
+	return mounts
+}
+
+func fromKind(kind snapshots.Kind) snapshotsapi.Kind {
+	if kind == snapshots.KindActive {
+		return snapshotsapi.KindActive
+	}
+	if kind == snapshots.KindView {
+		return snapshotsapi.KindView
+	}
+	return snapshotsapi.KindCommitted
+}
+
+func fromInfo(info snapshots.Info) snapshotsapi.Info {
+	return snapshotsapi.Info{
+		Name:      info.Name,
+		Parent:    info.Parent,
+		Kind:      fromKind(info.Kind),
+		CreatedAt: info.Created,
+		UpdatedAt: info.Updated,
+		Labels:    info.Labels,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go
similarity index 97%
rename from vendor/github.com/containerd/containerd/snapshot/snapshotter.go
rename to vendor/github.com/containerd/containerd/snapshots/snapshotter.go
index 2b3fe62..cde4c72 100644
--- a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go
+++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go
@@ -1,4 +1,4 @@
-package snapshot
+package snapshots
 
 import (
 	"context"
@@ -280,9 +280,7 @@
 	// A committed snapshot will be created under name with the parent of the
 	// active snapshot.
 	//
-	// Commit may be called multiple times on the same key. Snapshots created
-	// in this manner will all reference the parent used to start the
-	// transaction.
+	// After commit, the snapshot identified by key is removed.
 	Commit(ctx context.Context, name, key string, opts ...Opt) error
 
 	// Remove the committed or active snapshot by the provided key.
@@ -296,6 +294,14 @@
 	// Walk all snapshots in the snapshotter. For each snapshot in the
 	// snapshotter, the function will be called.
 	Walk(ctx context.Context, fn func(context.Context, Info) error) error
+
+	// Close releases the internal resources.
+	//
+	// Close is expected to be called on the end of the lifecycle of the snapshotter,
+	// but not mandatory.
+	//
+	// Close returns nil when it is already closed.
+	Close() error
 }
 
 // Opt allows setting mutable snapshot properties on creation
diff --git a/vendor/github.com/containerd/containerd/spec_opts.go b/vendor/github.com/containerd/containerd/spec_opts.go
deleted file mode 100644
index 2dbf821..0000000
--- a/vendor/github.com/containerd/containerd/spec_opts.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package containerd
-
-import (
-	"context"
-
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/typeurl"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// SpecOpts sets spec specific information to a newly generated OCI spec
-type SpecOpts func(context.Context, *Client, *containers.Container, *specs.Spec) error
-
-// WithProcessArgs replaces the args on the generated spec
-func WithProcessArgs(args ...string) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
-		s.Process.Args = args
-		return nil
-	}
-}
-
-// WithProcessCwd replaces the current working directory on the generated spec
-func WithProcessCwd(cwd string) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
-		s.Process.Cwd = cwd
-		return nil
-	}
-}
-
-// WithHostname sets the container's hostname
-func WithHostname(name string) SpecOpts {
-	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
-		s.Hostname = name
-		return nil
-	}
-}
-
-// WithNewSpec generates a new spec for a new container
-func WithNewSpec(opts ...SpecOpts) NewContainerOpts {
-	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		s, err := createDefaultSpec(ctx, c.ID)
-		if err != nil {
-			return err
-		}
-		for _, o := range opts {
-			if err := o(ctx, client, c, s); err != nil {
-				return err
-			}
-		}
-		any, err := typeurl.MarshalAny(s)
-		if err != nil {
-			return err
-		}
-		c.Spec = any
-		return nil
-	}
-}
-
-// WithSpec sets the provided spec on the container
-func WithSpec(s *specs.Spec, opts ...SpecOpts) NewContainerOpts {
-	return func(ctx context.Context, client *Client, c *containers.Container) error {
-		for _, o := range opts {
-			if err := o(ctx, client, c, s); err != nil {
-				return err
-			}
-		}
-		any, err := typeurl.MarshalAny(s)
-		if err != nil {
-			return err
-		}
-		c.Spec = any
-		return nil
-	}
-}
diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go
index 7ae1bf6..8d25683 100644
--- a/vendor/github.com/containerd/containerd/task.go
+++ b/vendor/github.com/containerd/containerd/task.go
@@ -8,12 +8,12 @@
 	"io"
 	goruntime "runtime"
 	"strings"
-	"sync"
 	"syscall"
 	"time"
 
 	"github.com/containerd/containerd/api/services/tasks/v1"
 	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/errdefs"
@@ -123,7 +123,7 @@
 	// Resume the execution of the task
 	Resume(context.Context) error
 	// Exec creates a new process inside the task
-	Exec(context.Context, string, *specs.Process, IOCreation) (Process, error)
+	Exec(context.Context, string, *specs.Process, cio.Creation) (Process, error)
 	// Pids returns a list of system specific process ids inside the task
 	Pids(context.Context) ([]ProcessInfo, error)
 	// Checkpoint serializes the runtime and memory information of a task into an
@@ -134,7 +134,7 @@
 	// Update modifies executing tasks with updated settings
 	Update(context.Context, ...UpdateTaskOpts) error
 	// LoadProcess loads a previously created exec'd process
-	LoadProcess(context.Context, string, IOAttach) (Process, error)
+	LoadProcess(context.Context, string, cio.Attach) (Process, error)
 	// Metrics returns task metrics for runtime specific metrics
 	//
 	// The metric types are generic to containerd and change depending on the runtime
@@ -148,11 +148,9 @@
 type task struct {
 	client *Client
 
-	io  IO
+	io  cio.IO
 	id  string
 	pid uint32
-
-	mu sync.Mutex
 }
 
 // Pid returns the pid or process id for the task
@@ -175,13 +173,14 @@
 func (t *task) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error {
 	var i KillInfo
 	for _, o := range opts {
-		if err := o(ctx, t, &i); err != nil {
+		if err := o(ctx, &i); err != nil {
 			return err
 		}
 	}
 	_, err := t.client.TaskService().Kill(ctx, &tasks.KillRequest{
 		Signal:      uint32(s),
 		ContainerID: t.id,
+		ExecID:      i.ExecID,
 		All:         i.All,
 	})
 	if err != nil {
@@ -278,7 +277,7 @@
 	return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
 }
 
-func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate IOCreation) (Process, error) {
+func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (_ Process, err error) {
 	if id == "" {
 		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
 	}
@@ -286,6 +285,12 @@
 	if err != nil {
 		return nil, err
 	}
+	defer func() {
+		if err != nil && i != nil {
+			i.Cancel()
+			i.Close()
+		}
+	}()
 	any, err := typeurl.MarshalAny(spec)
 	if err != nil {
 		return nil, err
@@ -343,7 +348,7 @@
 	return errdefs.FromGRPC(err)
 }
 
-func (t *task) IO() IO {
+func (t *task) IO() cio.IO {
 	return t.io
 }
 
@@ -357,7 +362,7 @@
 }
 
 func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Image, error) {
-	ctx, done, err := t.client.withLease(ctx)
+	ctx, done, err := t.client.WithLease(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -460,7 +465,7 @@
 	return errdefs.FromGRPC(err)
 }
 
-func (t *task) LoadProcess(ctx context.Context, id string, ioAttach IOAttach) (Process, error) {
+func (t *task) LoadProcess(ctx context.Context, id string, ioAttach cio.Attach) (Process, error) {
 	response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
 		ContainerID: t.id,
 		ExecID:      id,
@@ -472,7 +477,7 @@
 		}
 		return nil, err
 	}
-	var i IO
+	var i cio.IO
 	if ioAttach != nil {
 		if i, err = attachExistingIO(response, ioAttach); err != nil {
 			return nil, err
diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go
index 261ccba..a387adb 100644
--- a/vendor/github.com/containerd/containerd/task_opts.go
+++ b/vendor/github.com/containerd/containerd/task_opts.go
@@ -5,7 +5,7 @@
 	"syscall"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/linux/runctypes"
 	"github.com/containerd/containerd/mount"
 )
 
@@ -22,7 +22,7 @@
 
 // WithExit causes the task to exit after a successful checkpoint
 func WithExit(r *CheckpointTaskInfo) error {
-	r.Options = &runcopts.CheckpointOptions{
+	r.Options = &runctypes.CheckpointOptions{
 		Exit: true,
 	}
 	return nil
@@ -65,13 +65,23 @@
 	// All kills all processes inside the task
 	// only valid on tasks, ignored on processes
 	All bool
+	// ExecID is the ID of a process to kill
+	ExecID string
 }
 
 // KillOpts allows options to be set for the killing of a process
-type KillOpts func(context.Context, Process, *KillInfo) error
+type KillOpts func(context.Context, *KillInfo) error
 
 // WithKillAll kills all processes for a task
-func WithKillAll(ctx context.Context, p Process, i *KillInfo) error {
+func WithKillAll(ctx context.Context, i *KillInfo) error {
 	i.All = true
 	return nil
 }
+
+// WithKillExecID specifies the process ID
+func WithKillExecID(execID string) KillOpts {
+	return func(ctx context.Context, i *KillInfo) error {
+		i.ExecID = execID
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/task_opts_linux.go b/vendor/github.com/containerd/containerd/task_opts_linux.go
new file mode 100644
index 0000000..5b91cb5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/task_opts_linux.go
@@ -0,0 +1,15 @@
+package containerd
+
+import (
+	"context"
+
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// WithResources sets the provided resources for task updates
+func WithResources(resources *specs.LinuxResources) UpdateTaskOpts {
+	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
+		r.Resources = resources
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/task_opts_windows.go b/vendor/github.com/containerd/containerd/task_opts_windows.go
new file mode 100644
index 0000000..d77402c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/task_opts_windows.go
@@ -0,0 +1,15 @@
+package containerd
+
+import (
+	"context"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// WithResources sets the provided resources on the spec for task updates
+func WithResources(resources *specs.WindowsResources) UpdateTaskOpts {
+	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
+		r.Resources = resources
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf
index 6713468..fea47e4 100644
--- a/vendor/github.com/containerd/containerd/vendor.conf
+++ b/vendor/github.com/containerd/containerd/vendor.conf
@@ -1,7 +1,7 @@
 github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
 github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
 github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
-github.com/containerd/cgroups f7dd103d3e4e696aa67152f6b4ddd1779a3455a9
+github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
 github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
@@ -13,8 +13,8 @@
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/matttproud/golang_protobuf_extensions v1.0.0
 github.com/docker/go-units v0.3.1
-github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
-github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
+github.com/gogo/protobuf v0.5
+github.com/golang/protobuf 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9
 github.com/opencontainers/runtime-spec v1.0.0
 github.com/opencontainers/runc 74a17296470088de3805e138d3d87c62e613dfc4
 github.com/sirupsen/logrus v1.0.0
@@ -25,7 +25,7 @@
 github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
 github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-google.golang.org/grpc v1.3.0
+google.golang.org/grpc v1.7.2
 github.com/pkg/errors v0.8.0
 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
 golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
@@ -35,9 +35,10 @@
 github.com/BurntSushi/toml v0.2.0-21-g9906417
 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
 github.com/Microsoft/go-winio v0.4.4
-github.com/Microsoft/hcsshim v0.6.3
+github.com/Microsoft/hcsshim v0.6.7
 github.com/Microsoft/opengcs v0.3.2
 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
-github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
+github.com/dmcgowan/go-tar go1.10
+github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go
index 77c344d..d2f9fcc 100644
--- a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go
+++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
-// DO NOT EDIT!
 
 /*
 	Package hcsshimtypes is a generated protocol buffer package.
@@ -17,7 +16,8 @@
 import proto "github.com/gogo/protobuf/proto"
 import fmt "fmt"
 import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
+
+// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/gogo/protobuf/types"
 import _ "github.com/gogo/protobuf/types"
 
@@ -61,6 +61,7 @@
 	MemoryWorkingSetSharedBytes  uint64    `protobuf:"varint,6,opt,name=memory_working_set_shared_bytes,json=memoryWorkingSetSharedBytes,proto3" json:"memory_working_set_shared_bytes,omitempty"`
 	ProcessID                    uint32    `protobuf:"varint,7,opt,name=process_id,json=processId,proto3" json:"process_id,omitempty"`
 	UserTime_100Ns               uint64    `protobuf:"varint,8,opt,name=user_time_100_ns,json=userTime100Ns,proto3" json:"user_time_100_ns,omitempty"`
+	ExecID                       string    `protobuf:"bytes,9,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
 }
 
 func (m *ProcessDetails) Reset()                    { *m = ProcessDetails{} }
@@ -156,27 +157,15 @@
 		i++
 		i = encodeVarintHcsshim(dAtA, i, uint64(m.UserTime_100Ns))
 	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
 	return i, nil
 }
 
-func encodeFixed64Hcsshim(dAtA []byte, offset int, v uint64) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	dAtA[offset+4] = uint8(v >> 32)
-	dAtA[offset+5] = uint8(v >> 40)
-	dAtA[offset+6] = uint8(v >> 48)
-	dAtA[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Hcsshim(dAtA []byte, offset int, v uint32) int {
-	dAtA[offset] = uint8(v)
-	dAtA[offset+1] = uint8(v >> 8)
-	dAtA[offset+2] = uint8(v >> 16)
-	dAtA[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintHcsshim(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -221,6 +210,10 @@
 	if m.UserTime_100Ns != 0 {
 		n += 1 + sovHcsshim(uint64(m.UserTime_100Ns))
 	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovHcsshim(uint64(l))
+	}
 	return n
 }
 
@@ -260,6 +253,7 @@
 		`MemoryWorkingSetSharedBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetSharedBytes) + `,`,
 		`ProcessID:` + fmt.Sprintf("%v", this.ProcessID) + `,`,
 		`UserTime_100Ns:` + fmt.Sprintf("%v", this.UserTime_100Ns) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -554,6 +548,35 @@
 					break
 				}
 			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthHcsshim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipHcsshim(dAtA[iNdEx:])
@@ -685,35 +708,37 @@
 }
 
 var fileDescriptorHcsshim = []byte{
-	// 479 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x6f, 0xd3, 0x30,
-	0x14, 0xc7, 0x1b, 0x36, 0xc6, 0x62, 0x54, 0x60, 0x86, 0x43, 0x28, 0x90, 0x54, 0xbb, 0x50, 0x09,
-	0x94, 0x74, 0x70, 0xe4, 0x44, 0x5a, 0x21, 0xed, 0x32, 0xa6, 0x0c, 0x09, 0x09, 0x21, 0x59, 0x6e,
-	0xf2, 0x48, 0xad, 0xd5, 0x71, 0x64, 0xbb, 0x54, 0xbd, 0xf1, 0x11, 0x38, 0xf2, 0x49, 0xf8, 0x0c,
-	0x3d, 0x72, 0xe4, 0x34, 0x58, 0x3e, 0x09, 0x8a, 0xed, 0x96, 0x51, 0x38, 0x71, 0xf3, 0xf3, 0xff,
-	0xf7, 0x7e, 0xaf, 0x7e, 0x0d, 0x1a, 0x95, 0x4c, 0x4f, 0xe7, 0x93, 0x38, 0x17, 0x3c, 0xc9, 0x45,
-	0xa5, 0x29, 0xab, 0x40, 0x16, 0x57, 0x8f, 0x0b, 0x56, 0x15, 0x62, 0xa1, 0x92, 0x69, 0xae, 0xd4,
-	0x94, 0x71, 0xbd, 0xac, 0x61, 0x53, 0xc4, 0xb5, 0x14, 0x5a, 0xe0, 0xde, 0x6f, 0x3c, 0x76, 0x78,
-	0xec, 0x88, 0xde, 0xbd, 0x52, 0x94, 0xc2, 0x60, 0x49, 0x7b, 0xb2, 0x1d, 0xbd, 0xb0, 0x14, 0xa2,
-	0x9c, 0x41, 0x62, 0xaa, 0xc9, 0xfc, 0x43, 0x52, 0xcc, 0x25, 0xd5, 0x4c, 0x54, 0x2e, 0x8f, 0xb6,
-	0x73, 0xcd, 0x38, 0x28, 0x4d, 0x79, 0x6d, 0x81, 0xc3, 0x1c, 0x75, 0x47, 0x12, 0xa8, 0x86, 0xd7,
-	0x75, 0xdb, 0xa6, 0x70, 0x86, 0xb0, 0x06, 0xc9, 0x59, 0x45, 0x35, 0x90, 0xb5, 0x2d, 0xf0, 0xfa,
-	0xde, 0xe0, 0xe6, 0xb3, 0xfb, 0xb1, 0xd5, 0xc5, 0x6b, 0x5d, 0x3c, 0x76, 0x40, 0xba, 0xbf, 0xba,
-	0x88, 0x3a, 0x5f, 0x7e, 0x44, 0x5e, 0x76, 0xb0, 0x69, 0x5f, 0x87, 0x87, 0x5f, 0x77, 0xd0, 0xad,
-	0x53, 0x29, 0x72, 0x50, 0x6a, 0x0c, 0x9a, 0xb2, 0x99, 0xc2, 0x8f, 0x10, 0x62, 0x9c, 0x96, 0x40,
-	0x2a, 0xca, 0xc1, 0xe8, 0xfd, 0xcc, 0x37, 0x37, 0x27, 0x94, 0x03, 0x1e, 0x21, 0x94, 0x9b, 0x9f,
-	0x55, 0x10, 0xaa, 0x83, 0x6b, 0x66, 0x7a, 0xef, 0xaf, 0xe9, 0x6f, 0xd6, 0x8f, 0xb1, 0xe3, 0x3f,
-	0xb7, 0xe3, 0x7d, 0xd7, 0xf7, 0x52, 0xe3, 0x27, 0x08, 0x9f, 0x83, 0xac, 0x60, 0x46, 0xda, 0x57,
-	0x93, 0xa3, 0xe1, 0x90, 0x54, 0x2a, 0xd8, 0xe9, 0x7b, 0x83, 0xdd, 0xec, 0xb6, 0x4d, 0x5a, 0xc3,
-	0xd1, 0x70, 0x78, 0xa2, 0x70, 0x8c, 0xee, 0x72, 0xe0, 0x42, 0x2e, 0x49, 0x2e, 0x38, 0x67, 0x9a,
-	0x4c, 0x96, 0x1a, 0x54, 0xb0, 0x6b, 0xe8, 0x03, 0x1b, 0x8d, 0x4c, 0x92, 0xb6, 0x01, 0x7e, 0x85,
-	0xfa, 0x8e, 0x5f, 0x08, 0x79, 0xce, 0xaa, 0x92, 0x28, 0xd0, 0xa4, 0x96, 0xec, 0x63, 0xbb, 0x38,
-	0xdb, 0x7c, 0xdd, 0x34, 0x3f, 0xb4, 0xdc, 0x5b, 0x8b, 0x9d, 0x81, 0x3e, 0xb5, 0x90, 0xf5, 0x8c,
-	0x51, 0xf4, 0x0f, 0x8f, 0x9a, 0x52, 0x09, 0x85, 0xd3, 0xec, 0x19, 0xcd, 0x83, 0x6d, 0xcd, 0x99,
-	0x61, 0xac, 0xe5, 0x29, 0x42, 0xb5, 0x5d, 0x30, 0x61, 0x45, 0x70, 0xa3, 0xef, 0x0d, 0xba, 0x69,
-	0xb7, 0xb9, 0x88, 0x7c, 0xb7, 0xf6, 0xe3, 0x71, 0xe6, 0x3b, 0xe0, 0xb8, 0xc0, 0x8f, 0xd1, 0x9d,
-	0xb9, 0x02, 0xf9, 0xc7, 0x5a, 0xf6, 0xcd, 0x90, 0x6e, 0x7b, 0xbf, 0x59, 0x4a, 0xfa, 0x7e, 0x75,
-	0x19, 0x76, 0xbe, 0x5f, 0x86, 0x9d, 0x4f, 0x4d, 0xe8, 0xad, 0x9a, 0xd0, 0xfb, 0xd6, 0x84, 0xde,
-	0xcf, 0x26, 0xf4, 0xde, 0xa5, 0xff, 0xf5, 0xbd, 0xbf, 0xb8, 0x5a, 0x4c, 0xf6, 0xcc, 0x1f, 0xf9,
-	0xfc, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x22, 0xad, 0xdf, 0xf8, 0x3c, 0x03, 0x00, 0x00,
+	// 507 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0x41, 0x6f, 0xd3, 0x3c,
+	0x18, 0xc7, 0x9b, 0x77, 0x7b, 0xbb, 0xc5, 0xa8, 0xc0, 0x0c, 0x87, 0x52, 0x20, 0xa9, 0xc6, 0x81,
+	0x4a, 0xa0, 0xb4, 0x83, 0x23, 0x27, 0xd2, 0x82, 0xd4, 0xcb, 0x98, 0x32, 0x24, 0x10, 0x42, 0xb2,
+	0xdc, 0xe4, 0x21, 0xb5, 0x56, 0xc7, 0x91, 0xed, 0xd2, 0xf5, 0xc6, 0x47, 0xe0, 0xc8, 0x47, 0xea,
+	0x91, 0x23, 0x12, 0x52, 0x61, 0xf9, 0x24, 0xc8, 0x76, 0xba, 0x8d, 0xc1, 0x89, 0x9b, 0xed, 0xff,
+	0xef, 0xf9, 0x3d, 0xf1, 0x63, 0x05, 0x0d, 0x73, 0xa6, 0xa7, 0xf3, 0x49, 0x94, 0x0a, 0xde, 0x4f,
+	0x45, 0xa1, 0x29, 0x2b, 0x40, 0x66, 0x97, 0x97, 0x0b, 0x56, 0x64, 0x62, 0xa1, 0xfa, 0xd3, 0x54,
+	0xa9, 0x29, 0xe3, 0x7a, 0x59, 0xc2, 0xf9, 0x26, 0x2a, 0xa5, 0xd0, 0x02, 0x77, 0x2e, 0xf0, 0xa8,
+	0xc6, 0xa3, 0x9a, 0xe8, 0xdc, 0xce, 0x45, 0x2e, 0x2c, 0xd6, 0x37, 0x2b, 0x57, 0xd1, 0x09, 0x72,
+	0x21, 0xf2, 0x19, 0xf4, 0xed, 0x6e, 0x32, 0xff, 0xd0, 0xcf, 0xe6, 0x92, 0x6a, 0x26, 0x8a, 0x3a,
+	0x0f, 0xaf, 0xe6, 0x9a, 0x71, 0x50, 0x9a, 0xf2, 0xd2, 0x01, 0xfb, 0x29, 0x6a, 0x0d, 0x25, 0x50,
+	0x0d, 0xaf, 0x4a, 0x53, 0xa6, 0x70, 0x82, 0xb0, 0x06, 0xc9, 0x59, 0x41, 0x35, 0x90, 0x8d, 0xad,
+	0xed, 0x75, 0xbd, 0xde, 0xb5, 0x27, 0x77, 0x22, 0xa7, 0x8b, 0x36, 0xba, 0x68, 0x54, 0x03, 0xf1,
+	0xee, 0x6a, 0x1d, 0x36, 0xbe, 0xfc, 0x08, 0xbd, 0x64, 0xef, 0xbc, 0x7c, 0x13, 0xee, 0x7f, 0xdf,
+	0x42, 0xd7, 0x8f, 0xa4, 0x48, 0x41, 0xa9, 0x11, 0x68, 0xca, 0x66, 0x0a, 0xdf, 0x47, 0x88, 0x71,
+	0x9a, 0x03, 0x29, 0x28, 0x07, 0xab, 0xf7, 0x13, 0xdf, 0x9e, 0x1c, 0x52, 0x0e, 0x78, 0x88, 0x50,
+	0x6a, 0x3f, 0x2b, 0x23, 0x54, 0xb7, 0xff, 0xb3, 0xdd, 0x3b, 0x7f, 0x74, 0x7f, 0xbd, 0xb9, 0x8c,
+	0x6b, 0xff, 0xd9, 0xb4, 0xf7, 0xeb, 0xba, 0xe7, 0x1a, 0x3f, 0x42, 0xf8, 0x04, 0x64, 0x01, 0x33,
+	0x62, 0x6e, 0x4d, 0x0e, 0x06, 0x03, 0x52, 0xa8, 0xf6, 0x56, 0xd7, 0xeb, 0x6d, 0x27, 0x37, 0x5c,
+	0x62, 0x0c, 0x07, 0x83, 0xc1, 0xa1, 0xc2, 0x11, 0xba, 0xc5, 0x81, 0x0b, 0xb9, 0x24, 0xa9, 0xe0,
+	0x9c, 0x69, 0x32, 0x59, 0x6a, 0x50, 0xed, 0x6d, 0x4b, 0xef, 0xb9, 0x68, 0x68, 0x93, 0xd8, 0x04,
+	0xf8, 0x25, 0xea, 0xd6, 0xfc, 0x42, 0xc8, 0x13, 0x56, 0xe4, 0x44, 0x81, 0x26, 0xa5, 0x64, 0x1f,
+	0xcd, 0xe0, 0x5c, 0xf1, 0xff, 0xb6, 0xf8, 0x9e, 0xe3, 0xde, 0x38, 0xec, 0x18, 0xf4, 0x91, 0x83,
+	0x9c, 0x67, 0x84, 0xc2, 0xbf, 0x78, 0xd4, 0x94, 0x4a, 0xc8, 0x6a, 0x4d, 0xd3, 0x6a, 0xee, 0x5e,
+	0xd5, 0x1c, 0x5b, 0xc6, 0x59, 0x1e, 0x23, 0x54, 0xba, 0x01, 0x13, 0x96, 0xb5, 0x77, 0xba, 0x5e,
+	0xaf, 0x15, 0xb7, 0xaa, 0x75, 0xe8, 0xd7, 0x63, 0x1f, 0x8f, 0x12, 0xbf, 0x06, 0xc6, 0x19, 0x7e,
+	0x88, 0x6e, 0xce, 0x15, 0xc8, 0xdf, 0xc6, 0xb2, 0x6b, 0x9b, 0xb4, 0xcc, 0xf9, 0xc5, 0x50, 0x1e,
+	0xa0, 0x1d, 0x38, 0x85, 0xd4, 0x38, 0x7d, 0xf3, 0x44, 0x31, 0xaa, 0xd6, 0x61, 0xf3, 0xc5, 0x29,
+	0xa4, 0xe3, 0x51, 0xd2, 0x34, 0xd1, 0x38, 0x8b, 0xdf, 0xaf, 0xce, 0x82, 0xc6, 0xb7, 0xb3, 0xa0,
+	0xf1, 0xa9, 0x0a, 0xbc, 0x55, 0x15, 0x78, 0x5f, 0xab, 0xc0, 0xfb, 0x59, 0x05, 0xde, 0xbb, 0xf8,
+	0x9f, 0x7e, 0x8a, 0x67, 0x97, 0x37, 0x6f, 0x1b, 0x93, 0xa6, 0x7d, 0xef, 0xa7, 0xbf, 0x02, 0x00,
+	0x00, 0xff, 0xff, 0x1e, 0xd7, 0x2f, 0xa8, 0x63, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
index 7fcc054..5934fca 100644
--- a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
+++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
@@ -2,7 +2,7 @@
 
 package containerd.windows.hcsshim;
 
-import "gogoproto/gogo.proto";
+import weak "gogoproto/gogo.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/timestamp.proto";
 
@@ -23,4 +23,5 @@
 	uint64 memory_working_set_shared_bytes = 6;
 	uint32 process_id = 7;
 	uint64 user_time_100_ns = 8;
+	string exec_id = 9;
 }
diff --git a/vendor/github.com/dmcgowan/go-tar/common.go b/vendor/github.com/dmcgowan/go-tar/common.go
index d2ae66d..e360953 100644
--- a/vendor/github.com/dmcgowan/go-tar/common.go
+++ b/vendor/github.com/dmcgowan/go-tar/common.go
@@ -3,20 +3,23 @@
 // license that can be found in the LICENSE file.
 
 // Package tar implements access to tar archives.
-// It aims to cover most of the variations, including those produced
-// by GNU and BSD tars.
 //
-// References:
-//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
-//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
-//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+// Tape archives (tar) are a file format for storing a sequence of files that
+// can be read and written in a streaming manner.
+// This package aims to cover most variations of the format,
+// including those produced by GNU and BSD tar tools.
 package tar
 
 import (
 	"errors"
 	"fmt"
+	"io"
+	"math"
 	"os"
 	"path"
+	"reflect"
+	"strconv"
+	"strings"
 	"time"
 )
 
@@ -24,42 +27,569 @@
 // architectures. If a large value is encountered when decoding, the result
 // stored in Header will be the truncated version.
 
-// Header type flags.
-const (
-	TypeReg           = '0'    // regular file
-	TypeRegA          = '\x00' // regular file
-	TypeLink          = '1'    // hard link
-	TypeSymlink       = '2'    // symbolic link
-	TypeChar          = '3'    // character device node
-	TypeBlock         = '4'    // block device node
-	TypeDir           = '5'    // directory
-	TypeFifo          = '6'    // fifo node
-	TypeCont          = '7'    // reserved
-	TypeXHeader       = 'x'    // extended header
-	TypeXGlobalHeader = 'g'    // global extended header
-	TypeGNULongName   = 'L'    // Next file has a long name
-	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
-	TypeGNUSparse     = 'S'    // sparse file
+var (
+	ErrHeader          = errors.New("tar: invalid tar header")
+	ErrWriteTooLong    = errors.New("tar: write too long")
+	ErrFieldTooLong    = errors.New("tar: header field too long")
+	ErrWriteAfterClose = errors.New("tar: write after close")
+	errMissData        = errors.New("tar: sparse file references non-existent data")
+	errUnrefData       = errors.New("tar: sparse file contains unreferenced data")
+	errWriteHole       = errors.New("tar: write non-NUL byte in sparse hole")
 )
 
+type headerError []string
+
+func (he headerError) Error() string {
+	const prefix = "tar: cannot encode header"
+	var ss []string
+	for _, s := range he {
+		if s != "" {
+			ss = append(ss, s)
+		}
+	}
+	if len(ss) == 0 {
+		return prefix
+	}
+	return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and "))
+}
+
+// Type flags for Header.Typeflag.
+const (
+	// Type '0' indicates a regular file.
+	TypeReg  = '0'
+	TypeRegA = '\x00' // For legacy support; use TypeReg instead
+
+	// Type '1' to '6' are header-only flags and may not have a data body.
+	TypeLink    = '1' // Hard link
+	TypeSymlink = '2' // Symbolic link
+	TypeChar    = '3' // Character device node
+	TypeBlock   = '4' // Block device node
+	TypeDir     = '5' // Directory
+	TypeFifo    = '6' // FIFO node
+
+	// Type '7' is reserved.
+	TypeCont = '7'
+
+	// Type 'x' is used by the PAX format to store key-value records that
+	// are only relevant to the next file.
+	// This package transparently handles these types.
+	TypeXHeader = 'x'
+
+	// Type 'g' is used by the PAX format to store key-value records that
+	// are relevant to all subsequent files.
+	// This package only supports parsing and composing such headers,
+	// but does not currently support persisting the global state across files.
+	TypeXGlobalHeader = 'g'
+
+	// Type 'S' indicates a sparse file in the GNU format.
+	// Header.SparseHoles should be populated when using this type.
+	TypeGNUSparse = 'S'
+
+	// Types 'L' and 'K' are used by the GNU format for a meta file
+	// used to store the path or link name for the next file.
+	// This package transparently handles these types.
+	TypeGNULongName = 'L'
+	TypeGNULongLink = 'K'
+)
+
+// Keywords for PAX extended header records.
+const (
+	paxNone     = "" // Indicates that no PAX key is suitable
+	paxPath     = "path"
+	paxLinkpath = "linkpath"
+	paxSize     = "size"
+	paxUid      = "uid"
+	paxGid      = "gid"
+	paxUname    = "uname"
+	paxGname    = "gname"
+	paxMtime    = "mtime"
+	paxAtime    = "atime"
+	paxCtime    = "ctime"   // Removed from later revision of PAX spec, but was valid
+	paxCharset  = "charset" // Currently unused
+	paxComment  = "comment" // Currently unused
+
+	paxSchilyXattr = "SCHILY.xattr."
+
+	// Keywords for GNU sparse files in a PAX extended header.
+	paxGNUSparse          = "GNU.sparse."
+	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+	paxGNUSparseOffset    = "GNU.sparse.offset"
+	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
+	paxGNUSparseMap       = "GNU.sparse.map"
+	paxGNUSparseName      = "GNU.sparse.name"
+	paxGNUSparseMajor     = "GNU.sparse.major"
+	paxGNUSparseMinor     = "GNU.sparse.minor"
+	paxGNUSparseSize      = "GNU.sparse.size"
+	paxGNUSparseRealSize  = "GNU.sparse.realsize"
+)
+
+// basicKeys is a set of the PAX keys for which we have built-in support.
+// This does not contain "charset" or "comment", which are both PAX-specific,
+// so adding them as first-class features of Header is unlikely.
+// Users can use the PAXRecords field to set it themselves.
+var basicKeys = map[string]bool{
+	paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true,
+	paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true,
+}
+
 // A Header represents a single header in a tar archive.
 // Some fields may not be populated.
+//
+// For forward compatibility, users that retrieve a Header from Reader.Next,
+// mutate it in some ways, and then pass it back to Writer.WriteHeader
+// should do so by creating a new Header and copying the fields
+// that they are interested in preserving.
 type Header struct {
-	Name       string    // name of header file entry
-	Mode       int64     // permission and mode bits
-	Uid        int       // user id of owner
-	Gid        int       // group id of owner
-	Size       int64     // length in bytes
-	ModTime    time.Time // modified time
-	Typeflag   byte      // type of header entry
-	Linkname   string    // target name of link
-	Uname      string    // user name of owner
-	Gname      string    // group name of owner
-	Devmajor   int64     // major number of character or block device
-	Devminor   int64     // minor number of character or block device
-	AccessTime time.Time // access time
-	ChangeTime time.Time // status change time
-	Xattrs     map[string]string
+	Typeflag byte // Type of header entry (should be TypeReg for most files)
+
+	Name     string // Name of file entry
+	Linkname string // Target name of link (valid for TypeLink or TypeSymlink)
+
+	Size  int64  // Logical file size in bytes
+	Mode  int64  // Permission and mode bits
+	Uid   int    // User ID of owner
+	Gid   int    // Group ID of owner
+	Uname string // User name of owner
+	Gname string // Group name of owner
+
+	// If the Format is unspecified, then Writer.WriteHeader rounds ModTime
+	// to the nearest second and ignores the AccessTime and ChangeTime fields.
+	//
+	// To use AccessTime or ChangeTime, specify the Format as PAX or GNU.
+	// To use sub-second resolution, specify the Format as PAX.
+	ModTime    time.Time // Modification time
+	AccessTime time.Time // Access time (requires either PAX or GNU support)
+	ChangeTime time.Time // Change time (requires either PAX or GNU support)
+
+	Devmajor int64 // Major device number (valid for TypeChar or TypeBlock)
+	Devminor int64 // Minor device number (valid for TypeChar or TypeBlock)
+
+	// SparseHoles represents a sequence of holes in a sparse file.
+	//
+	// A file is sparse if len(SparseHoles) > 0 or Typeflag is TypeGNUSparse.
+	// If TypeGNUSparse is set, then the format is GNU, otherwise
+	// the format is PAX (by using GNU-specific PAX records).
+	//
+	// A sparse file consists of fragments of data, intermixed with holes
+	// (described by this field). A hole is semantically a block of NUL-bytes,
+	// but does not actually exist within the tar file.
+	// The holes must be sorted in ascending order,
+	// not overlap with each other, and not extend past the specified Size.
+	SparseHoles []SparseEntry
+
+	// Xattrs stores extended attributes as PAX records under the
+	// "SCHILY.xattr." namespace.
+	//
+	// The following are semantically equivalent:
+	//  h.Xattrs[key] = value
+	//  h.PAXRecords["SCHILY.xattr."+key] = value
+	//
+	// When Writer.WriteHeader is called, the contents of Xattrs will take
+	// precedence over those in PAXRecords.
+	//
+	// Deprecated: Use PAXRecords instead.
+	Xattrs map[string]string
+
+	// PAXRecords is a map of PAX extended header records.
+	//
+	// User-defined records should have keys of the following form:
+	//	VENDOR.keyword
+	// Where VENDOR is some namespace in all uppercase, and keyword may
+	// not contain the '=' character (e.g., "GOLANG.pkg.version").
+	// The key and value should be non-empty UTF-8 strings.
+	//
+	// When Writer.WriteHeader is called, PAX records derived from the
+	// the other fields in Header take precedence over PAXRecords.
+	PAXRecords map[string]string
+
+	// Format specifies the format of the tar header.
+	//
+	// This is set by Reader.Next as a best-effort guess at the format.
+	// Since the Reader liberally reads some non-compliant files,
+	// it is possible for this to be FormatUnknown.
+	//
+	// If the format is unspecified when Writer.WriteHeader is called,
+	// then it uses the first format (in the order of USTAR, PAX, GNU)
+	// capable of encoding this Header (see Format).
+	Format Format
+}
+
+// SparseEntry represents a Length-sized fragment at Offset in the file.
+type SparseEntry struct{ Offset, Length int64 }
+
+func (s SparseEntry) endOffset() int64 { return s.Offset + s.Length }
+
+// A sparse file can be represented as either a sparseDatas or a sparseHoles.
+// As long as the total size is known, they are equivalent and one can be
+// converted to the other form and back. The various tar formats with sparse
+// file support represent sparse files in the sparseDatas form. That is, they
+// specify the fragments in the file that has data, and treat everything else as
+// having zero bytes. As such, the encoding and decoding logic in this package
+// deals with sparseDatas.
+//
+// However, the external API uses sparseHoles instead of sparseDatas because the
+// zero value of sparseHoles logically represents a normal file (i.e., there are
+// no holes in it). On the other hand, the zero value of sparseDatas implies
+// that the file has no data in it, which is rather odd.
+//
+// As an example, if the underlying raw file contains the 10-byte data:
+//	var compactFile = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var spd sparseDatas = []sparseEntry{
+//		{Offset: 2,  Length: 5},  // Data fragment for 2..6
+//		{Offset: 18, Length: 3},  // Data fragment for 18..20
+//	}
+//	var sph sparseHoles = []SparseEntry{
+//		{Offset: 0,  Length: 2},  // Hole fragment for 0..1
+//		{Offset: 7,  Length: 11}, // Hole fragment for 7..17
+//		{Offset: 21, Length: 4},  // Hole fragment for 21..24
+//	}
+//
+// Then the content of the resulting sparse file with a Header.Size of 25 is:
+//	var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type (
+	sparseDatas []SparseEntry
+	sparseHoles []SparseEntry
+)
+
+// validateSparseEntries reports whether sp is a valid sparse map.
+// It does not matter whether sp represents data fragments or hole fragments.
+func validateSparseEntries(sp []SparseEntry, size int64) bool {
+	// Validate all sparse entries. These are the same checks as performed by
+	// the BSD tar utility.
+	if size < 0 {
+		return false
+	}
+	var pre SparseEntry
+	for _, cur := range sp {
+		switch {
+		case cur.Offset < 0 || cur.Length < 0:
+			return false // Negative values are never okay
+		case cur.Offset > math.MaxInt64-cur.Length:
+			return false // Integer overflow with large length
+		case cur.endOffset() > size:
+			return false // Region extends beyond the actual size
+		case pre.endOffset() > cur.Offset:
+			return false // Regions cannot overlap and must be in order
+		}
+		pre = cur
+	}
+	return true
+}
+
+// alignSparseEntries mutates src and returns dst where each fragment's
+// starting offset is aligned up to the nearest block edge, and each
+// ending offset is aligned down to the nearest block edge.
+//
+// Even though the Go tar Reader and the BSD tar utility can handle entries
+// with arbitrary offsets and lengths, the GNU tar utility can only handle
+// offsets and lengths that are multiples of blockSize.
+func alignSparseEntries(src []SparseEntry, size int64) []SparseEntry {
+	dst := src[:0]
+	for _, s := range src {
+		pos, end := s.Offset, s.endOffset()
+		pos += blockPadding(+pos) // Round-up to nearest blockSize
+		if end != size {
+			end -= blockPadding(-end) // Round-down to nearest blockSize
+		}
+		if pos < end {
+			dst = append(dst, SparseEntry{Offset: pos, Length: end - pos})
+		}
+	}
+	return dst
+}
+
+// invertSparseEntries converts a sparse map from one form to the other.
+// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
+// The input must have been already validated.
+//
+// This function mutates src and returns a normalized map where:
+//	* adjacent fragments are coalesced together
+//	* only the last fragment may be empty
+//	* the endOffset of the last fragment is the total size
+func invertSparseEntries(src []SparseEntry, size int64) []SparseEntry {
+	dst := src[:0]
+	var pre SparseEntry
+	for _, cur := range src {
+		if cur.Length == 0 {
+			continue // Skip empty fragments
+		}
+		pre.Length = cur.Offset - pre.Offset
+		if pre.Length > 0 {
+			dst = append(dst, pre) // Only add non-empty fragments
+		}
+		pre.Offset = cur.endOffset()
+	}
+	pre.Length = size - pre.Offset // Possibly the only empty fragment
+	return append(dst, pre)
+}
+
+// fileState tracks the number of logical (includes sparse holes) and physical
+// (actual in tar archive) bytes remaining for the current file.
+//
+// Invariant: LogicalRemaining >= PhysicalRemaining
+type fileState interface {
+	LogicalRemaining() int64
+	PhysicalRemaining() int64
+}
+
+// allowedFormats determines which formats can be used.
+// The value returned is the logical OR of multiple possible formats.
+// If the value is FormatUnknown, then the input Header cannot be encoded
+// and an error is returned explaining why.
+//
+// As a by-product of checking the fields, this function returns paxHdrs, which
+// contain all fields that could not be directly encoded.
+// A value receiver ensures that this method does not mutate the source Header.
+func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) {
+	format = FormatUSTAR | FormatPAX | FormatGNU
+	paxHdrs = make(map[string]string)
+
+	var whyNoUSTAR, whyNoPAX, whyNoGNU string
+	var preferPAX bool // Prefer PAX over USTAR
+	verifyString := func(s string, size int, name, paxKey string) {
+		// NUL-terminator is optional for path and linkpath.
+		// Technically, it is required for uname and gname,
+		// but neither GNU nor BSD tar checks for it.
+		tooLong := len(s) > size
+		allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath
+		if hasNUL(s) || (tooLong && !allowLongGNU) {
+			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s)
+			format.mustNotBe(FormatGNU)
+		}
+		if !isASCII(s) || tooLong {
+			canSplitUSTAR := paxKey == paxPath
+			if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok {
+				whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s)
+				format.mustNotBe(FormatUSTAR)
+			}
+			if paxKey == paxNone {
+				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s)
+				format.mustNotBe(FormatPAX)
+			} else {
+				paxHdrs[paxKey] = s
+			}
+		}
+		if v, ok := h.PAXRecords[paxKey]; ok && v == s {
+			paxHdrs[paxKey] = v
+		}
+	}
+	verifyNumeric := func(n int64, size int, name, paxKey string) {
+		if !fitsInBase256(size, n) {
+			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n)
+			format.mustNotBe(FormatGNU)
+		}
+		if !fitsInOctal(size, n) {
+			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n)
+			format.mustNotBe(FormatUSTAR)
+			if paxKey == paxNone {
+				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n)
+				format.mustNotBe(FormatPAX)
+			} else {
+				paxHdrs[paxKey] = strconv.FormatInt(n, 10)
+			}
+		}
+		if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) {
+			paxHdrs[paxKey] = v
+		}
+	}
+	verifyTime := func(ts time.Time, size int, name, paxKey string) {
+		if ts.IsZero() {
+			return // Always okay
+		}
+		if !fitsInBase256(size, ts.Unix()) {
+			whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts)
+			format.mustNotBe(FormatGNU)
+		}
+		isMtime := paxKey == paxMtime
+		fitsOctal := fitsInOctal(size, ts.Unix())
+		if (isMtime && !fitsOctal) || !isMtime {
+			whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts)
+			format.mustNotBe(FormatUSTAR)
+		}
+		needsNano := ts.Nanosecond() != 0
+		if !isMtime || !fitsOctal || needsNano {
+			preferPAX = true // USTAR may truncate sub-second measurements
+			if paxKey == paxNone {
+				whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts)
+				format.mustNotBe(FormatPAX)
+			} else {
+				paxHdrs[paxKey] = formatPAXTime(ts)
+			}
+		}
+		if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) {
+			paxHdrs[paxKey] = v
+		}
+	}
+
+	// Check basic fields.
+	var blk block
+	v7 := blk.V7()
+	ustar := blk.USTAR()
+	gnu := blk.GNU()
+	verifyString(h.Name, len(v7.Name()), "Name", paxPath)
+	verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath)
+	verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname)
+	verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname)
+	verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone)
+	verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid)
+	verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid)
+	verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize)
+	verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone)
+	verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone)
+	verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime)
+	verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime)
+	verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime)
+
+	// Check for header-only types.
+	var whyOnlyPAX, whyOnlyGNU string
+	switch h.Typeflag {
+	case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse:
+		// Exclude TypeLink and TypeSymlink, since they may reference directories.
+		if strings.HasSuffix(h.Name, "/") {
+			return FormatUnknown, nil, headerError{"filename may not have trailing slash"}
+		}
+	case TypeXHeader, TypeGNULongName, TypeGNULongLink:
+		return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"}
+	case TypeXGlobalHeader:
+		if !reflect.DeepEqual(h, Header{Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format}) {
+			return FormatUnknown, nil, headerError{"only PAXRecords may be set for TypeXGlobalHeader"}
+		}
+		whyOnlyPAX = "only PAX supports TypeXGlobalHeader"
+		format.mayOnlyBe(FormatPAX)
+	}
+	if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 {
+		return FormatUnknown, nil, headerError{"negative size on header-only type"}
+	}
+
+	// Check PAX records.
+	if len(h.Xattrs) > 0 {
+		for k, v := range h.Xattrs {
+			paxHdrs[paxSchilyXattr+k] = v
+		}
+		whyOnlyPAX = "only PAX supports Xattrs"
+		format.mayOnlyBe(FormatPAX)
+	}
+	if len(h.PAXRecords) > 0 {
+		for k, v := range h.PAXRecords {
+			switch _, exists := paxHdrs[k]; {
+			case exists:
+				continue // Do not overwrite existing records
+			case h.Typeflag == TypeXGlobalHeader:
+				paxHdrs[k] = v // Copy all records
+			case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse):
+				paxHdrs[k] = v // Ignore local records that may conflict
+			}
+		}
+		whyOnlyPAX = "only PAX supports PAXRecords"
+		format.mayOnlyBe(FormatPAX)
+	}
+	for k, v := range paxHdrs {
+		if !validPAXRecord(k, v) {
+			return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)}
+		}
+	}
+
+	// Check sparse files.
+	if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse {
+		if isHeaderOnlyType(h.Typeflag) {
+			return FormatUnknown, nil, headerError{"header-only type cannot be sparse"}
+		}
+		if !validateSparseEntries(h.SparseHoles, h.Size) {
+			return FormatUnknown, nil, headerError{"invalid sparse holes"}
+		}
+		if h.Typeflag == TypeGNUSparse {
+			whyOnlyGNU = "only GNU supports TypeGNUSparse"
+			format.mayOnlyBe(FormatGNU)
+		} else {
+			whyNoGNU = "GNU supports sparse files only with TypeGNUSparse"
+			format.mustNotBe(FormatGNU)
+		}
+		whyNoUSTAR = "USTAR does not support sparse files"
+		format.mustNotBe(FormatUSTAR)
+	}
+
+	// Check desired format.
+	if wantFormat := h.Format; wantFormat != FormatUnknown {
+		if wantFormat.has(FormatPAX) && !preferPAX {
+			wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too
+		}
+		format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted
+	}
+	if format == FormatUnknown {
+		switch h.Format {
+		case FormatUSTAR:
+			err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU}
+		case FormatPAX:
+			err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU}
+		case FormatGNU:
+			err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX}
+		default:
+			err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU}
+		}
+	}
+	return format, paxHdrs, err
+}
+
+var sysSparseDetect func(f *os.File) (sparseHoles, error)
+var sysSparsePunch func(f *os.File, sph sparseHoles) error
+
+// DetectSparseHoles searches for holes within f to populate SparseHoles
+// on supported operating systems and filesystems.
+// The file offset is cleared to zero.
+//
+// When packing a sparse file, DetectSparseHoles should be called prior to
+// serializing the header to the archive with Writer.WriteHeader.
+func (h *Header) DetectSparseHoles(f *os.File) (err error) {
+	defer func() {
+		if _, serr := f.Seek(0, io.SeekStart); err == nil {
+			err = serr
+		}
+	}()
+
+	h.SparseHoles = nil
+	if sysSparseDetect != nil {
+		sph, err := sysSparseDetect(f)
+		h.SparseHoles = sph
+		return err
+	}
+	return nil
+}
+
+// PunchSparseHoles destroys the contents of f, and prepares a sparse file
+// (on supported operating systems and filesystems)
+// with holes punched according to SparseHoles.
+// The file offset is cleared to zero.
+//
+// When extracting a sparse file, PunchSparseHoles should be called prior to
+// populating the content of a file with Reader.WriteTo.
+func (h *Header) PunchSparseHoles(f *os.File) (err error) {
+	defer func() {
+		if _, serr := f.Seek(0, io.SeekStart); err == nil {
+			err = serr
+		}
+	}()
+
+	if err := f.Truncate(0); err != nil {
+		return err
+	}
+
+	var size int64
+	if len(h.SparseHoles) > 0 {
+		size = h.SparseHoles[len(h.SparseHoles)-1].endOffset()
+	}
+	if !validateSparseEntries(h.SparseHoles, size) {
+		return errors.New("tar: invalid sparse holes")
+	}
+
+	if size == 0 {
+		return nil // For non-sparse files, do nothing (other than Truncate)
+	}
+	if sysSparsePunch != nil {
+		return sysSparsePunch(f, h.SparseHoles)
+	}
+	return f.Truncate(size)
 }
 
 // FileInfo returns an os.FileInfo for the Header.
@@ -92,63 +622,43 @@
 
 	// Set setuid, setgid and sticky bits.
 	if fi.h.Mode&c_ISUID != 0 {
-		// setuid
 		mode |= os.ModeSetuid
 	}
 	if fi.h.Mode&c_ISGID != 0 {
-		// setgid
 		mode |= os.ModeSetgid
 	}
 	if fi.h.Mode&c_ISVTX != 0 {
-		// sticky
 		mode |= os.ModeSticky
 	}
 
-	// Set file mode bits.
-	// clear perm, setuid, setgid and sticky bits.
-	m := os.FileMode(fi.h.Mode) &^ 07777
-	if m == c_ISDIR {
-		// directory
+	// Set file mode bits; clear perm, setuid, setgid, and sticky bits.
+	switch m := os.FileMode(fi.h.Mode) &^ 07777; m {
+	case c_ISDIR:
 		mode |= os.ModeDir
-	}
-	if m == c_ISFIFO {
-		// named pipe (FIFO)
+	case c_ISFIFO:
 		mode |= os.ModeNamedPipe
-	}
-	if m == c_ISLNK {
-		// symbolic link
+	case c_ISLNK:
 		mode |= os.ModeSymlink
-	}
-	if m == c_ISBLK {
-		// device file
+	case c_ISBLK:
 		mode |= os.ModeDevice
-	}
-	if m == c_ISCHR {
-		// Unix character device
+	case c_ISCHR:
 		mode |= os.ModeDevice
 		mode |= os.ModeCharDevice
-	}
-	if m == c_ISSOCK {
-		// Unix domain socket
+	case c_ISSOCK:
 		mode |= os.ModeSocket
 	}
 
 	switch fi.h.Typeflag {
 	case TypeSymlink:
-		// symbolic link
 		mode |= os.ModeSymlink
 	case TypeChar:
-		// character device node
 		mode |= os.ModeDevice
 		mode |= os.ModeCharDevice
 	case TypeBlock:
-		// block device node
 		mode |= os.ModeDevice
 	case TypeDir:
-		// directory
 		mode |= os.ModeDir
 	case TypeFifo:
-		// fifo node
 		mode |= os.ModeNamedPipe
 	}
 
@@ -158,11 +668,15 @@
 // sysStat, if non-nil, populates h from system-dependent fields of fi.
 var sysStat func(fi os.FileInfo, h *Header) error
 
-// Mode constants from the tar spec.
 const (
-	c_ISUID  = 04000   // Set uid
-	c_ISGID  = 02000   // Set gid
-	c_ISVTX  = 01000   // Save text (sticky bit)
+	// Mode constants from the USTAR spec:
+	// See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+	c_ISUID = 04000 // Set uid
+	c_ISGID = 02000 // Set gid
+	c_ISVTX = 01000 // Save text (sticky bit)
+
+	// Common Unix mode constants; these are not defined in any common tar standard.
+	// Header.FileInfo understands these, but FileInfoHeader will never produce these.
 	c_ISDIR  = 040000  // Directory
 	c_ISFIFO = 010000  // FIFO
 	c_ISREG  = 0100000 // Regular file
@@ -172,30 +686,16 @@
 	c_ISSOCK = 0140000 // Socket
 )
 
-// Keywords for the PAX Extended Header
-const (
-	paxAtime    = "atime"
-	paxCharset  = "charset"
-	paxComment  = "comment"
-	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
-	paxGid      = "gid"
-	paxGname    = "gname"
-	paxLinkpath = "linkpath"
-	paxMtime    = "mtime"
-	paxPath     = "path"
-	paxSize     = "size"
-	paxUid      = "uid"
-	paxUname    = "uname"
-	paxXattr    = "SCHILY.xattr."
-	paxNone     = ""
-)
-
 // FileInfoHeader creates a partially-populated Header from fi.
 // If fi describes a symlink, FileInfoHeader records link as the link target.
 // If fi describes a directory, a slash is appended to the name.
-// Because os.FileInfo's Name method returns only the base name of
-// the file it describes, it may be necessary to modify the Name field
-// of the returned header to provide the full path name of the file.
+//
+// Since os.FileInfo's Name method only returns the base name of
+// the file it describes, it may be necessary to modify Header.Name
+// to provide the full path name of the file.
+//
+// This function does not populate Header.SparseHoles;
+// for sparse file support, additionally call Header.DetectSparseHoles.
 func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
 	if fi == nil {
 		return nil, errors.New("tar: FileInfo is nil")
@@ -208,32 +708,26 @@
 	}
 	switch {
 	case fm.IsRegular():
-		h.Mode |= c_ISREG
 		h.Typeflag = TypeReg
 		h.Size = fi.Size()
 	case fi.IsDir():
 		h.Typeflag = TypeDir
-		h.Mode |= c_ISDIR
 		h.Name += "/"
 	case fm&os.ModeSymlink != 0:
 		h.Typeflag = TypeSymlink
-		h.Mode |= c_ISLNK
 		h.Linkname = link
 	case fm&os.ModeDevice != 0:
 		if fm&os.ModeCharDevice != 0 {
-			h.Mode |= c_ISCHR
 			h.Typeflag = TypeChar
 		} else {
-			h.Mode |= c_ISBLK
 			h.Typeflag = TypeBlock
 		}
 	case fm&os.ModeNamedPipe != 0:
 		h.Typeflag = TypeFifo
-		h.Mode |= c_ISFIFO
 	case fm&os.ModeSocket != 0:
-		h.Mode |= c_ISSOCK
+		return nil, fmt.Errorf("tar: sockets not supported")
 	default:
-		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+		return nil, fmt.Errorf("tar: unknown file mode %v", fm)
 	}
 	if fm&os.ModeSetuid != 0 {
 		h.Mode |= c_ISUID
@@ -267,6 +761,15 @@
 			h.Size = 0
 			h.Linkname = sys.Linkname
 		}
+		if sys.SparseHoles != nil {
+			h.SparseHoles = append([]SparseEntry{}, sys.SparseHoles...)
+		}
+		if sys.PAXRecords != nil {
+			h.PAXRecords = make(map[string]string)
+			for k, v := range sys.PAXRecords {
+				h.PAXRecords[k] = v
+			}
+		}
 	}
 	if sysStat != nil {
 		return h, sysStat(fi, h)
@@ -284,3 +787,10 @@
 		return false
 	}
 }
+
+func min(a, b int64) int64 {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/format.go b/vendor/github.com/dmcgowan/go-tar/format.go
index c2c9910..cf12895 100644
--- a/vendor/github.com/dmcgowan/go-tar/format.go
+++ b/vendor/github.com/dmcgowan/go-tar/format.go
@@ -4,38 +4,131 @@
 
 package tar
 
+import "strings"
+
+// Format represents the tar archive format.
+//
+// The original tar format was introduced in Unix V7.
+// Since then, there have been multiple competing formats attempting to
+// standardize or extend the V7 format to overcome its limitations.
+// The most common formats are the USTAR, PAX, and GNU formats,
+// each with their own advantages and limitations.
+//
+// The following table captures the capabilities of each format:
+//
+//	                  |  USTAR |       PAX |       GNU
+//	------------------+--------+-----------+----------
+//	Name              |   256B | unlimited | unlimited
+//	Linkname          |   100B | unlimited | unlimited
+//	Size              | uint33 | unlimited |    uint89
+//	Mode              | uint21 |    uint21 |    uint57
+//	Uid/Gid           | uint21 | unlimited |    uint57
+//	Uname/Gname       |    32B | unlimited |       32B
+//	ModTime           | uint33 | unlimited |     int89
+//	AccessTime        |    n/a | unlimited |     int89
+//	ChangeTime        |    n/a | unlimited |     int89
+//	Devmajor/Devminor | uint21 |    uint21 |    uint57
+//	------------------+--------+-----------+----------
+//	string encoding   |  ASCII |     UTF-8 |    binary
+//	sub-second times  |     no |       yes |        no
+//	sparse files      |     no |       yes |       yes
+//
+// The table's upper portion shows the Header fields, where each format reports
+// the maximum number of bytes allowed for each string field and
+// the integer type used to store each numeric field
+// (where timestamps are stored as the number of seconds since the Unix epoch).
+//
+// The table's lower portion shows specialized features of each format,
+// such as supported string encodings, support for sub-second timestamps,
+// or support for sparse files.
+type Format int
+
 // Constants to identify various tar formats.
 const (
-	// The format is unknown.
-	formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc...
+	// Deliberately hide the meaning of constants from public API.
+	_ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc...
+
+	// FormatUnknown indicates that the format is unknown.
+	FormatUnknown
 
 	// The format of the original Unix V7 tar tool prior to standardization.
 	formatV7
 
-	// The old and new GNU formats, which are incompatible with USTAR.
-	// This does cover the old GNU sparse extension.
-	// This does not cover the GNU sparse extensions using PAX headers,
-	// versions 0.0, 0.1, and 1.0; these fall under the PAX format.
-	formatGNU
+	// FormatUSTAR represents the USTAR header format defined in POSIX.1-1988.
+	//
+	// While this format is compatible with most tar readers,
+	// the format has several limitations making it unsuitable for some usages.
+	// Most notably, it cannot support sparse files, files larger than 8GiB,
+	// filenames larger than 256 characters, and non-ASCII filenames.
+	//
+	// Reference:
+	//	http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+	FormatUSTAR
+
+	// FormatPAX represents the PAX header format defined in POSIX.1-2001.
+	//
+	// PAX extends USTAR by writing a special file with Typeflag TypeXHeader
+	// preceding the original header. This file contains a set of key-value
+	// records, which are used to overcome USTAR's shortcomings, in addition to
+	// providing the ability to have sub-second resolution for timestamps.
+	//
+	// Some newer formats add their own extensions to PAX by defining their
+	// own keys and assigning certain semantic meaning to the associated values.
+	// For example, sparse file support in PAX is implemented using keys
+	// defined by the GNU manual (e.g., "GNU.sparse.map").
+	//
+	// Reference:
+	//	http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html
+	FormatPAX
+
+	// FormatGNU represents the GNU header format.
+	//
+	// The GNU header format is older than the USTAR and PAX standards and
+	// is not compatible with them. The GNU format supports
+	// arbitrary file sizes, filenames of arbitrary encoding and length,
+	// sparse files, and other features.
+	//
+	// It is recommended that PAX be chosen over GNU unless the target
+	// application can only parse GNU formatted archives.
+	//
+	// Reference:
+	//	http://www.gnu.org/software/tar/manual/html_node/Standard.html
+	FormatGNU
 
 	// Schily's tar format, which is incompatible with USTAR.
 	// This does not cover STAR extensions to the PAX format; these fall under
 	// the PAX format.
 	formatSTAR
 
-	// USTAR is the former standardization of tar defined in POSIX.1-1988.
-	// This is incompatible with the GNU and STAR formats.
-	formatUSTAR
-
-	// PAX is the latest standardization of tar defined in POSIX.1-2001.
-	// This is an extension of USTAR and is "backwards compatible" with it.
-	//
-	// Some newer formats add their own extensions to PAX, such as GNU sparse
-	// files and SCHILY extended attributes. Since they are backwards compatible
-	// with PAX, they will be labelled as "PAX".
-	formatPAX
+	formatMax
 )
 
+func (f Format) has(f2 Format) bool   { return f&f2 != 0 }
+func (f *Format) mayBe(f2 Format)     { *f |= f2 }
+func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 }
+func (f *Format) mustNotBe(f2 Format) { *f &^= f2 }
+
+var formatNames = map[Format]string{
+	formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR",
+}
+
+func (f Format) String() string {
+	var ss []string
+	for f2 := Format(1); f2 < formatMax; f2 <<= 1 {
+		if f.has(f2) {
+			ss = append(ss, formatNames[f2])
+		}
+	}
+	switch len(ss) {
+	case 0:
+		return "<unknown>"
+	case 1:
+		return ss[0]
+	default:
+		return "(" + strings.Join(ss, " | ") + ")"
+	}
+}
+
 // Magics used to identify various formats.
 const (
 	magicGNU, versionGNU     = "ustar ", " \x00"
@@ -50,6 +143,12 @@
 	prefixSize = 155 // Max length of the prefix field in USTAR format
 )
 
+// blockPadding computes the number of bytes needed to pad offset up to the
+// nearest block edge where 0 <= n < blockSize.
+func blockPadding(offset int64) (n int64) {
+	return -offset & (blockSize - 1)
+}
+
 var zeroBlock block
 
 type block [blockSize]byte
@@ -63,14 +162,14 @@
 
 // GetFormat checks that the block is a valid tar header based on the checksum.
 // It then attempts to guess the specific format based on magic values.
-// If the checksum fails, then formatUnknown is returned.
-func (b *block) GetFormat() (format int) {
+// If the checksum fails, then FormatUnknown is returned.
+func (b *block) GetFormat() Format {
 	// Verify checksum.
 	var p parser
 	value := p.parseOctal(b.V7().Chksum())
 	chksum1, chksum2 := b.ComputeChecksum()
 	if p.err != nil || (value != chksum1 && value != chksum2) {
-		return formatUnknown
+		return FormatUnknown
 	}
 
 	// Guess the magic values.
@@ -81,9 +180,9 @@
 	case magic == magicUSTAR && trailer == trailerSTAR:
 		return formatSTAR
 	case magic == magicUSTAR:
-		return formatUSTAR
+		return FormatUSTAR | FormatPAX
 	case magic == magicGNU && version == versionGNU:
-		return formatGNU
+		return FormatGNU
 	default:
 		return formatV7
 	}
@@ -91,19 +190,19 @@
 
 // SetFormat writes the magic values necessary for specified format
 // and then updates the checksum accordingly.
-func (b *block) SetFormat(format int) {
+func (b *block) SetFormat(format Format) {
 	// Set the magic values.
-	switch format {
-	case formatV7:
+	switch {
+	case format.has(formatV7):
 		// Do nothing.
-	case formatGNU:
+	case format.has(FormatGNU):
 		copy(b.GNU().Magic(), magicGNU)
 		copy(b.GNU().Version(), versionGNU)
-	case formatSTAR:
+	case format.has(formatSTAR):
 		copy(b.STAR().Magic(), magicUSTAR)
 		copy(b.STAR().Version(), versionUSTAR)
 		copy(b.STAR().Trailer(), trailerSTAR)
-	case formatUSTAR, formatPAX:
+	case format.has(FormatUSTAR | FormatPAX):
 		copy(b.USTAR().Magic(), magicUSTAR)
 		copy(b.USTAR().Version(), versionUSTAR)
 	default:
@@ -134,6 +233,11 @@
 	return unsigned, signed
 }
 
+// Reset clears the block with all zeros.
+func (b *block) Reset() {
+	*b = block{}
+}
+
 type headerV7 [blockSize]byte
 
 func (h *headerV7) Name() []byte     { return h[000:][:100] }
@@ -187,11 +291,11 @@
 
 type sparseArray []byte
 
-func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) }
+func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) }
 func (s sparseArray) IsExtended() []byte     { return s[24*s.MaxEntries():][:1] }
 func (s sparseArray) MaxEntries() int        { return len(s) / 24 }
 
-type sparseNode []byte
+type sparseElem []byte
 
-func (s sparseNode) Offset() []byte   { return s[00:][:12] }
-func (s sparseNode) NumBytes() []byte { return s[12:][:12] }
+func (s sparseElem) Offset() []byte { return s[00:][:12] }
+func (s sparseElem) Length() []byte { return s[12:][:12] }
diff --git a/vendor/github.com/dmcgowan/go-tar/reader.go b/vendor/github.com/dmcgowan/go-tar/reader.go
index a6142c6..1d06730 100644
--- a/vendor/github.com/dmcgowan/go-tar/reader.go
+++ b/vendor/github.com/dmcgowan/go-tar/reader.go
@@ -4,33 +4,23 @@
 
 package tar
 
-// TODO(dsymonds):
-//   - pax extensions
-
 import (
 	"bytes"
-	"errors"
 	"io"
 	"io/ioutil"
-	"math"
 	"strconv"
 	"strings"
 	"time"
 )
 
-var (
-	ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
+// Reader provides sequential access to the contents of a tar archive.
+// Reader.Next advances to the next file in the archive (including the first),
+// and then Reader can be treated as an io.Reader to access the file's data.
 type Reader struct {
 	r    io.Reader
-	pad  int64          // amount of padding (ignored) after current file entry
-	curr numBytesReader // reader for current file entry
-	blk  block          // buffer to use as temporary local storage
+	pad  int64      // Amount of padding (ignored) after current file entry
+	curr fileReader // Reader for current file entry
+	blk  block      // Buffer to use as temporary local storage
 
 	// err is a persistent error.
 	// It is only the responsibility of every exported method of Reader to
@@ -38,68 +28,21 @@
 	err error
 }
 
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
+type fileReader interface {
 	io.Reader
-	numBytes() int64
-}
+	fileState
 
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
-	r  io.Reader // underlying reader
-	nb int64     // number of unread bytes for current file entry
+	WriteTo(io.Writer) (int64, error)
 }
 
-// A sparseFileReader is a numBytesReader for reading sparse file data from a
-// tar archive.
-type sparseFileReader struct {
-	rfr   numBytesReader // Reads the sparse-encoded file data
-	sp    []sparseEntry  // The sparse map for the file
-	pos   int64          // Keeps track of file position
-	total int64          // Total size of the file
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-//
-// Sparse files are represented using a series of sparseEntrys.
-// Despite the name, a sparseEntry represents an actual data fragment that
-// references data found in the underlying archive stream. All regions not
-// covered by a sparseEntry are logically filled with zeros.
-//
-// For example, if the underlying raw file contains the 10-byte data:
-//	var compactData = "abcdefgh"
-//
-// And the sparse map has the following entries:
-//	var sp = []sparseEntry{
-//		{offset: 2,  numBytes: 5} // Data fragment for [2..7]
-//		{offset: 18, numBytes: 3} // Data fragment for [18..21]
-//	}
-//
-// Then the content of the resulting sparse file with a "real" size of 25 is:
-//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type sparseEntry struct {
-	offset   int64 // Starting position of the fragment
-	numBytes int64 // Length of the fragment
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
-	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
-	paxGNUSparseOffset    = "GNU.sparse.offset"
-	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
-	paxGNUSparseMap       = "GNU.sparse.map"
-	paxGNUSparseName      = "GNU.sparse.name"
-	paxGNUSparseMajor     = "GNU.sparse.major"
-	paxGNUSparseMinor     = "GNU.sparse.minor"
-	paxGNUSparseSize      = "GNU.sparse.size"
-	paxGNUSparseRealSize  = "GNU.sparse.realsize"
-)
-
 // NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+func NewReader(r io.Reader) *Reader {
+	return &Reader{r: r, curr: &regFileReader{r, 0}}
+}
 
 // Next advances to the next entry in the tar archive.
+// The Header.Size determines how many bytes can be read for the next file.
+// Any remaining data in the current file is automatically discarded.
 //
 // io.EOF is returned at the end of the input.
 func (tr *Reader) Next() (*Header, error) {
@@ -112,18 +55,26 @@
 }
 
 func (tr *Reader) next() (*Header, error) {
-	var extHdrs map[string]string
+	var paxHdrs map[string]string
+	var gnuLongName, gnuLongLink string
 
 	// Externally, Next iterates through the tar archive as if it is a series of
 	// files. Internally, the tar format often uses fake "files" to add meta
 	// data that describes the next file. These meta data "files" should not
 	// normally be visible to the outside. As such, this loop iterates through
 	// one or more "header files" until it finds a "normal file".
+	format := FormatUSTAR | FormatPAX | FormatGNU
 loop:
 	for {
-		if err := tr.skipUnread(); err != nil {
+		// Discard the remainder of the file and any padding.
+		if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil {
 			return nil, err
 		}
+		if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
+			return nil, err
+		}
+		tr.pad = 0
+
 		hdr, rawHdr, err := tr.readHeader()
 		if err != nil {
 			return nil, err
@@ -131,43 +82,57 @@
 		if err := tr.handleRegularFile(hdr); err != nil {
 			return nil, err
 		}
+		format.mayOnlyBe(hdr.Format)
 
 		// Check for PAX/GNU special headers and files.
 		switch hdr.Typeflag {
-		case TypeXHeader:
-			extHdrs, err = parsePAX(tr)
+		case TypeXHeader, TypeXGlobalHeader:
+			format.mayOnlyBe(FormatPAX)
+			paxHdrs, err = parsePAX(tr)
 			if err != nil {
 				return nil, err
 			}
+			if hdr.Typeflag == TypeXGlobalHeader {
+				mergePAX(hdr, paxHdrs)
+				return &Header{
+					Typeflag:   hdr.Typeflag,
+					Xattrs:     hdr.Xattrs,
+					PAXRecords: hdr.PAXRecords,
+					Format:     format,
+				}, nil
+			}
 			continue loop // This is a meta header affecting the next header
 		case TypeGNULongName, TypeGNULongLink:
+			format.mayOnlyBe(FormatGNU)
 			realname, err := ioutil.ReadAll(tr)
 			if err != nil {
 				return nil, err
 			}
 
-			// Convert GNU extensions to use PAX headers.
-			if extHdrs == nil {
-				extHdrs = make(map[string]string)
-			}
 			var p parser
 			switch hdr.Typeflag {
 			case TypeGNULongName:
-				extHdrs[paxPath] = p.parseString(realname)
+				gnuLongName = p.parseString(realname)
 			case TypeGNULongLink:
-				extHdrs[paxLinkpath] = p.parseString(realname)
-			}
-			if p.err != nil {
-				return nil, p.err
+				gnuLongLink = p.parseString(realname)
 			}
 			continue loop // This is a meta header affecting the next header
 		default:
 			// The old GNU sparse format is handled here since it is technically
 			// just a regular file with additional attributes.
 
-			if err := mergePAX(hdr, extHdrs); err != nil {
+			if err := mergePAX(hdr, paxHdrs); err != nil {
 				return nil, err
 			}
+			if gnuLongName != "" {
+				hdr.Name = gnuLongName
+			}
+			if gnuLongLink != "" {
+				hdr.Linkname = gnuLongLink
+			}
+			if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") {
+				hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
+			}
 
 			// The extended headers may have updated the size.
 			// Thus, setup the regFileReader again after merging PAX headers.
@@ -177,9 +142,15 @@
 
 			// Sparse formats rely on being able to read from the logical data
 			// section; there must be a preceding call to handleRegularFile.
-			if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil {
+			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
 				return nil, err
 			}
+
+			// Set the final guess at the format.
+			if format.has(FormatUSTAR) && format.has(FormatPAX) {
+				format.mayOnlyBe(FormatUSTAR)
+			}
+			hdr.Format = format
 			return hdr, nil // This is a file, so stop
 		}
 	}
@@ -197,105 +168,87 @@
 		return ErrHeader
 	}
 
-	tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+	tr.pad = blockPadding(nb)
 	tr.curr = &regFileReader{r: tr.r, nb: nb}
 	return nil
 }
 
 // handleSparseFile checks if the current file is a sparse format of any type
 // and sets the curr reader appropriately.
-func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error {
-	var sp []sparseEntry
+func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
+	var spd sparseDatas
 	var err error
 	if hdr.Typeflag == TypeGNUSparse {
-		sp, err = tr.readOldGNUSparseMap(hdr, rawHdr)
-		if err != nil {
-			return err
-		}
+		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
 	} else {
-		sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
-		if err != nil {
-			return err
-		}
+		spd, err = tr.readGNUSparsePAXHeaders(hdr)
 	}
 
 	// If sp is non-nil, then this is a sparse file.
-	// Note that it is possible for len(sp) to be zero.
-	if sp != nil {
-		tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size)
+	// Note that it is possible for len(sp) == 0.
+	if err == nil && spd != nil {
+		if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
+			return ErrHeader
+		}
+		sph := invertSparseEntries(spd, hdr.Size)
+		tr.curr = &sparseFileReader{tr.curr, sph, 0}
+		hdr.SparseHoles = append([]SparseEntry{}, sph...)
 	}
 	return err
 }
 
-// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
-// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
-// be treated as a regular file.
-func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
-	var sparseFormat string
-
-	// Check for sparse format indicators
-	major, majorOk := headers[paxGNUSparseMajor]
-	minor, minorOk := headers[paxGNUSparseMinor]
-	sparseName, sparseNameOk := headers[paxGNUSparseName]
-	_, sparseMapOk := headers[paxGNUSparseMap]
-	sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
-	sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
-
-	// Identify which, if any, sparse format applies from which PAX headers are set
-	if majorOk && minorOk {
-		sparseFormat = major + "." + minor
-	} else if sparseNameOk && sparseMapOk {
-		sparseFormat = "0.1"
-	} else if sparseSizeOk {
-		sparseFormat = "0.0"
-	} else {
-		// Not a PAX format GNU sparse file.
-		return nil, nil
+// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
+// If they are found, then this function reads the sparse map and returns it.
+// This assumes that 0.0 headers have already been converted to 0.1 headers
+// by the PAX header parsing logic.
+func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
+	// Identify the version of GNU headers.
+	var is1x0 bool
+	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
+	switch {
+	case major == "0" && (minor == "0" || minor == "1"):
+		is1x0 = false
+	case major == "1" && minor == "0":
+		is1x0 = true
+	case major != "" || minor != "":
+		return nil, nil // Unknown GNU sparse PAX version
+	case hdr.PAXRecords[paxGNUSparseMap] != "":
+		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
+	default:
+		return nil, nil // Not a PAX format GNU sparse file.
 	}
+	hdr.Format.mayOnlyBe(FormatPAX)
 
-	// Check for unknown sparse format
-	if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
-		return nil, nil
+	// Update hdr from GNU sparse PAX headers.
+	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
+		hdr.Name = name
 	}
-
-	// Update hdr from GNU sparse PAX headers
-	if sparseNameOk {
-		hdr.Name = sparseName
+	size := hdr.PAXRecords[paxGNUSparseSize]
+	if size == "" {
+		size = hdr.PAXRecords[paxGNUSparseRealSize]
 	}
-	if sparseSizeOk {
-		realSize, err := strconv.ParseInt(sparseSize, 10, 64)
+	if size != "" {
+		n, err := strconv.ParseInt(size, 10, 64)
 		if err != nil {
 			return nil, ErrHeader
 		}
-		hdr.Size = realSize
-	} else if sparseRealSizeOk {
-		realSize, err := strconv.ParseInt(sparseRealSize, 10, 64)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		hdr.Size = realSize
+		hdr.Size = n
 	}
 
-	// Set up the sparse map, according to the particular sparse format in use
-	var sp []sparseEntry
-	var err error
-	switch sparseFormat {
-	case "0.0", "0.1":
-		sp, err = readGNUSparseMap0x1(headers)
-	case "1.0":
-		sp, err = readGNUSparseMap1x0(tr.curr)
+	// Read the sparse map according to the appropriate format.
+	if is1x0 {
+		return readGNUSparseMap1x0(tr.curr)
 	}
-	return sp, err
+	return readGNUSparseMap0x1(hdr.PAXRecords)
 }
 
-// mergePAX merges well known headers according to PAX standard.
-// In general headers with the same name as those found
-// in the header struct overwrite those found in the header
-// struct with higher precision or longer values. Esp. useful
-// for name and linkname fields.
-func mergePAX(hdr *Header, headers map[string]string) (err error) {
-	var id64 int64
-	for k, v := range headers {
+// mergePAX merges paxHdrs into hdr for all relevant fields of Header.
+func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
+	for k, v := range paxHdrs {
+		if v == "" {
+			continue // Keep the original USTAR value
+		}
+		var id64 int64
 		switch k {
 		case paxPath:
 			hdr.Name = v
@@ -320,17 +273,18 @@
 		case paxSize:
 			hdr.Size, err = strconv.ParseInt(v, 10, 64)
 		default:
-			if strings.HasPrefix(k, paxXattr) {
+			if strings.HasPrefix(k, paxSchilyXattr) {
 				if hdr.Xattrs == nil {
 					hdr.Xattrs = make(map[string]string)
 				}
-				hdr.Xattrs[k[len(paxXattr):]] = v
+				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
 			}
 		}
 		if err != nil {
 			return ErrHeader
 		}
 	}
+	hdr.PAXRecords = paxHdrs
 	return nil
 }
 
@@ -348,7 +302,7 @@
 	// headers since 0.0 headers were not PAX compliant.
 	var sparseMap []string
 
-	extHdrs := make(map[string]string)
+	paxHdrs := make(map[string]string)
 	for len(sbuf) > 0 {
 		key, value, residual, err := parsePAXRecord(sbuf)
 		if err != nil {
@@ -366,58 +320,13 @@
 			}
 			sparseMap = append(sparseMap, value)
 		default:
-			// According to PAX specification, a value is stored only if it is
-			// non-empty. Otherwise, the key is deleted.
-			if len(value) > 0 {
-				extHdrs[key] = value
-			} else {
-				delete(extHdrs, key)
-			}
+			paxHdrs[key] = value
 		}
 	}
 	if len(sparseMap) > 0 {
-		extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
+		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
 	}
-	return extHdrs, nil
-}
-
-// skipUnread skips any unread bytes in the existing file entry, as well as any
-// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
-// encountered in the data portion; it is okay to hit io.EOF in the padding.
-//
-// Note that this function still works properly even when sparse files are being
-// used since numBytes returns the bytes remaining in the underlying io.Reader.
-func (tr *Reader) skipUnread() error {
-	dataSkip := tr.numBytes()      // Number of data bytes to skip
-	totalSkip := dataSkip + tr.pad // Total number of bytes to skip
-	tr.curr, tr.pad = nil, 0
-
-	// If possible, Seek to the last byte before the end of the data section.
-	// Do this because Seek is often lazy about reporting errors; this will mask
-	// the fact that the tar stream may be truncated. We can rely on the
-	// io.CopyN done shortly afterwards to trigger any IO errors.
-	var seekSkipped int64 // Number of bytes skipped via Seek
-	if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
-		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
-		// io.Seeker, but calling Seek always returns an error and performs
-		// no action. Thus, we try an innocent seek to the current position
-		// to see if Seek is really supported.
-		pos1, err := sr.Seek(0, io.SeekCurrent)
-		if err == nil {
-			// Seek seems supported, so perform the real Seek.
-			pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent)
-			if err != nil {
-				return err
-			}
-			seekSkipped = pos2 - pos1
-		}
-	}
-
-	copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
-	if err == io.EOF && seekSkipped+copySkipped < dataSkip {
-		err = io.ErrUnexpectedEOF
-	}
-	return err
+	return paxHdrs, nil
 }
 
 // readHeader reads the next block header and assumes that the underlying reader
@@ -445,7 +354,7 @@
 
 	// Verify the header matches a known format.
 	format := tr.blk.GetFormat()
-	if format == formatUnknown {
+	if format == FormatUnknown {
 		return nil, nil, ErrHeader
 	}
 
@@ -454,37 +363,86 @@
 
 	// Unpack the V7 header.
 	v7 := tr.blk.V7()
+	hdr.Typeflag = v7.TypeFlag()[0]
 	hdr.Name = p.parseString(v7.Name())
+	hdr.Linkname = p.parseString(v7.LinkName())
+	hdr.Size = p.parseNumeric(v7.Size())
 	hdr.Mode = p.parseNumeric(v7.Mode())
 	hdr.Uid = int(p.parseNumeric(v7.UID()))
 	hdr.Gid = int(p.parseNumeric(v7.GID()))
-	hdr.Size = p.parseNumeric(v7.Size())
 	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
-	hdr.Typeflag = v7.TypeFlag()[0]
-	hdr.Linkname = p.parseString(v7.LinkName())
 
 	// Unpack format specific fields.
 	if format > formatV7 {
 		ustar := tr.blk.USTAR()
 		hdr.Uname = p.parseString(ustar.UserName())
 		hdr.Gname = p.parseString(ustar.GroupName())
-		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
-			hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
-			hdr.Devminor = p.parseNumeric(ustar.DevMinor())
-		}
+		hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
+		hdr.Devminor = p.parseNumeric(ustar.DevMinor())
 
 		var prefix string
-		switch format {
-		case formatUSTAR, formatGNU:
-			// TODO(dsnet): Do not use the prefix field for the GNU format!
-			// See golang.org/issues/12594
+		switch {
+		case format.has(FormatUSTAR | FormatPAX):
+			hdr.Format = format
 			ustar := tr.blk.USTAR()
 			prefix = p.parseString(ustar.Prefix())
-		case formatSTAR:
+
+			// For Format detection, check if block is properly formatted since
+			// the parser is more liberal than what USTAR actually permits.
+			notASCII := func(r rune) bool { return r >= 0x80 }
+			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
+				hdr.Format = FormatUnknown // Non-ASCII characters in block.
+			}
+			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
+			if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) &&
+				nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) {
+				hdr.Format = FormatUnknown // Numeric fields must end in NUL
+			}
+		case format.has(formatSTAR):
 			star := tr.blk.STAR()
 			prefix = p.parseString(star.Prefix())
 			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
 			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
+		case format.has(FormatGNU):
+			hdr.Format = format
+			var p2 parser
+			gnu := tr.blk.GNU()
+			if b := gnu.AccessTime(); b[0] != 0 {
+				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
+			}
+			if b := gnu.ChangeTime(); b[0] != 0 {
+				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
+			}
+
+			// Prior to Go1.8, the Writer had a bug where it would output
+			// an invalid tar file in certain rare situations because the logic
+			// incorrectly believed that the old GNU format had a prefix field.
+			// This is wrong and leads to an output file that mangles the
+			// atime and ctime fields, which are often left unused.
+			//
+			// In order to continue reading tar files created by former, buggy
+			// versions of Go, we skeptically parse the atime and ctime fields.
+			// If we are unable to parse them and the prefix field looks like
+			// an ASCII string, then we fallback on the pre-Go1.8 behavior
+			// of treating these fields as the USTAR prefix field.
+			//
+			// Note that this will not use the fallback logic for all possible
+			// files generated by a pre-Go1.8 toolchain. If the generated file
+			// happened to have a prefix field that parses as valid
+			// atime and ctime fields (e.g., when they are valid octal strings),
+			// then it is impossible to distinguish between a valid GNU file
+			// and an invalid pre-Go1.8 file.
+			//
+			// See https://golang.org/issues/12594
+			// See https://golang.org/issues/21005
+			if p2.err != nil {
+				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
+				ustar := tr.blk.USTAR()
+				if s := p.parseString(ustar.Prefix()); isASCII(s) {
+					prefix = s
+				}
+				hdr.Format = FormatUnknown // Buggy file is not GNU
+			}
 		}
 		if len(prefix) > 0 {
 			hdr.Name = prefix + "/" + hdr.Name
@@ -501,21 +459,22 @@
 // The Header.Size does not reflect the size of any extended headers used.
 // Thus, this function will read from the raw io.Reader to fetch extra headers.
 // This method mutates blk in the process.
-func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) {
+func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
 	// Make sure that the input format is GNU.
 	// Unfortunately, the STAR format also has a sparse header format that uses
 	// the same type flag but has a completely different layout.
-	if blk.GetFormat() != formatGNU {
+	if blk.GetFormat() != FormatGNU {
 		return nil, ErrHeader
 	}
+	hdr.Format.mayOnlyBe(FormatGNU)
 
 	var p parser
 	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
 	if p.err != nil {
 		return nil, p.err
 	}
-	var s sparseArray = blk.GNU().Sparse()
-	var sp = make([]sparseEntry, 0, s.MaxEntries())
+	s := blk.GNU().Sparse()
+	spd := make(sparseDatas, 0, s.MaxEntries())
 	for {
 		for i := 0; i < s.MaxEntries(); i++ {
 			// This termination condition is identical to GNU and BSD tar.
@@ -523,25 +482,22 @@
 				break // Don't return, need to process extended headers (even if empty)
 			}
 			offset := p.parseNumeric(s.Entry(i).Offset())
-			numBytes := p.parseNumeric(s.Entry(i).NumBytes())
+			length := p.parseNumeric(s.Entry(i).Length())
 			if p.err != nil {
 				return nil, p.err
 			}
-			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+			spd = append(spd, SparseEntry{Offset: offset, Length: length})
 		}
 
 		if s.IsExtended()[0] > 0 {
 			// There are more entries. Read an extension header and parse its entries.
-			if _, err := io.ReadFull(tr.r, blk[:]); err != nil {
-				if err == io.EOF {
-					err = io.ErrUnexpectedEOF
-				}
+			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
 				return nil, err
 			}
 			s = blk.Sparse()
 			continue
 		}
-		return sp, nil // Done
+		return spd, nil // Done
 	}
 }
 
@@ -549,28 +505,27 @@
 // version 1.0. The format of the sparse map consists of a series of
 // newline-terminated numeric fields. The first field is the number of entries
 // and is always present. Following this are the entries, consisting of two
-// fields (offset, numBytes). This function must stop reading at the end
+// fields (offset, length). This function must stop reading at the end
 // boundary of the block containing the last newline.
 //
 // Note that the GNU manual says that numeric values should be encoded in octal
 // format. However, the GNU tar utility itself outputs these values in decimal.
 // As such, this library treats values as being encoded in decimal.
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
-	var cntNewline int64
-	var buf bytes.Buffer
-	var blk = make([]byte, blockSize)
+func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
+	var (
+		cntNewline int64
+		buf        bytes.Buffer
+		blk        block
+	)
 
-	// feedTokens copies data in numBlock chunks from r into buf until there are
+	// feedTokens copies data in blocks from r into buf until there are
 	// at least cnt newlines in buf. It will not read more blocks than needed.
-	var feedTokens = func(cnt int64) error {
-		for cntNewline < cnt {
-			if _, err := io.ReadFull(r, blk); err != nil {
-				if err == io.EOF {
-					err = io.ErrUnexpectedEOF
-				}
+	feedTokens := func(n int64) error {
+		for cntNewline < n {
+			if _, err := mustReadFull(r, blk[:]); err != nil {
 				return err
 			}
-			buf.Write(blk)
+			buf.Write(blk[:])
 			for _, c := range blk {
 				if c == '\n' {
 					cntNewline++
@@ -582,10 +537,10 @@
 
 	// nextToken gets the next token delimited by a newline. This assumes that
 	// at least one newline exists in the buffer.
-	var nextToken = func() string {
+	nextToken := func() string {
 		cntNewline--
 		tok, _ := buf.ReadString('\n')
-		return tok[:len(tok)-1] // Cut off newline
+		return strings.TrimRight(tok, "\n")
 	}
 
 	// Parse for the number of entries.
@@ -604,80 +559,67 @@
 	if err := feedTokens(2 * numEntries); err != nil {
 		return nil, err
 	}
-	sp := make([]sparseEntry, 0, numEntries)
+	spd := make(sparseDatas, 0, numEntries)
 	for i := int64(0); i < numEntries; i++ {
-		offset, err := strconv.ParseInt(nextToken(), 10, 64)
-		if err != nil {
+		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
+		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
+		if err1 != nil || err2 != nil {
 			return nil, ErrHeader
 		}
-		numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+		spd = append(spd, SparseEntry{Offset: offset, Length: length})
 	}
-	return sp, nil
+	return spd, nil
 }
 
 // readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
 // version 0.1. The sparse map is stored in the PAX headers.
-func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
 	// Get number of entries.
 	// Use integer overflow resistant math to check this.
-	numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+	numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
 	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
 	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
 		return nil, ErrHeader
 	}
 
 	// There should be two numbers in sparseMap for each entry.
-	sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
+	sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
+	if len(sparseMap) == 1 && sparseMap[0] == "" {
+		sparseMap = sparseMap[:0]
+	}
 	if int64(len(sparseMap)) != 2*numEntries {
 		return nil, ErrHeader
 	}
 
 	// Loop through the entries in the sparse map.
 	// numEntries is trusted now.
-	sp := make([]sparseEntry, 0, numEntries)
-	for i := int64(0); i < numEntries; i++ {
-		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
-		if err != nil {
+	spd := make(sparseDatas, 0, numEntries)
+	for len(sparseMap) >= 2 {
+		offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
+		length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
+		if err1 != nil || err2 != nil {
 			return nil, ErrHeader
 		}
-		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
-		if err != nil {
-			return nil, ErrHeader
-		}
-		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+		spd = append(spd, SparseEntry{Offset: offset, Length: length})
+		sparseMap = sparseMap[2:]
 	}
-	return sp, nil
+	return spd, nil
 }
 
-// numBytes returns the number of bytes left to read in the current file's entry
-// in the tar archive, or 0 if there is no current file.
-func (tr *Reader) numBytes() int64 {
-	if tr.curr == nil {
-		// No current file, so no bytes
-		return 0
-	}
-	return tr.curr.numBytes()
-}
-
-// Read reads from the current entry in the tar archive.
-// It returns 0, io.EOF when it reaches the end of that entry,
-// until Next is called to advance to the next entry.
+// Read reads from the current file in the tar archive.
+// It returns (0, io.EOF) when it reaches the end of that file,
+// until Next is called to advance to the next file.
 //
-// Calling Read on special types like TypeLink, TypeSymLink, TypeChar,
-// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// If the current file is sparse, then the regions marked as a hole
+// are read back as NUL-bytes.
+//
+// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
 // the Header.Size claims.
 func (tr *Reader) Read(b []byte) (int, error) {
 	if tr.err != nil {
 		return 0, tr.err
 	}
-	if tr.curr == nil {
-		return 0, io.EOF
-	}
-
 	n, err := tr.curr.Read(b)
 	if err != nil && err != io.EOF {
 		tr.err = err
@@ -685,116 +627,226 @@
 	return n, err
 }
 
-func (rfr *regFileReader) Read(b []byte) (n int, err error) {
-	if rfr.nb == 0 {
-		// file consumed
-		return 0, io.EOF
+// WriteTo writes the content of the current file to w.
+// The number of bytes written matches the number of remaining bytes in the current file.
+//
+// If the current file is sparse and w is an io.WriteSeeker,
+// then WriteTo uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are filled with NULs.
+// This always writes the last byte to ensure w is the right size.
+func (tr *Reader) WriteTo(w io.Writer) (int64, error) {
+	if tr.err != nil {
+		return 0, tr.err
 	}
-	if int64(len(b)) > rfr.nb {
-		b = b[0:rfr.nb]
-	}
-	n, err = rfr.r.Read(b)
-	rfr.nb -= int64(n)
-
-	if err == io.EOF && rfr.nb > 0 {
-		err = io.ErrUnexpectedEOF
-	}
-	return
-}
-
-// numBytes returns the number of bytes left to read in the file's data in the tar archive.
-func (rfr *regFileReader) numBytes() int64 {
-	return rfr.nb
-}
-
-// newSparseFileReader creates a new sparseFileReader, but validates all of the
-// sparse entries before doing so.
-func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
-	if total < 0 {
-		return nil, ErrHeader // Total size cannot be negative
-	}
-
-	// Validate all sparse entries. These are the same checks as performed by
-	// the BSD tar utility.
-	for i, s := range sp {
-		switch {
-		case s.offset < 0 || s.numBytes < 0:
-			return nil, ErrHeader // Negative values are never okay
-		case s.offset > math.MaxInt64-s.numBytes:
-			return nil, ErrHeader // Integer overflow with large length
-		case s.offset+s.numBytes > total:
-			return nil, ErrHeader // Region extends beyond the "real" size
-		case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
-			return nil, ErrHeader // Regions can't overlap and must be in order
-		}
-	}
-	return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
-}
-
-// readHole reads a sparse hole ending at endOffset.
-func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
-	n64 := endOffset - sfr.pos
-	if n64 > int64(len(b)) {
-		n64 = int64(len(b))
-	}
-	n := int(n64)
-	for i := 0; i < n; i++ {
-		b[i] = 0
-	}
-	sfr.pos += n64
-	return n
-}
-
-// Read reads the sparse file data in expanded form.
-func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
-	// Skip past all empty fragments.
-	for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
-		sfr.sp = sfr.sp[1:]
-	}
-
-	// If there are no more fragments, then it is possible that there
-	// is one last sparse hole.
-	if len(sfr.sp) == 0 {
-		// This behavior matches the BSD tar utility.
-		// However, GNU tar stops returning data even if sfr.total is unmet.
-		if sfr.pos < sfr.total {
-			return sfr.readHole(b, sfr.total), nil
-		}
-		return 0, io.EOF
-	}
-
-	// In front of a data fragment, so read a hole.
-	if sfr.pos < sfr.sp[0].offset {
-		return sfr.readHole(b, sfr.sp[0].offset), nil
-	}
-
-	// In a data fragment, so read from it.
-	// This math is overflow free since we verify that offset and numBytes can
-	// be safely added when creating the sparseFileReader.
-	endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
-	bytesLeft := endPos - sfr.pos                   // Bytes left in fragment
-	if int64(len(b)) > bytesLeft {
-		b = b[:bytesLeft]
-	}
-
-	n, err = sfr.rfr.Read(b)
-	sfr.pos += int64(n)
-	if err == io.EOF {
-		if sfr.pos < endPos {
-			err = io.ErrUnexpectedEOF // There was supposed to be more data
-		} else if sfr.pos < sfr.total {
-			err = nil // There is still an implicit sparse hole at the end
-		}
-	}
-
-	if sfr.pos == endPos {
-		sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
+	n, err := tr.curr.WriteTo(w)
+	if err != nil {
+		tr.err = err
 	}
 	return n, err
 }
 
-// numBytes returns the number of bytes left to read in the sparse file's
-// sparse-encoded data in the tar archive.
-func (sfr *sparseFileReader) numBytes() int64 {
-	return sfr.rfr.numBytes()
+// regFileReader is a fileReader for reading data from a regular file entry.
+type regFileReader struct {
+	r  io.Reader // Underlying Reader
+	nb int64     // Number of remaining bytes to read
+}
+
+func (fr *regFileReader) Read(b []byte) (n int, err error) {
+	if int64(len(b)) > fr.nb {
+		b = b[:fr.nb]
+	}
+	if len(b) > 0 {
+		n, err = fr.r.Read(b)
+		fr.nb -= int64(n)
+	}
+	switch {
+	case err == io.EOF && fr.nb > 0:
+		return n, io.ErrUnexpectedEOF
+	case err == nil && fr.nb == 0:
+		return n, io.EOF
+	default:
+		return n, err
+	}
+}
+
+func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
+	return io.Copy(w, struct{ io.Reader }{fr})
+}
+
+func (fr regFileReader) LogicalRemaining() int64 {
+	return fr.nb
+}
+
+func (fr regFileReader) PhysicalRemaining() int64 {
+	return fr.nb
+}
+
+// sparseFileReader is a fileReader for reading data from a sparse file entry.
+type sparseFileReader struct {
+	fr  fileReader  // Underlying fileReader
+	sp  sparseHoles // Normalized list of sparse holes
+	pos int64       // Current position in sparse file
+}
+
+func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
+	finished := int64(len(b)) >= sr.LogicalRemaining()
+	if finished {
+		b = b[:sr.LogicalRemaining()]
+	}
+
+	b0 := b
+	endPos := sr.pos + int64(len(b))
+	for endPos > sr.pos && err == nil {
+		var nf int // Bytes read in fragment
+		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+		if sr.pos < holeStart { // In a data fragment
+			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
+			nf, err = tryReadFull(sr.fr, bf)
+		} else { // In a hole fragment
+			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
+			nf, err = tryReadFull(zeroReader{}, bf)
+		}
+		b = b[nf:]
+		sr.pos += int64(nf)
+		if sr.pos >= holeEnd && len(sr.sp) > 1 {
+			sr.sp = sr.sp[1:] // Ensure last fragment always remains
+		}
+	}
+
+	n = len(b0) - len(b)
+	switch {
+	case err == io.EOF:
+		return n, errMissData // Less data in dense file than sparse file
+	case err != nil:
+		return n, err
+	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+		return n, errUnrefData // More data in dense file than sparse file
+	case finished:
+		return n, io.EOF
+	default:
+		return n, nil
+	}
+}
+
+func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
+	ws, ok := w.(io.WriteSeeker)
+	if ok {
+		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
+			ok = false // Not all io.Seeker can really seek
+		}
+	}
+	if !ok {
+		return io.Copy(w, struct{ io.Reader }{sr})
+	}
+
+	var writeLastByte bool
+	pos0 := sr.pos
+	for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil {
+		var nf int64 // Size of fragment
+		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
+		if sr.pos < holeStart { // In a data fragment
+			nf = holeStart - sr.pos
+			nf, err = io.CopyN(ws, sr.fr, nf)
+		} else { // In a hole fragment
+			nf = holeEnd - sr.pos
+			if sr.PhysicalRemaining() == 0 {
+				writeLastByte = true
+				nf--
+			}
+			_, err = ws.Seek(nf, io.SeekCurrent)
+		}
+		sr.pos += nf
+		if sr.pos >= holeEnd && len(sr.sp) > 1 {
+			sr.sp = sr.sp[1:] // Ensure last fragment always remains
+		}
+	}
+
+	// If the last fragment is a hole, then seek to 1-byte before EOF, and
+	// write a single byte to ensure the file is the right size.
+	if writeLastByte && err == nil {
+		_, err = ws.Write([]byte{0})
+		sr.pos++
+	}
+
+	n = sr.pos - pos0
+	switch {
+	case err == io.EOF:
+		return n, errMissData // Less data in dense file than sparse file
+	case err != nil:
+		return n, err
+	case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0:
+		return n, errUnrefData // More data in dense file than sparse file
+	default:
+		return n, nil
+	}
+}
+
+func (sr sparseFileReader) LogicalRemaining() int64 {
+	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
+}
+func (sr sparseFileReader) PhysicalRemaining() int64 {
+	return sr.fr.PhysicalRemaining()
+}
+
+type zeroReader struct{}
+
+func (zeroReader) Read(b []byte) (int, error) {
+	for i := range b {
+		b[i] = 0
+	}
+	return len(b), nil
+}
+
+// mustReadFull is like io.ReadFull except it returns
+// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
+func mustReadFull(r io.Reader, b []byte) (int, error) {
+	n, err := tryReadFull(r, b)
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+	return n, err
+}
+
+// tryReadFull is like io.ReadFull except it returns
+// io.EOF when it is hit before len(b) bytes are read.
+func tryReadFull(r io.Reader, b []byte) (n int, err error) {
+	for len(b) > n && err == nil {
+		var nn int
+		nn, err = r.Read(b[n:])
+		n += nn
+	}
+	if len(b) == n && err == io.EOF {
+		err = nil
+	}
+	return n, err
+}
+
+// discard skips n bytes in r, reporting an error if unable to do so.
+func discard(r io.Reader, n int64) error {
+	// If possible, Seek to the last byte before the end of the data section.
+	// Do this because Seek is often lazy about reporting errors; this will mask
+	// the fact that the stream may be truncated. We can rely on the
+	// io.CopyN done shortly afterwards to trigger any IO errors.
+	var seekSkipped int64 // Number of bytes skipped via Seek
+	if sr, ok := r.(io.Seeker); ok && n > 1 {
+		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
+		// io.Seeker, but calling Seek always returns an error and performs
+		// no action. Thus, we try an innocent seek to the current position
+		// to see if Seek is really supported.
+		pos1, err := sr.Seek(0, io.SeekCurrent)
+		if pos1 >= 0 && err == nil {
+			// Seek seems supported, so perform the real Seek.
+			pos2, err := sr.Seek(n-1, io.SeekCurrent)
+			if pos2 < 0 || err != nil {
+				return err
+			}
+			seekSkipped = pos2 - pos1
+		}
+	}
+
+	copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped)
+	if err == io.EOF && seekSkipped+copySkipped < n {
+		err = io.ErrUnexpectedEOF
+	}
+	return err
 }
diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_unix.go b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go
new file mode 100644
index 0000000..c623c1e
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/sparse_unix.go
@@ -0,0 +1,77 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+	"io"
+	"os"
+	"runtime"
+	"syscall"
+)
+
+func init() {
+	sysSparseDetect = sparseDetectUnix
+}
+
+func sparseDetectUnix(f *os.File) (sph sparseHoles, err error) {
+	// SEEK_DATA and SEEK_HOLE originated from Solaris and support for it
+	// has been added to most of the other major Unix systems.
+	var seekData, seekHole = 3, 4 // SEEK_DATA/SEEK_HOLE from unistd.h
+
+	if runtime.GOOS == "darwin" {
+		// Darwin has the constants swapped, compared to all other UNIX.
+		seekData, seekHole = 4, 3
+	}
+
+	// Check for seekData/seekHole support.
+	// Different OS and FS may differ in the exact errno that is returned when
+	// there is no support. Rather than special-casing every possible errno
+	// representing "not supported", just assume that a non-nil error means
+	// that seekData/seekHole is not supported.
+	if _, err := f.Seek(0, seekHole); err != nil {
+		return nil, nil
+	}
+
+	// Populate the SparseHoles.
+	var last, pos int64 = -1, 0
+	for {
+		// Get the location of the next hole section.
+		if pos, err = fseek(f, pos, seekHole); pos == last || err != nil {
+			return sph, err
+		}
+		offset := pos
+		last = pos
+
+		// Get the location of the next data section.
+		if pos, err = fseek(f, pos, seekData); pos == last || err != nil {
+			return sph, err
+		}
+		length := pos - offset
+		last = pos
+
+		if length > 0 {
+			sph = append(sph, SparseEntry{offset, length})
+		}
+	}
+}
+
+func fseek(f *os.File, pos int64, whence int) (int64, error) {
+	pos, err := f.Seek(pos, whence)
+	if errno(err) == syscall.ENXIO {
+		// SEEK_DATA returns ENXIO when past the last data fragment,
+		// which makes determining the size of the last hole difficult.
+		pos, err = f.Seek(0, io.SeekEnd)
+	}
+	return pos, err
+}
+
+func errno(err error) error {
+	if perr, ok := err.(*os.PathError); ok {
+		return perr.Err
+	}
+	return err
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/sparse_windows.go b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go
new file mode 100644
index 0000000..05bf1a9
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/sparse_windows.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package tar
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+var errInvalidFunc = syscall.Errno(1) // ERROR_INVALID_FUNCTION from WinError.h
+
+func init() {
+	sysSparseDetect = sparseDetectWindows
+	sysSparsePunch = sparsePunchWindows
+}
+
+func sparseDetectWindows(f *os.File) (sph sparseHoles, err error) {
+	const queryAllocRanges = 0x000940CF                  // FSCTL_QUERY_ALLOCATED_RANGES from WinIoCtl.h
+	type allocRangeBuffer struct{ offset, length int64 } // FILE_ALLOCATED_RANGE_BUFFER from WinIoCtl.h
+
+	s, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	queryRange := allocRangeBuffer{0, s.Size()}
+	allocRanges := make([]allocRangeBuffer, 64)
+
+	// Repeatedly query for ranges until the input buffer is large enough.
+	var bytesReturned uint32
+	for {
+		err := syscall.DeviceIoControl(
+			syscall.Handle(f.Fd()), queryAllocRanges,
+			(*byte)(unsafe.Pointer(&queryRange)), uint32(unsafe.Sizeof(queryRange)),
+			(*byte)(unsafe.Pointer(&allocRanges[0])), uint32(len(allocRanges)*int(unsafe.Sizeof(allocRanges[0]))),
+			&bytesReturned, nil,
+		)
+		if err == syscall.ERROR_MORE_DATA {
+			allocRanges = make([]allocRangeBuffer, 2*len(allocRanges))
+			continue
+		}
+		if err == errInvalidFunc {
+			return nil, nil // Sparse file not supported on this FS
+		}
+		if err != nil {
+			return nil, err
+		}
+		break
+	}
+	n := bytesReturned / uint32(unsafe.Sizeof(allocRanges[0]))
+	allocRanges = append(allocRanges[:n], allocRangeBuffer{s.Size(), 0})
+
+	// Invert the data fragments into hole fragments.
+	var pos int64
+	for _, r := range allocRanges {
+		if r.offset > pos {
+			sph = append(sph, SparseEntry{pos, r.offset - pos})
+		}
+		pos = r.offset + r.length
+	}
+	return sph, nil
+}
+
+func sparsePunchWindows(f *os.File, sph sparseHoles) error {
+	const setSparse = 0x000900C4                 // FSCTL_SET_SPARSE from WinIoCtl.h
+	const setZeroData = 0x000980C8               // FSCTL_SET_ZERO_DATA from WinIoCtl.h
+	type zeroDataInfo struct{ start, end int64 } // FILE_ZERO_DATA_INFORMATION from WinIoCtl.h
+
+	// Set the file as being sparse.
+	var bytesReturned uint32
+	devErr := syscall.DeviceIoControl(
+		syscall.Handle(f.Fd()), setSparse,
+		nil, 0, nil, 0,
+		&bytesReturned, nil,
+	)
+	if devErr != nil && devErr != errInvalidFunc {
+		return devErr
+	}
+
+	// Set the file to the right size.
+	var size int64
+	if len(sph) > 0 {
+		size = sph[len(sph)-1].endOffset()
+	}
+	if err := f.Truncate(size); err != nil {
+		return err
+	}
+	if devErr == errInvalidFunc {
+		// Sparse file not supported on this FS.
+		// Call sparsePunchManual since SetEndOfFile does not guarantee that
+		// the extended space is filled with zeros.
+		return sparsePunchManual(f, sph)
+	}
+
+	// Punch holes for all relevant fragments.
+	for _, s := range sph {
+		zdi := zeroDataInfo{s.Offset, s.endOffset()}
+		err := syscall.DeviceIoControl(
+			syscall.Handle(f.Fd()), setZeroData,
+			(*byte)(unsafe.Pointer(&zdi)), uint32(unsafe.Sizeof(zdi)),
+			nil, 0,
+			&bytesReturned, nil,
+		)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// sparsePunchManual writes zeros into each hole.
+func sparsePunchManual(f *os.File, sph sparseHoles) error {
+	const chunkSize = 32 << 10
+	zbuf := make([]byte, chunkSize)
+	for _, s := range sph {
+		for pos := s.Offset; pos < s.endOffset(); pos += chunkSize {
+			n := min(chunkSize, s.endOffset()-pos)
+			if _, err := f.WriteAt(zbuf[:n], pos); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atim.go b/vendor/github.com/dmcgowan/go-tar/stat_actime1.go
similarity index 100%
rename from vendor/github.com/dmcgowan/go-tar/stat_atim.go
rename to vendor/github.com/dmcgowan/go-tar/stat_actime1.go
diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go b/vendor/github.com/dmcgowan/go-tar/stat_actime2.go
similarity index 100%
rename from vendor/github.com/dmcgowan/go-tar/stat_atimespec.go
rename to vendor/github.com/dmcgowan/go-tar/stat_actime2.go
diff --git a/vendor/github.com/dmcgowan/go-tar/stat_unix.go b/vendor/github.com/dmcgowan/go-tar/stat_unix.go
index cb843db..868105f 100644
--- a/vendor/github.com/dmcgowan/go-tar/stat_unix.go
+++ b/vendor/github.com/dmcgowan/go-tar/stat_unix.go
@@ -8,6 +8,10 @@
 
 import (
 	"os"
+	"os/user"
+	"runtime"
+	"strconv"
+	"sync"
 	"syscall"
 )
 
@@ -15,6 +19,10 @@
 	sysStat = statUnix
 }
 
+// userMap and groupMap caches UID and GID lookups for performance reasons.
+// The downside is that renaming uname or gname by the OS never takes effect.
+var userMap, groupMap sync.Map // map[int]string
+
 func statUnix(fi os.FileInfo, h *Header) error {
 	sys, ok := fi.Sys().(*syscall.Stat_t)
 	if !ok {
@@ -22,11 +30,67 @@
 	}
 	h.Uid = int(sys.Uid)
 	h.Gid = int(sys.Gid)
-	// TODO(bradfitz): populate username & group.  os/user
-	// doesn't cache LookupId lookups, and lacks group
-	// lookup functions.
+
+	// Best effort at populating Uname and Gname.
+	// The os/user functions may fail for any number of reasons
+	// (not implemented on that platform, cgo not enabled, etc).
+	if u, ok := userMap.Load(h.Uid); ok {
+		h.Uname = u.(string)
+	} else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil {
+		h.Uname = u.Username
+		userMap.Store(h.Uid, h.Uname)
+	}
+	if g, ok := groupMap.Load(h.Gid); ok {
+		h.Gname = g.(string)
+	} else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil {
+		h.Gname = g.Name
+		groupMap.Store(h.Gid, h.Gname)
+	}
+
 	h.AccessTime = statAtime(sys)
 	h.ChangeTime = statCtime(sys)
-	// TODO(bradfitz): major/minor device numbers?
+
+	// Best effort at populating Devmajor and Devminor.
+	if h.Typeflag == TypeChar || h.Typeflag == TypeBlock {
+		dev := uint64(sys.Rdev) // May be int32 or uint32
+		switch runtime.GOOS {
+		case "linux":
+			// Copied from golang.org/x/sys/unix/dev_linux.go.
+			major := uint32((dev & 0x00000000000fff00) >> 8)
+			major |= uint32((dev & 0xfffff00000000000) >> 32)
+			minor := uint32((dev & 0x00000000000000ff) >> 0)
+			minor |= uint32((dev & 0x00000ffffff00000) >> 12)
+			h.Devmajor, h.Devminor = int64(major), int64(minor)
+		case "darwin":
+			// Copied from golang.org/x/sys/unix/dev_darwin.go.
+			major := uint32((dev >> 24) & 0xff)
+			minor := uint32(dev & 0xffffff)
+			h.Devmajor, h.Devminor = int64(major), int64(minor)
+		case "dragonfly":
+			// Copied from golang.org/x/sys/unix/dev_dragonfly.go.
+			major := uint32((dev >> 8) & 0xff)
+			minor := uint32(dev & 0xffff00ff)
+			h.Devmajor, h.Devminor = int64(major), int64(minor)
+		case "freebsd":
+			// Copied from golang.org/x/sys/unix/dev_freebsd.go.
+			major := uint32((dev >> 8) & 0xff)
+			minor := uint32(dev & 0xffff00ff)
+			h.Devmajor, h.Devminor = int64(major), int64(minor)
+		case "netbsd":
+			// Copied from golang.org/x/sys/unix/dev_netbsd.go.
+			major := uint32((dev & 0x000fff00) >> 8)
+			minor := uint32((dev & 0x000000ff) >> 0)
+			minor |= uint32((dev & 0xfff00000) >> 12)
+			h.Devmajor, h.Devminor = int64(major), int64(minor)
+		case "openbsd":
+			// Copied from golang.org/x/sys/unix/dev_openbsd.go.
+			major := uint32((dev & 0x0000ff00) >> 8)
+			minor := uint32((dev & 0x000000ff) >> 0)
+			minor |= uint32((dev & 0xffff0000) >> 8)
+			h.Devmajor, h.Devminor = int64(major), int64(minor)
+		default:
+			// TODO: Implement solaris (see https://golang.org/issue/8106)
+		}
+	}
 	return nil
 }
diff --git a/vendor/github.com/dmcgowan/go-tar/strconv.go b/vendor/github.com/dmcgowan/go-tar/strconv.go
index bb5b51c..8bbd65c 100644
--- a/vendor/github.com/dmcgowan/go-tar/strconv.go
+++ b/vendor/github.com/dmcgowan/go-tar/strconv.go
@@ -12,26 +12,34 @@
 	"time"
 )
 
+// hasNUL reports whether the NUL character exists within s.
+func hasNUL(s string) bool {
+	return strings.IndexByte(s, 0) >= 0
+}
+
+// isASCII reports whether the input is an ASCII C-style string.
 func isASCII(s string) bool {
 	for _, c := range s {
-		if c >= 0x80 {
+		if c >= 0x80 || c == 0x00 {
 			return false
 		}
 	}
 	return true
 }
 
+// toASCII converts the input to an ASCII C-style string.
+// This is a best-effort conversion, so invalid characters are dropped.
 func toASCII(s string) string {
 	if isASCII(s) {
 		return s
 	}
-	var buf bytes.Buffer
+	b := make([]byte, 0, len(s))
 	for _, c := range s {
-		if c < 0x80 {
-			buf.WriteByte(byte(c))
+		if c < 0x80 && c != 0x00 {
+			b = append(b, byte(c))
 		}
 	}
-	return buf.String()
+	return string(b)
 }
 
 type parser struct {
@@ -45,23 +53,28 @@
 // parseString parses bytes as a NUL-terminated C-style string.
 // If a NUL byte is not found then the whole slice is returned as a string.
 func (*parser) parseString(b []byte) string {
-	n := 0
-	for n < len(b) && b[n] != 0 {
-		n++
+	if i := bytes.IndexByte(b, 0); i >= 0 {
+		return string(b[:i])
 	}
-	return string(b[0:n])
+	return string(b)
 }
 
-// Write s into b, terminating it with a NUL if there is room.
+// formatString copies s into b, NUL-terminating if possible.
 func (f *formatter) formatString(b []byte, s string) {
 	if len(s) > len(b) {
 		f.err = ErrFieldTooLong
-		return
 	}
-	ascii := toASCII(s)
-	copy(b, ascii)
-	if len(ascii) < len(b) {
-		b[len(ascii)] = 0
+	copy(b, s)
+	if len(s) < len(b) {
+		b[len(s)] = 0
+	}
+
+	// Some buggy readers treat regular files with a trailing slash
+	// in the V7 path field as a directory even though the full path
+	// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
+	if len(s) > len(b) && b[len(b)-1] == '/' {
+		n := len(strings.TrimRight(s[:len(b)], "/"))
+		b[n] = 0 // Replace trailing slash with NUL terminator
 	}
 }
 
@@ -73,7 +86,7 @@
 // that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
 // equivalent to the sign bit in two's complement form.
 func fitsInBase256(n int, x int64) bool {
-	var binBits = uint(n-1) * 8
+	binBits := uint(n-1) * 8
 	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
 }
 
@@ -121,8 +134,14 @@
 	return p.parseOctal(b)
 }
 
-// Write x into b, as binary (GNUtar/star extension).
+// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
+// Otherwise it will attempt to use base-256 (binary) encoding.
 func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInOctal(len(b), x) {
+		f.formatOctal(b, x)
+		return
+	}
+
 	if fitsInBase256(len(b), x) {
 		for i := len(b) - 1; i >= 0; i-- {
 			b[i] = byte(x)
@@ -155,6 +174,11 @@
 }
 
 func (f *formatter) formatOctal(b []byte, x int64) {
+	if !fitsInOctal(len(b), x) {
+		x = 0 // Last resort, just write zero
+		f.err = ErrFieldTooLong
+	}
+
 	s := strconv.FormatInt(x, 8)
 	// Add leading zeros, but leave room for a NUL.
 	if n := len(b) - len(s) - 1; n > 0 {
@@ -163,6 +187,13 @@
 	f.formatString(b, s)
 }
 
+// fitsInOctal reports whether the integer x fits in a field n-bytes long
+// using octal encoding with the appropriate NUL terminator.
+func fitsInOctal(n int, x int64) bool {
+	octBits := uint(n-1) * 3
+	return x >= 0 && (n >= 22 || x < 1<<octBits)
+}
+
 // parsePAXTime takes a string of the form %d.%d as described in the PAX
 // specification. Note that this implementation allows for negative timestamps,
 // which is allowed for by the PAX specification, but not always portable.
@@ -200,14 +231,27 @@
 	return time.Unix(secs, int64(nsecs)), nil
 }
 
-// TODO(dsnet): Implement formatPAXTime.
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
+func formatPAXTime(ts time.Time) (s string) {
+	secs, nsecs := ts.Unix(), ts.Nanosecond()
+	if nsecs == 0 {
+		return strconv.FormatInt(secs, 10)
+	}
+
+	// If seconds is negative, then perform correction.
+	sign := ""
+	if secs < 0 {
+		sign = "-"             // Remember sign
+		secs = -(secs + 1)     // Add a second to secs
+		nsecs = -(nsecs - 1E9) // Take that second away from nsecs
+	}
+	return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
+}
 
 // parsePAXRecord parses the input PAX record string into a key-value pair.
 // If parsing is successful, it will slice off the currently read record and
 // return the remainder as r.
-//
-// A PAX record is of the following form:
-//	"%d %s=%s\n" % (size, key, value)
 func parsePAXRecord(s string) (k, v, r string, err error) {
 	// The size field ends at the first space.
 	sp := strings.IndexByte(s, ' ')
@@ -232,21 +276,51 @@
 	if eq == -1 {
 		return "", "", s, ErrHeader
 	}
-	return rec[:eq], rec[eq+1:], rem, nil
+	k, v = rec[:eq], rec[eq+1:]
+
+	if !validPAXRecord(k, v) {
+		return "", "", s, ErrHeader
+	}
+	return k, v, rem, nil
 }
 
 // formatPAXRecord formats a single PAX record, prefixing it with the
 // appropriate length.
-func formatPAXRecord(k, v string) string {
+func formatPAXRecord(k, v string) (string, error) {
+	if !validPAXRecord(k, v) {
+		return "", ErrHeader
+	}
+
 	const padding = 3 // Extra padding for ' ', '=', and '\n'
 	size := len(k) + len(v) + padding
 	size += len(strconv.Itoa(size))
-	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
+	record := strconv.Itoa(size) + " " + k + "=" + v + "\n"
 
 	// Final adjustment if adding size field increased the record size.
 	if len(record) != size {
 		size = len(record)
-		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+		record = strconv.Itoa(size) + " " + k + "=" + v + "\n"
 	}
-	return record
+	return record, nil
+}
+
+// validPAXRecord reports whether the key-value pair is valid where each
+// record is formatted as:
+//	"%d %s=%s\n" % (size, key, value)
+//
+// Keys and values should be UTF-8, but the number of bad writers out there
+// forces us to be more liberal.
+// Thus, we only reject all keys with NUL, and only reject NULs in values
+// for the PAX version of the USTAR string fields.
+// The key must not contain an '=' character.
+func validPAXRecord(k, v string) bool {
+	if k == "" || strings.IndexByte(k, '=') >= 0 {
+		return false
+	}
+	switch k {
+	case paxPath, paxLinkpath, paxUname, paxGname:
+		return !hasNUL(v)
+	default:
+		return !hasNUL(k)
+	}
 }
diff --git a/vendor/github.com/dmcgowan/go-tar/writer.go b/vendor/github.com/dmcgowan/go-tar/writer.go
index 596fb8b..2eed619 100644
--- a/vendor/github.com/dmcgowan/go-tar/writer.go
+++ b/vendor/github.com/dmcgowan/go-tar/writer.go
@@ -4,12 +4,8 @@
 
 package tar
 
-// TODO(dsymonds):
-// - catch more errors (no first header, etc.)
-
 import (
 	"bytes"
-	"errors"
 	"fmt"
 	"io"
 	"path"
@@ -19,234 +15,365 @@
 	"time"
 )
 
-var (
-	ErrWriteTooLong    = errors.New("archive/tar: write too long")
-	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
-	ErrWriteAfterClose = errors.New("archive/tar: write after close")
-	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
-)
-
-// A Writer provides sequential writing of a tar archive in POSIX.1 format.
-// A tar archive consists of a sequence of files.
-// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
-// writing at most hdr.Size bytes in total.
+// Writer provides sequential writing of a tar archive.
+// Writer.WriteHeader begins a new file with the provided Header,
+// and then Writer can be treated as an io.Writer to supply that file's data.
 type Writer struct {
-	w          io.Writer
-	err        error
-	nb         int64 // number of unwritten bytes for current file entry
-	pad        int64 // amount of padding to write after current file entry
-	closed     bool
-	usedBinary bool  // whether the binary numeric field extension was used
-	preferPax  bool  // use PAX header instead of binary numeric header
-	hdrBuff    block // buffer to use in writeHeader when writing a regular header
-	paxHdrBuff block // buffer to use in writeHeader when writing a PAX header
+	w    io.Writer
+	pad  int64      // Amount of padding to write after current file entry
+	curr fileWriter // Writer for current file entry
+	hdr  Header     // Shallow copy of Header that is safe for mutations
+	blk  block      // Buffer to use as temporary local storage
+
+	// err is a persistent error.
+	// It is only the responsibility of every exported method of Writer to
+	// ensure that this error is sticky.
+	err error
 }
 
 // NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{w: w, curr: &regFileWriter{w, 0}}
+}
 
-// Flush finishes writing the current file (optional).
+type fileWriter interface {
+	io.Writer
+	fileState
+
+	ReadFrom(io.Reader) (int64, error)
+}
+
+// Flush finishes writing the current file's block padding.
+// The current file must be fully written before Flush can be called.
+//
+// Deprecated: This is unnecessary as the next call to WriteHeader or Close
+// will implicitly flush out the file's padding.
 func (tw *Writer) Flush() error {
-	if tw.nb > 0 {
-		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
-		return tw.err
-	}
-
-	n := tw.nb + tw.pad
-	for n > 0 && tw.err == nil {
-		nr := n
-		if nr > blockSize {
-			nr = blockSize
-		}
-		var nw int
-		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
-		n -= int64(nw)
-	}
-	tw.nb = 0
-	tw.pad = 0
-	return tw.err
-}
-
-var (
-	minTime = time.Unix(0, 0)
-	// There is room for 11 octal digits (33 bits) of mtime.
-	maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
-	return tw.writeHeader(hdr, true)
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// As this method is called internally by writePax header to allow it to
-// suppress writing the pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
-	if tw.closed {
-		return ErrWriteAfterClose
-	}
-	if tw.err == nil {
-		tw.Flush()
-	}
 	if tw.err != nil {
 		return tw.err
 	}
-
-	// a map to hold pax header records, if any are needed
-	paxHeaders := make(map[string]string)
-
-	// TODO(dsnet): we might want to use PAX headers for
-	// subsecond time resolution, but for now let's just capture
-	// too long fields or non ascii characters
-
-	// We need to select which scratch buffer to use carefully,
-	// since this method is called recursively to write PAX headers.
-	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
-	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
-	// already being used by the non-recursive call, so we must use paxHdrBuff.
-	header := &tw.hdrBuff
-	if !allowPax {
-		header = &tw.paxHdrBuff
+	if nb := tw.curr.LogicalRemaining(); nb > 0 {
+		return fmt.Errorf("tar: missed writing %d bytes", nb)
 	}
-	copy(header[:], zeroBlock[:])
-
-	// Wrappers around formatter that automatically sets paxHeaders if the
-	// argument extends beyond the capacity of the input byte slice.
-	var f formatter
-	var formatString = func(b []byte, s string, paxKeyword string) {
-		needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
-		if needsPaxHeader {
-			paxHeaders[paxKeyword] = s
-			return
-		}
-		f.formatString(b, s)
-	}
-	var formatNumeric = func(b []byte, x int64, paxKeyword string) {
-		// Try octal first.
-		s := strconv.FormatInt(x, 8)
-		if len(s) < len(b) {
-			f.formatOctal(b, x)
-			return
-		}
-
-		// If it is too long for octal, and PAX is preferred, use a PAX header.
-		if paxKeyword != paxNone && tw.preferPax {
-			f.formatOctal(b, 0)
-			s := strconv.FormatInt(x, 10)
-			paxHeaders[paxKeyword] = s
-			return
-		}
-
-		tw.usedBinary = true
-		f.formatNumeric(b, x)
-	}
-
-	// Handle out of range ModTime carefully.
-	var modTime int64
-	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
-		modTime = hdr.ModTime.Unix()
-	}
-
-	v7 := header.V7()
-	formatString(v7.Name(), hdr.Name, paxPath)
-	// TODO(dsnet): The GNU format permits the mode field to be encoded in
-	// base-256 format. Thus, we can use formatNumeric instead of formatOctal.
-	f.formatOctal(v7.Mode(), hdr.Mode)
-	formatNumeric(v7.UID(), int64(hdr.Uid), paxUid)
-	formatNumeric(v7.GID(), int64(hdr.Gid), paxGid)
-	formatNumeric(v7.Size(), hdr.Size, paxSize)
-	// TODO(dsnet): Consider using PAX for finer time granularity.
-	formatNumeric(v7.ModTime(), modTime, paxNone)
-	v7.TypeFlag()[0] = hdr.Typeflag
-	formatString(v7.LinkName(), hdr.Linkname, paxLinkpath)
-
-	ustar := header.USTAR()
-	formatString(ustar.UserName(), hdr.Uname, paxUname)
-	formatString(ustar.GroupName(), hdr.Gname, paxGname)
-	formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone)
-	formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone)
-
-	// TODO(dsnet): The logic surrounding the prefix field is broken when trying
-	// to encode the header as GNU format. The challenge with the current logic
-	// is that we are unsure what format we are using at any given moment until
-	// we have processed *all* of the fields. The problem is that by the time
-	// all fields have been processed, some work has already been done to handle
-	// each field under the assumption that it is for one given format or
-	// another. In some situations, this causes the Writer to be confused and
-	// encode a prefix field when the format being used is GNU. Thus, producing
-	// an invalid tar file.
-	//
-	// As a short-term fix, we disable the logic to use the prefix field, which
-	// will force the badly generated GNU files to become encoded as being
-	// the PAX format.
-	//
-	// As an alternative fix, we could hard-code preferPax to be true. However,
-	// this is problematic for the following reasons:
-	//	* The preferPax functionality is not tested at all.
-	//	* This can result in headers that try to use both the GNU and PAX
-	//	features at the same time, which is also wrong.
-	//
-	// The proper fix for this is to use a two-pass method:
-	//	* The first pass simply determines what set of formats can possibly
-	//	encode the given header.
-	//	* The second pass actually encodes the header as that given format
-	//	without worrying about violating the format.
-	//
-	// See the following:
-	//	https://golang.org/issue/12594
-	//	https://golang.org/issue/17630
-	//	https://golang.org/issue/9683
-	const usePrefix = false
-
-	// try to use a ustar header when only the name is too long
-	_, paxPathUsed := paxHeaders[paxPath]
-	if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
-		prefix, suffix, ok := splitUSTARPath(hdr.Name)
-		if ok {
-			// Since we can encode in USTAR format, disable PAX header.
-			delete(paxHeaders, paxPath)
-
-			// Update the path fields
-			formatString(v7.Name(), suffix, paxNone)
-			formatString(ustar.Prefix(), prefix, paxNone)
-		}
-	}
-
-	if tw.usedBinary {
-		header.SetFormat(formatGNU)
-	} else {
-		header.SetFormat(formatUSTAR)
-	}
-
-	// Check if there were any formatting errors.
-	if f.err != nil {
-		tw.err = f.err
+	if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil {
 		return tw.err
 	}
+	tw.pad = 0
+	return nil
+}
 
-	if allowPax {
-		for k, v := range hdr.Xattrs {
-			paxHeaders[paxXattr+k] = v
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// The Header.Size determines how many bytes can be written for the next file.
+// If the current file is not fully written, then this returns an error.
+// This implicitly flushes any padding necessary before writing the header.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	tw.hdr = *hdr // Shallow copy of Header
+
+	// Round ModTime and ignore AccessTime and ChangeTime unless
+	// the format is explicitly chosen.
+	// This ensures nominal usage of WriteHeader (without specifying the format)
+	// does not always result in the PAX format being chosen, which
+	// causes a 1KiB increase to every header.
+	if tw.hdr.Format == FormatUnknown {
+		tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second)
+		tw.hdr.AccessTime = time.Time{}
+		tw.hdr.ChangeTime = time.Time{}
+	}
+
+	allowedFormats, paxHdrs, err := tw.hdr.allowedFormats()
+	switch {
+	case allowedFormats.has(FormatUSTAR):
+		tw.err = tw.writeUSTARHeader(&tw.hdr)
+		return tw.err
+	case allowedFormats.has(FormatPAX):
+		tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs)
+		return tw.err
+	case allowedFormats.has(FormatGNU):
+		tw.err = tw.writeGNUHeader(&tw.hdr)
+		return tw.err
+	default:
+		return err // Non-fatal error
+	}
+}
+
+func (tw *Writer) writeUSTARHeader(hdr *Header) error {
+	// Check if we can use USTAR prefix/suffix splitting.
+	var namePrefix string
+	if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok {
+		namePrefix, hdr.Name = prefix, suffix
+	}
+
+	// Pack the main header.
+	var f formatter
+	blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal)
+	f.formatString(blk.USTAR().Prefix(), namePrefix)
+	blk.SetFormat(FormatUSTAR)
+	if f.err != nil {
+		return f.err // Should never happen since header is validated
+	}
+	return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag)
+}
+
+func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error {
+	realName, realSize := hdr.Name, hdr.Size
+
+	// Handle sparse files.
+	var spd sparseDatas
+	var spb []byte
+	if len(hdr.SparseHoles) > 0 {
+		sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+		sph = alignSparseEntries(sph, hdr.Size)
+		spd = invertSparseEntries(sph, hdr.Size)
+
+		// Format the sparse map.
+		hdr.Size = 0 // Replace with encoded size
+		spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n')
+		for _, s := range spd {
+			hdr.Size += s.Length
+			spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n')
+			spb = append(strconv.AppendInt(spb, s.Length, 10), '\n')
+		}
+		pad := blockPadding(int64(len(spb)))
+		spb = append(spb, zeroBlock[:pad]...)
+		hdr.Size += int64(len(spb)) // Accounts for encoded sparse map
+
+		// Add and modify appropriate PAX records.
+		dir, file := path.Split(realName)
+		hdr.Name = path.Join(dir, "GNUSparseFile.0", file)
+		paxHdrs[paxGNUSparseMajor] = "1"
+		paxHdrs[paxGNUSparseMinor] = "0"
+		paxHdrs[paxGNUSparseName] = realName
+		paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10)
+		paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10)
+		delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName
+	}
+
+	// Write PAX records to the output.
+	isGlobal := hdr.Typeflag == TypeXGlobalHeader
+	if len(paxHdrs) > 0 || isGlobal {
+		// Sort keys for deterministic ordering.
+		var keys []string
+		for k := range paxHdrs {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		// Write each record to a buffer.
+		var buf bytes.Buffer
+		for _, k := range keys {
+			rec, err := formatPAXRecord(k, paxHdrs[k])
+			if err != nil {
+				return err
+			}
+			buf.WriteString(rec)
+		}
+
+		// Write the extended header file.
+		var name string
+		var flag byte
+		if isGlobal {
+			name = "GlobalHead.0.0"
+			flag = TypeXGlobalHeader
+		} else {
+			dir, file := path.Split(realName)
+			name = path.Join(dir, "PaxHeaders.0", file)
+			flag = TypeXHeader
+		}
+		data := buf.String()
+		if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal {
+			return err // Global headers return here
 		}
 	}
 
-	if len(paxHeaders) > 0 {
-		if !allowPax {
-			return errInvalidHeader
+	// Pack the main header.
+	var f formatter // Ignore errors since they are expected
+	fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) }
+	blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal)
+	blk.SetFormat(FormatPAX)
+	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+		return err
+	}
+
+	// Write the sparse map and setup the sparse writer if necessary.
+	if len(spd) > 0 {
+		// Use tw.curr since the sparse map is accounted for in hdr.Size.
+		if _, err := tw.curr.Write(spb); err != nil {
+			return err
 		}
-		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
+	}
+	return nil
+}
+
+func (tw *Writer) writeGNUHeader(hdr *Header) error {
+	// Use long-link files if Name or Linkname exceeds the field size.
+	const longName = "././@LongLink"
+	if len(hdr.Name) > nameSize {
+		data := hdr.Name + "\x00"
+		if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil {
 			return err
 		}
 	}
-	tw.nb = hdr.Size
-	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+	if len(hdr.Linkname) > nameSize {
+		data := hdr.Linkname + "\x00"
+		if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil {
+			return err
+		}
+	}
 
-	_, tw.err = tw.w.Write(header[:])
-	return tw.err
+	// Pack the main header.
+	var f formatter // Ignore errors since they are expected
+	var spd sparseDatas
+	var spb []byte
+	blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric)
+	if !hdr.AccessTime.IsZero() {
+		f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix())
+	}
+	if !hdr.ChangeTime.IsZero() {
+		f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix())
+	}
+	if hdr.Typeflag == TypeGNUSparse {
+		sph := append([]SparseEntry{}, hdr.SparseHoles...) // Copy sparse map
+		sph = alignSparseEntries(sph, hdr.Size)
+		spd = invertSparseEntries(sph, hdr.Size)
+
+		// Format the sparse map.
+		formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas {
+			for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ {
+				f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset)
+				f.formatNumeric(sa.Entry(i).Length(), sp[0].Length)
+				sp = sp[1:]
+			}
+			if len(sp) > 0 {
+				sa.IsExtended()[0] = 1
+			}
+			return sp
+		}
+		sp2 := formatSPD(spd, blk.GNU().Sparse())
+		for len(sp2) > 0 {
+			var spHdr block
+			sp2 = formatSPD(sp2, spHdr.Sparse())
+			spb = append(spb, spHdr[:]...)
+		}
+
+		// Update size fields in the header block.
+		realSize := hdr.Size
+		hdr.Size = 0 // Encoded size; does not account for encoded sparse map
+		for _, s := range spd {
+			hdr.Size += s.Length
+		}
+		copy(blk.V7().Size(), zeroBlock[:]) // Reset field
+		f.formatNumeric(blk.V7().Size(), hdr.Size)
+		f.formatNumeric(blk.GNU().RealSize(), realSize)
+	}
+	blk.SetFormat(FormatGNU)
+	if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil {
+		return err
+	}
+
+	// Write the extended sparse map and setup the sparse writer if necessary.
+	if len(spd) > 0 {
+		// Use tw.w since the sparse map is not accounted for in hdr.Size.
+		if _, err := tw.w.Write(spb); err != nil {
+			return err
+		}
+		tw.curr = &sparseFileWriter{tw.curr, spd, 0}
+	}
+	return nil
+}
+
+type (
+	stringFormatter func([]byte, string)
+	numberFormatter func([]byte, int64)
+)
+
+// templateV7Plus fills out the V7 fields of a block using values from hdr.
+// It also fills out fields (uname, gname, devmajor, devminor) that are
+// shared in the USTAR, PAX, and GNU formats using the provided formatters.
+//
+// The block returned is only valid until the next call to
+// templateV7Plus or writeRawFile.
+func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block {
+	tw.blk.Reset()
+
+	modTime := hdr.ModTime
+	if modTime.IsZero() {
+		modTime = time.Unix(0, 0)
+	}
+
+	v7 := tw.blk.V7()
+	v7.TypeFlag()[0] = hdr.Typeflag
+	fmtStr(v7.Name(), hdr.Name)
+	fmtStr(v7.LinkName(), hdr.Linkname)
+	fmtNum(v7.Mode(), hdr.Mode)
+	fmtNum(v7.UID(), int64(hdr.Uid))
+	fmtNum(v7.GID(), int64(hdr.Gid))
+	fmtNum(v7.Size(), hdr.Size)
+	fmtNum(v7.ModTime(), modTime.Unix())
+
+	ustar := tw.blk.USTAR()
+	fmtStr(ustar.UserName(), hdr.Uname)
+	fmtStr(ustar.GroupName(), hdr.Gname)
+	fmtNum(ustar.DevMajor(), hdr.Devmajor)
+	fmtNum(ustar.DevMinor(), hdr.Devminor)
+
+	return &tw.blk
+}
+
+// writeRawFile writes a minimal file with the given name and flag type.
+// It uses format to encode the header format and will write data as the body.
+// It uses default values for all of the other fields (as BSD and GNU tar does).
+func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error {
+	tw.blk.Reset()
+
+	// Best effort for the filename.
+	name = toASCII(name)
+	if len(name) > nameSize {
+		name = name[:nameSize]
+	}
+	name = strings.TrimRight(name, "/")
+
+	var f formatter
+	v7 := tw.blk.V7()
+	v7.TypeFlag()[0] = flag
+	f.formatString(v7.Name(), name)
+	f.formatOctal(v7.Mode(), 0)
+	f.formatOctal(v7.UID(), 0)
+	f.formatOctal(v7.GID(), 0)
+	f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB
+	f.formatOctal(v7.ModTime(), 0)
+	tw.blk.SetFormat(format)
+	if f.err != nil {
+		return f.err // Only occurs if size condition is violated
+	}
+
+	// Write the header and data.
+	if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil {
+		return err
+	}
+	_, err := io.WriteString(tw, data)
+	return err
+}
+
+// writeRawHeader writes the value of blk, regardless of its value.
+// It sets up the Writer such that it can accept a file of the given size.
+// If the flag is a special header-only flag, then the size is treated as zero.
+func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error {
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	if _, err := tw.w.Write(blk[:]); err != nil {
+		return err
+	}
+	if isHeaderOnlyType(flag) {
+		size = 0
+	}
+	tw.curr = &regFileWriter{tw.w, size}
+	tw.pad = blockPadding(size)
+	return nil
 }
 
 // splitUSTARPath splits a path according to USTAR prefix and suffix rules.
@@ -270,95 +397,233 @@
 	return name[:i], name[i+1:], true
 }
 
-// writePaxHeader writes an extended pax header to the
-// archive.
-func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
-	// Prepare extended header
-	ext := new(Header)
-	ext.Typeflag = TypeXHeader
-	// Setting ModTime is required for reader parsing to
-	// succeed, and seems harmless enough.
-	ext.ModTime = hdr.ModTime
-	// The spec asks that we namespace our pseudo files
-	// with the current pid. However, this results in differing outputs
-	// for identical inputs. As such, the constant 0 is now used instead.
-	// golang.org/issue/12358
-	dir, file := path.Split(hdr.Name)
-	fullName := path.Join(dir, "PaxHeaders.0", file)
-
-	ascii := toASCII(fullName)
-	if len(ascii) > nameSize {
-		ascii = ascii[:nameSize]
-	}
-	ext.Name = ascii
-	// Construct the body
-	var buf bytes.Buffer
-
-	// Keys are sorted before writing to body to allow deterministic output.
-	keys := make([]string, 0, len(paxHeaders))
-	for k := range paxHeaders {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-
-	for _, k := range keys {
-		fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
-	}
-
-	ext.Size = int64(len(buf.Bytes()))
-	if err := tw.writeHeader(ext, false); err != nil {
-		return err
-	}
-	if _, err := tw.Write(buf.Bytes()); err != nil {
-		return err
-	}
-	if err := tw.Flush(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Write writes to the current entry in the tar archive.
+// Write writes to the current file in the tar archive.
 // Write returns the error ErrWriteTooLong if more than
-// hdr.Size bytes are written after WriteHeader.
-func (tw *Writer) Write(b []byte) (n int, err error) {
-	if tw.closed {
-		err = ErrWriteAfterClose
-		return
+// Header.Size bytes are written after WriteHeader.
+//
+// If the current file is sparse, then the regions marked as a hole
+// must be written as NUL-bytes.
+//
+// Calling Write on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless
+// of what the Header.Size claims.
+func (tw *Writer) Write(b []byte) (int, error) {
+	if tw.err != nil {
+		return 0, tw.err
 	}
-	overwrite := false
-	if int64(len(b)) > tw.nb {
-		b = b[0:tw.nb]
-		overwrite = true
+	n, err := tw.curr.Write(b)
+	if err != nil && err != ErrWriteTooLong {
+		tw.err = err
 	}
-	n, err = tw.w.Write(b)
-	tw.nb -= int64(n)
-	if err == nil && overwrite {
-		err = ErrWriteTooLong
-		return
-	}
-	tw.err = err
-	return
+	return n, err
 }
 
-// Close closes the tar archive, flushing any unwritten
-// data to the underlying writer.
-func (tw *Writer) Close() error {
-	if tw.err != nil || tw.closed {
-		return tw.err
+// ReadFrom populates the content of the current file by reading from r.
+// The bytes read must match the number of remaining bytes in the current file.
+//
+// If the current file is sparse and r is an io.ReadSeeker,
+// then ReadFrom uses Seek to skip past holes defined in Header.SparseHoles,
+// assuming that skipped regions are all NULs.
+// This always reads the last byte to ensure r is the right size.
+func (tw *Writer) ReadFrom(r io.Reader) (int64, error) {
+	if tw.err != nil {
+		return 0, tw.err
 	}
-	tw.Flush()
-	tw.closed = true
+	n, err := tw.curr.ReadFrom(r)
+	if err != nil && err != ErrWriteTooLong {
+		tw.err = err
+	}
+	return n, err
+}
+
+// Close closes the tar archive by flushing the padding, and writing the footer.
+// If the current file (from a prior call to WriteHeader) is not fully written,
+// then this returns an error.
+func (tw *Writer) Close() error {
+	if tw.err == ErrWriteAfterClose {
+		return nil
+	}
 	if tw.err != nil {
 		return tw.err
 	}
 
-	// trailer: two zero blocks
-	for i := 0; i < 2; i++ {
-		_, tw.err = tw.w.Write(zeroBlock[:])
-		if tw.err != nil {
-			break
+	// Trailer: two zero blocks.
+	err := tw.Flush()
+	for i := 0; i < 2 && err == nil; i++ {
+		_, err = tw.w.Write(zeroBlock[:])
+	}
+
+	// Ensure all future actions are invalid.
+	tw.err = ErrWriteAfterClose
+	return err // Report IO errors
+}
+
+// regFileWriter is a fileWriter for writing data to a regular file entry.
+type regFileWriter struct {
+	w  io.Writer // Underlying Writer
+	nb int64     // Number of remaining bytes to write
+}
+
+func (fw *regFileWriter) Write(b []byte) (n int, err error) {
+	overwrite := int64(len(b)) > fw.nb
+	if overwrite {
+		b = b[:fw.nb]
+	}
+	if len(b) > 0 {
+		n, err = fw.w.Write(b)
+		fw.nb -= int64(n)
+	}
+	switch {
+	case err != nil:
+		return n, err
+	case overwrite:
+		return n, ErrWriteTooLong
+	default:
+		return n, nil
+	}
+}
+
+func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) {
+	return io.Copy(struct{ io.Writer }{fw}, r)
+}
+
+func (fw regFileWriter) LogicalRemaining() int64 {
+	return fw.nb
+}
+func (fw regFileWriter) PhysicalRemaining() int64 {
+	return fw.nb
+}
+
+// sparseFileWriter is a fileWriter for writing data to a sparse file entry.
+type sparseFileWriter struct {
+	fw  fileWriter  // Underlying fileWriter
+	sp  sparseDatas // Normalized list of data fragments
+	pos int64       // Current position in sparse file
+}
+
+func (sw *sparseFileWriter) Write(b []byte) (n int, err error) {
+	overwrite := int64(len(b)) > sw.LogicalRemaining()
+	if overwrite {
+		b = b[:sw.LogicalRemaining()]
+	}
+
+	b0 := b
+	endPos := sw.pos + int64(len(b))
+	for endPos > sw.pos && err == nil {
+		var nf int // Bytes written in fragment
+		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+		if sw.pos < dataStart { // In a hole fragment
+			bf := b[:min(int64(len(b)), dataStart-sw.pos)]
+			nf, err = zeroWriter{}.Write(bf)
+		} else { // In a data fragment
+			bf := b[:min(int64(len(b)), dataEnd-sw.pos)]
+			nf, err = sw.fw.Write(bf)
+		}
+		b = b[nf:]
+		sw.pos += int64(nf)
+		if sw.pos >= dataEnd && len(sw.sp) > 1 {
+			sw.sp = sw.sp[1:] // Ensure last fragment always remains
 		}
 	}
-	return tw.err
+
+	n = len(b0) - len(b)
+	switch {
+	case err == ErrWriteTooLong:
+		return n, errMissData // Not possible; implies bug in validation logic
+	case err != nil:
+		return n, err
+	case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+		return n, errUnrefData // Not possible; implies bug in validation logic
+	case overwrite:
+		return n, ErrWriteTooLong
+	default:
+		return n, nil
+	}
+}
+
+func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) {
+	rs, ok := r.(io.ReadSeeker)
+	if ok {
+		if _, err := rs.Seek(0, io.SeekCurrent); err != nil {
+			ok = false // Not all io.Seeker can really seek
+		}
+	}
+	if !ok {
+		return io.Copy(struct{ io.Writer }{sw}, r)
+	}
+
+	var readLastByte bool
+	pos0 := sw.pos
+	for sw.LogicalRemaining() > 0 && !readLastByte && err == nil {
+		var nf int64 // Size of fragment
+		dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset()
+		if sw.pos < dataStart { // In a hole fragment
+			nf = dataStart - sw.pos
+			if sw.PhysicalRemaining() == 0 {
+				readLastByte = true
+				nf--
+			}
+			_, err = rs.Seek(nf, io.SeekCurrent)
+		} else { // In a data fragment
+			nf = dataEnd - sw.pos
+			nf, err = io.CopyN(sw.fw, rs, nf)
+		}
+		sw.pos += nf
+		if sw.pos >= dataEnd && len(sw.sp) > 1 {
+			sw.sp = sw.sp[1:] // Ensure last fragment always remains
+		}
+	}
+
+	// If the last fragment is a hole, then seek to 1-byte before EOF, and
+	// read a single byte to ensure the file is the right size.
+	if readLastByte && err == nil {
+		_, err = mustReadFull(rs, []byte{0})
+		sw.pos++
+	}
+
+	n = sw.pos - pos0
+	switch {
+	case err == io.EOF:
+		return n, io.ErrUnexpectedEOF
+	case err == ErrWriteTooLong:
+		return n, errMissData // Not possible; implies bug in validation logic
+	case err != nil:
+		return n, err
+	case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0:
+		return n, errUnrefData // Not possible; implies bug in validation logic
+	default:
+		return n, ensureEOF(rs)
+	}
+}
+
+func (sw sparseFileWriter) LogicalRemaining() int64 {
+	return sw.sp[len(sw.sp)-1].endOffset() - sw.pos
+}
+func (sw sparseFileWriter) PhysicalRemaining() int64 {
+	return sw.fw.PhysicalRemaining()
+}
+
+// zeroWriter may only be written with NULs, otherwise it returns errWriteHole.
+type zeroWriter struct{}
+
+func (zeroWriter) Write(b []byte) (int, error) {
+	for i, c := range b {
+		if c != 0 {
+			return i, errWriteHole
+		}
+	}
+	return len(b), nil
+}
+
+// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so.
+func ensureEOF(r io.Reader) error {
+	n, err := tryReadFull(r, []byte{0})
+	switch {
+	case n > 0:
+		return ErrWriteTooLong
+	case err == io.EOF:
+		return nil
+	default:
+		return err
+	}
 }
diff --git a/vendor/github.com/docker/libnetwork/agent.go b/vendor/github.com/docker/libnetwork/agent.go
index f120065..085b8c4 100644
--- a/vendor/github.com/docker/libnetwork/agent.go
+++ b/vendor/github.com/docker/libnetwork/agent.go
@@ -293,11 +293,13 @@
 			c.Config().Daemon.NetworkControlPlaneMTU, netDBConf.PacketBufferSize)
 	}
 	nDB, err := networkdb.New(netDBConf)
-
 	if err != nil {
 		return err
 	}
 
+	// Register the diagnose handlers
+	c.DiagnoseServer.RegisterHandler(nDB, networkdb.NetDbPaths2Func)
+
 	var cancelList []func()
 	ch, cancel := nDB.Watch(libnetworkEPTable, "", "")
 	cancelList = append(cancelList, cancel)
@@ -436,7 +438,7 @@
 	for eid, value := range entries {
 		var epRec EndpointRecord
 		nid := n.ID()
-		if err := proto.Unmarshal(value.([]byte), &epRec); err != nil {
+		if err := proto.Unmarshal(value.Value, &epRec); err != nil {
 			logrus.Errorf("Unmarshal of libnetworkEPTable failed for endpoint %s in network %s, %v", eid, nid, err)
 			continue
 		}
@@ -461,7 +463,7 @@
 		}
 		entries := agent.networkDB.GetTableByNetwork(table.name, n.id)
 		for key, value := range entries {
-			epID, info := d.DecodeTableEntry(table.name, key, value.([]byte))
+			epID, info := d.DecodeTableEntry(table.name, key, value.Value)
 			if ep, ok := eps[epID]; !ok {
 				logrus.Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID)
 			} else {
diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go
index 236095c..e938948 100644
--- a/vendor/github.com/docker/libnetwork/controller.go
+++ b/vendor/github.com/docker/libnetwork/controller.go
@@ -60,6 +60,7 @@
 	"github.com/docker/libnetwork/cluster"
 	"github.com/docker/libnetwork/config"
 	"github.com/docker/libnetwork/datastore"
+	"github.com/docker/libnetwork/diagnose"
 	"github.com/docker/libnetwork/discoverapi"
 	"github.com/docker/libnetwork/driverapi"
 	"github.com/docker/libnetwork/drvregistry"
@@ -133,6 +134,13 @@
 
 	// SetKeys configures the encryption key for gossip and overlay data path
 	SetKeys(keys []*types.EncryptionKey) error
+
+	// StartDiagnose start the network diagnose mode
+	StartDiagnose(port int)
+	// StopDiagnose stops the network diagnose mode
+	StopDiagnose()
+	// IsDiagnoseEnabled returns true if the diagnose is enabled
+	IsDiagnoseEnabled() bool
 }
 
 // NetworkWalker is a client provided function which will be used to walk the Networks.
@@ -167,6 +175,7 @@
 	agentStopDone          chan struct{}
 	keys                   []*types.EncryptionKey
 	clusterConfigAvailable bool
+	DiagnoseServer         *diagnose.Server
 	sync.Mutex
 }
 
@@ -185,7 +194,9 @@
 		serviceBindings: make(map[serviceKey]*service),
 		agentInitDone:   make(chan struct{}),
 		networkLocker:   locker.New(),
+		DiagnoseServer:  diagnose.New(),
 	}
+	c.DiagnoseServer.Init()
 
 	if err := c.initStores(); err != nil {
 		return nil, err
@@ -837,11 +848,34 @@
 	if err = c.updateToStore(network); err != nil {
 		return nil, err
 	}
+	defer func() {
+		if err != nil {
+			if e := c.deleteFromStore(network); e != nil {
+				logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e)
+			}
+		}
+	}()
+
 	if network.configOnly {
 		return network, nil
 	}
 
 	joinCluster(network)
+	defer func() {
+		if err != nil {
+			network.cancelDriverWatches()
+			if e := network.leaveCluster(); e != nil {
+				logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e)
+			}
+		}
+	}()
+
+	if len(network.loadBalancerIP) != 0 {
+		if err = network.createLoadBalancerSandbox(); err != nil {
+			return nil, err
+		}
+	}
+
 	if !c.isDistributedControl() {
 		c.Lock()
 		arrangeIngressFilterRule()
@@ -1268,3 +1302,28 @@
 	c.stopExternalKeyListener()
 	osl.GC()
 }
+
+// StartDiagnose start the network diagnose mode
+func (c *controller) StartDiagnose(port int) {
+	c.Lock()
+	if !c.DiagnoseServer.IsDebugEnable() {
+		c.DiagnoseServer.EnableDebug("127.0.0.1", port)
+	}
+	c.Unlock()
+}
+
+// StopDiagnose stops the network diagnose mode
+func (c *controller) StopDiagnose() {
+	c.Lock()
+	if c.DiagnoseServer.IsDebugEnable() {
+		c.DiagnoseServer.DisableDebug()
+	}
+	c.Unlock()
+}
+
+// IsDiagnoseEnabled returns true if the diagnose is enabled
+func (c *controller) IsDiagnoseEnabled() bool {
+	c.Lock()
+	defer c.Unlock()
+	return c.DiagnoseServer.IsDebugEnable()
+}
diff --git a/vendor/github.com/docker/libnetwork/default_gateway_solaris.go b/vendor/github.com/docker/libnetwork/default_gateway_solaris.go
deleted file mode 100644
index 8d86a66..0000000
--- a/vendor/github.com/docker/libnetwork/default_gateway_solaris.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package libnetwork
-
-import (
-	"fmt"
-	"strconv"
-
-	"github.com/docker/libnetwork/drivers/solaris/bridge"
-)
-
-const libnGWNetwork = "docker_gwbridge"
-
-func getPlatformOption() EndpointOption {
-	return nil
-}
-
-func (c *controller) createGWNetwork() (Network, error) {
-	netOption := map[string]string{
-		bridge.BridgeName:         libnGWNetwork,
-		bridge.EnableICC:          strconv.FormatBool(false),
-		bridge.EnableIPMasquerade: strconv.FormatBool(true),
-	}
-
-	n, err := c.NewNetwork("bridge", libnGWNetwork, "",
-		NetworkOptionDriverOpts(netOption),
-		NetworkOptionEnableIPv6(false),
-	)
-
-	if err != nil {
-		return nil, fmt.Errorf("error creating external connectivity network: %v", err)
-	}
-	return n, err
-}
diff --git a/vendor/github.com/docker/libnetwork/diagnose/diagnose.go b/vendor/github.com/docker/libnetwork/diagnose/diagnose.go
deleted file mode 100644
index 2849397..0000000
--- a/vendor/github.com/docker/libnetwork/diagnose/diagnose.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package diagnose
-
-import (
-	"fmt"
-	"net"
-	"net/http"
-	"sync"
-
-	"github.com/sirupsen/logrus"
-)
-
-// HTTPHandlerFunc TODO
-type HTTPHandlerFunc func(interface{}, http.ResponseWriter, *http.Request)
-
-type httpHandlerCustom struct {
-	ctx interface{}
-	F   func(interface{}, http.ResponseWriter, *http.Request)
-}
-
-// ServeHTTP TODO
-func (h httpHandlerCustom) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	h.F(h.ctx, w, r)
-}
-
-var diagPaths2Func = map[string]HTTPHandlerFunc{
-	"/":      notImplemented,
-	"/help":  help,
-	"/ready": ready,
-}
-
-// Server when the debug is enabled exposes a
-// This data structure is protected by the Agent mutex so does not require and additional mutex here
-type Server struct {
-	sk                net.Listener
-	port              int
-	mux               *http.ServeMux
-	registeredHanders []string
-	sync.Mutex
-}
-
-// Init TODO
-func (n *Server) Init() {
-	n.mux = http.NewServeMux()
-
-	// Register local handlers
-	n.RegisterHandler(n, diagPaths2Func)
-}
-
-// RegisterHandler TODO
-func (n *Server) RegisterHandler(ctx interface{}, hdlrs map[string]HTTPHandlerFunc) {
-	n.Lock()
-	defer n.Unlock()
-	for path, fun := range hdlrs {
-		n.mux.Handle(path, httpHandlerCustom{ctx, fun})
-		n.registeredHanders = append(n.registeredHanders, path)
-	}
-}
-
-// EnableDebug opens a TCP socket to debug the passed network DB
-func (n *Server) EnableDebug(ip string, port int) {
-	n.Lock()
-	defer n.Unlock()
-
-	n.port = port
-	logrus.SetLevel(logrus.DebugLevel)
-
-	if n.sk != nil {
-		logrus.Infof("The server is already up and running")
-		return
-	}
-
-	logrus.Infof("Starting the server listening on %d for commands", port)
-
-	// // Create the socket
-	// var err error
-	// n.sk, err = net.Listen("tcp", listeningAddr)
-	// if err != nil {
-	// 	log.Fatal(err)
-	// }
-	//
-	// go func() {
-	// 	http.Serve(n.sk, n.mux)
-	// }()
-	http.ListenAndServe(":8000", n.mux)
-}
-
-// DisableDebug stop the dubug and closes the tcp socket
-func (n *Server) DisableDebug() {
-	n.Lock()
-	defer n.Unlock()
-	n.sk.Close()
-	n.sk = nil
-}
-
-// IsDebugEnable returns true when the debug is enabled
-func (n *Server) IsDebugEnable() bool {
-	n.Lock()
-	defer n.Unlock()
-	return n.sk != nil
-}
-
-func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) {
-	fmt.Fprintf(w, "URL path: %s no method implemented check /help\n", r.URL.Path)
-}
-
-func help(ctx interface{}, w http.ResponseWriter, r *http.Request) {
-	n, ok := ctx.(*Server)
-	if ok {
-		for _, path := range n.registeredHanders {
-			fmt.Fprintf(w, "%s\n", path)
-		}
-	}
-}
-
-func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) {
-	fmt.Fprintf(w, "OK\n")
-}
-
-// DebugHTTPForm TODO
-func DebugHTTPForm(r *http.Request) {
-	r.ParseForm()
-	for k, v := range r.Form {
-		logrus.Debugf("Form[%q] = %q\n", k, v)
-	}
-}
-
-// HTTPReplyError TODO
-func HTTPReplyError(w http.ResponseWriter, message, usage string) {
-	fmt.Fprintf(w, "%s\n", message)
-	if usage != "" {
-		fmt.Fprintf(w, "Usage: %s\n", usage)
-	}
-}
diff --git a/vendor/github.com/docker/libnetwork/diagnose/server.go b/vendor/github.com/docker/libnetwork/diagnose/server.go
new file mode 100644
index 0000000..c841e51
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/diagnose/server.go
@@ -0,0 +1,228 @@
+package diagnose
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sync"
+	"sync/atomic"
+
+	stackdump "github.com/docker/docker/pkg/signal"
+	"github.com/docker/libnetwork/common"
+	"github.com/sirupsen/logrus"
+)
+
+// HTTPHandlerFunc TODO
+type HTTPHandlerFunc func(interface{}, http.ResponseWriter, *http.Request)
+
+type httpHandlerCustom struct {
+	ctx interface{}
+	F   func(interface{}, http.ResponseWriter, *http.Request)
+}
+
+// ServeHTTP TODO
+func (h httpHandlerCustom) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	h.F(h.ctx, w, r)
+}
+
+var diagPaths2Func = map[string]HTTPHandlerFunc{
+	"/":          notImplemented,
+	"/help":      help,
+	"/ready":     ready,
+	"/stackdump": stackTrace,
+}
+
+// Server when the debug is enabled exposes an http endpoint
+// This data structure is protected by the Agent mutex so does not require an additional mutex here
+type Server struct {
+	enable            int32
+	srv               *http.Server
+	port              int
+	mux               *http.ServeMux
+	registeredHanders map[string]bool
+	sync.Mutex
+}
+
+// New creates a new diagnose server
+func New() *Server {
+	return &Server{
+		registeredHanders: make(map[string]bool),
+	}
+}
+
+// Init initialize the mux for the http handling and register the base hooks
+func (s *Server) Init() {
+	s.mux = http.NewServeMux()
+
+	// Register local handlers
+	s.RegisterHandler(s, diagPaths2Func)
+}
+
+// RegisterHandler allows to register new handlers to the mux and to a specific path
+func (s *Server) RegisterHandler(ctx interface{}, hdlrs map[string]HTTPHandlerFunc) {
+	s.Lock()
+	defer s.Unlock()
+	for path, fun := range hdlrs {
+		if _, ok := s.registeredHanders[path]; ok {
+			continue
+		}
+		s.mux.Handle(path, httpHandlerCustom{ctx, fun})
+		s.registeredHanders[path] = true
+	}
+}
+
+// ServeHTTP this is the method called by the ListenAndServe, and is needed to allow us to
+// use our custom mux
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	s.mux.ServeHTTP(w, r)
+}
+
+// EnableDebug opens a TCP socket to debug the passed network DB
+func (s *Server) EnableDebug(ip string, port int) {
+	s.Lock()
+	defer s.Unlock()
+
+	s.port = port
+
+	if s.enable == 1 {
+		logrus.Info("The server is already up and running")
+		return
+	}
+
+	logrus.Infof("Starting the diagnose server listening on %d for commands", port)
+	srv := &http.Server{Addr: fmt.Sprintf("127.0.0.1:%d", port), Handler: s}
+	s.srv = srv
+	s.enable = 1
+	go func(n *Server) {
+		// Ignore ErrServerClosed that is returned on the Shutdown call
+		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+			logrus.Errorf("ListenAndServe error: %s", err)
+			atomic.SwapInt32(&n.enable, 0)
+		}
+	}(s)
+
+}
+
+// DisableDebug stops the debug server and closes the tcp socket
+func (s *Server) DisableDebug() {
+	s.Lock()
+	defer s.Unlock()
+
+	s.srv.Shutdown(context.Background())
+	s.srv = nil
+	s.enable = 0
+	logrus.Info("Disabling the diagnose server")
+}
+
+// IsDebugEnable returns true when the debug is enabled
+func (s *Server) IsDebugEnable() bool {
+	s.Lock()
+	defer s.Unlock()
+	return s.enable == 1
+}
+
+func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	_, json := ParseHTTPFormOptions(r)
+	rsp := WrongCommand("not implemented", fmt.Sprintf("URL path: %s no method implemented check /help\n", r.URL.Path))
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("command not implemented done")
+
+	HTTPReply(w, rsp, json)
+}
+
+func help(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	_, json := ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("help done")
+
+	n, ok := ctx.(*Server)
+	var result string
+	if ok {
+		for path := range n.registeredHanders {
+			result += fmt.Sprintf("%s\n", path)
+		}
+		HTTPReply(w, CommandSucceed(&StringCmd{Info: result}), json)
+	}
+}
+
+func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	_, json := ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("ready done")
+	HTTPReply(w, CommandSucceed(&StringCmd{Info: "OK"}), json)
+}
+
+func stackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	_, json := ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("stack trace")
+
+	path, err := stackdump.DumpStacks("/tmp/")
+	if err != nil {
+		log.WithError(err).Error("failed to write goroutines dump")
+		HTTPReply(w, FailCommand(err), json)
+	} else {
+		log.Info("stack trace done")
+		HTTPReply(w, CommandSucceed(&StringCmd{Info: fmt.Sprintf("goroutine stacks written to %s", path)}), json)
+	}
+}
+
+// DebugHTTPForm helper to print the form url parameters
+func DebugHTTPForm(r *http.Request) {
+	for k, v := range r.Form {
+		logrus.Debugf("Form[%q] = %q\n", k, v)
+	}
+}
+
+// JSONOutput contains details on JSON output printing
+type JSONOutput struct {
+	enable      bool
+	prettyPrint bool
+}
+
+// ParseHTTPFormOptions easily parse the JSON printing options
+func ParseHTTPFormOptions(r *http.Request) (bool, *JSONOutput) {
+	_, unsafe := r.Form["unsafe"]
+	v, json := r.Form["json"]
+	var pretty bool
+	if len(v) > 0 {
+		pretty = v[0] == "pretty"
+	}
+	return unsafe, &JSONOutput{enable: json, prettyPrint: pretty}
+}
+
+// HTTPReply helper function that takes care of sending the message out
+func HTTPReply(w http.ResponseWriter, r *HTTPResult, j *JSONOutput) (int, error) {
+	var response []byte
+	if j.enable {
+		w.Header().Set("Content-Type", "application/json")
+		var err error
+		if j.prettyPrint {
+			response, err = json.MarshalIndent(r, "", "  ")
+			if err != nil {
+				response, _ = json.MarshalIndent(FailCommand(err), "", "  ")
+			}
+		} else {
+			response, err = json.Marshal(r)
+			if err != nil {
+				response, _ = json.Marshal(FailCommand(err))
+			}
+		}
+	} else {
+		response = []byte(r.String())
+	}
+	return fmt.Fprint(w, string(response))
+}
diff --git a/vendor/github.com/docker/libnetwork/diagnose/types.go b/vendor/github.com/docker/libnetwork/diagnose/types.go
new file mode 100644
index 0000000..982c54a
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/diagnose/types.go
@@ -0,0 +1,122 @@
+package diagnose
+
+import "fmt"
+
+// StringInterface interface that has to be implemented by messages
+type StringInterface interface {
+	String() string
+}
+
+// CommandSucceed creates a success message
+func CommandSucceed(result StringInterface) *HTTPResult {
+	return &HTTPResult{
+		Message: "OK",
+		Details: result,
+	}
+}
+
+// FailCommand creates a failure message with error
+func FailCommand(err error) *HTTPResult {
+	return &HTTPResult{
+		Message: "FAIL",
+		Details: &ErrorCmd{Error: err.Error()},
+	}
+}
+
+// WrongCommand creates a wrong command response
+func WrongCommand(message, usage string) *HTTPResult {
+	return &HTTPResult{
+		Message: message,
+		Details: &UsageCmd{Usage: usage},
+	}
+}
+
+// HTTPResult Diagnose Server HTTP result operation
+type HTTPResult struct {
+	Message string          `json:"message"`
+	Details StringInterface `json:"details"`
+}
+
+func (h *HTTPResult) String() string {
+	rsp := h.Message
+	if h.Details != nil {
+		rsp += "\n" + h.Details.String()
+	}
+	return rsp
+}
+
+// UsageCmd command with usage field
+type UsageCmd struct {
+	Usage string `json:"usage"`
+}
+
+func (u *UsageCmd) String() string {
+	return "Usage: " + u.Usage
+}
+
+// StringCmd command with info string
+type StringCmd struct {
+	Info string `json:"info"`
+}
+
+func (s *StringCmd) String() string {
+	return s.Info
+}
+
+// ErrorCmd command with error
+type ErrorCmd struct {
+	Error string `json:"error"`
+}
+
+func (e *ErrorCmd) String() string {
+	return "Error: " + e.Error
+}
+
+// TableObj network db table object
+type TableObj struct {
+	Length   int               `json:"size"`
+	Elements []StringInterface `json:"entries"`
+}
+
+func (t *TableObj) String() string {
+	output := fmt.Sprintf("total entries: %d\n", t.Length)
+	for _, e := range t.Elements {
+		output += e.String()
+	}
+	return output
+}
+
+// PeerEntryObj entry in the networkdb peer table
+type PeerEntryObj struct {
+	Index int    `json:"-"`
+	Name  string `json:"-=name"`
+	IP    string `json:"ip"`
+}
+
+func (p *PeerEntryObj) String() string {
+	return fmt.Sprintf("%d) %s -> %s\n", p.Index, p.Name, p.IP)
+}
+
+// TableEntryObj network db table entry object
+type TableEntryObj struct {
+	Index int    `json:"-"`
+	Key   string `json:"key"`
+	Value string `json:"value"`
+	Owner string `json:"owner"`
+}
+
+func (t *TableEntryObj) String() string {
+	return fmt.Sprintf("%d) k:`%s` -> v:`%s` owner:`%s`\n", t.Index, t.Key, t.Value, t.Owner)
+}
+
+// TableEndpointsResult fully typed message for proper unmarshaling on the client side
+type TableEndpointsResult struct {
+	TableObj
+	Elements []TableEntryObj `json:"entries"`
+}
+
+// TablePeersResult fully typed message for proper unmarshaling on the client side
+type TablePeersResult struct {
+	TableObj
+	Elements []PeerEntryObj `json:"entries"`
+}
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
index 1742c8d..1fa8f0e 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -42,6 +42,14 @@
 	DefaultGatewayV6AuxKey = "DefaultGatewayIPv6"
 )
 
+type defaultBridgeNetworkConflict struct {
+	ID string
+}
+
+func (d defaultBridgeNetworkConflict) Error() string {
+	return fmt.Sprintf("Stale default bridge network %s", d.ID)
+}
+
 type iptableCleanFunc func() error
 type iptablesCleanFuncs []iptableCleanFunc
 
@@ -137,6 +145,7 @@
 	networks       map[string]*bridgeNetwork
 	store          datastore.DataStore
 	nlh            *netlink.Handle
+	configNetwork  sync.Mutex
 	sync.Mutex
 }
 
@@ -322,41 +331,6 @@
 	return nil
 }
 
-// Checks whether this network's configuration for the network with this id conflicts with any of the passed networks
-func (c *networkConfiguration) conflictsWithNetworks(id string, others []*bridgeNetwork) error {
-	for _, nw := range others {
-
-		nw.Lock()
-		nwID := nw.id
-		nwConfig := nw.config
-		nwBridge := nw.bridge
-		nw.Unlock()
-
-		if nwID == id {
-			continue
-		}
-		// Verify the name (which may have been set by newInterface()) does not conflict with
-		// existing bridge interfaces. Ironically the system chosen name gets stored in the config...
-		// Basically we are checking if the two original configs were both empty.
-		if nwConfig.BridgeName == c.BridgeName {
-			return types.ForbiddenErrorf("conflicts with network %s (%s) by bridge name", nwID, nwConfig.BridgeName)
-		}
-		// If this network config specifies the AddressIPv4, we need
-		// to make sure it does not conflict with any previously allocated
-		// bridges. This could not be completely caught by the config conflict
-		// check, because networks which config does not specify the AddressIPv4
-		// get their address and subnet selected by the driver (see electBridgeIPv4())
-		if c.AddressIPv4 != nil && nwBridge.bridgeIPv4 != nil {
-			if nwBridge.bridgeIPv4.Contains(c.AddressIPv4.IP) ||
-				c.AddressIPv4.Contains(nwBridge.bridgeIPv4.IP) {
-				return types.ForbiddenErrorf("conflicts with network %s (%s) by ip network", nwID, nwConfig.BridgeName)
-			}
-		}
-	}
-
-	return nil
-}
-
 func (d *driver) configure(option map[string]interface{}) error {
 	var (
 		config         *configuration
@@ -602,11 +576,27 @@
 		return err
 	}
 
-	err = config.processIPAM(id, ipV4Data, ipV6Data)
-	if err != nil {
+	if err = config.processIPAM(id, ipV4Data, ipV6Data); err != nil {
 		return err
 	}
 
+	// start the critical section, from this point onward we are dealing with the list of networks
+	// so to be consistent we cannot allow that the list changes
+	d.configNetwork.Lock()
+	defer d.configNetwork.Unlock()
+
+	// check network conflicts
+	if err = d.checkConflict(config); err != nil {
+		nerr, ok := err.(defaultBridgeNetworkConflict)
+		if !ok {
+			return err
+		}
+		// Got a conflict with a stale default network, clean that up and continue
+		logrus.Warn(nerr)
+		d.deleteNetwork(nerr.ID)
+	}
+
+	// there is no conflict, now create the network
 	if err = d.createNetwork(config); err != nil {
 		return err
 	}
@@ -614,33 +604,47 @@
 	return d.storeUpdate(config)
 }
 
+func (d *driver) checkConflict(config *networkConfiguration) error {
+	networkList := d.getNetworks()
+	for _, nw := range networkList {
+		nw.Lock()
+		nwConfig := nw.config
+		nw.Unlock()
+		if err := nwConfig.Conflicts(config); err != nil {
+			if config.DefaultBridge {
+				// We encountered and identified a stale default network
+				// We must delete it as libnetwork is the source of truth
+				// The default network being created must be the only one
+				// This can happen only from docker 1.12 onward
+				logrus.Infof("Found stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
+				return defaultBridgeNetworkConflict{nwConfig.ID}
+			}
+
+			return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
+				config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
+		}
+	}
+	return nil
+}
+
 func (d *driver) createNetwork(config *networkConfiguration) error {
 	var err error
 
 	defer osl.InitOSContext()()
 
 	networkList := d.getNetworks()
-	for i, nw := range networkList {
-		nw.Lock()
-		nwConfig := nw.config
-		nw.Unlock()
-		if err := nwConfig.Conflicts(config); err != nil {
-			if config.DefaultBridge {
-				// We encountered and identified a stale default network
-				// We must delete it as libnetwork is the source of thruth
-				// The default network being created must be the only one
-				// This can happen only from docker 1.12 on ward
-				logrus.Infof("Removing stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
-				if err := d.DeleteNetwork(nwConfig.ID); err != nil {
-					logrus.Warnf("Failed to remove stale default network: %s (%s): %v. Will remove from store.", nwConfig.ID, nwConfig.BridgeName, err)
-					d.storeDelete(nwConfig)
-				}
-				networkList = append(networkList[:i], networkList[i+1:]...)
-			} else {
-				return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
-					config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
-			}
-		}
+
+	// Initialize handle when needed
+	d.Lock()
+	if d.nlh == nil {
+		d.nlh = ns.NlHandle()
+	}
+	d.Unlock()
+
+	// Create or retrieve the bridge L3 interface
+	bridgeIface, err := newInterface(d.nlh, config)
+	if err != nil {
+		return err
 	}
 
 	// Create and set network handler in driver
@@ -649,6 +653,7 @@
 		endpoints:  make(map[string]*bridgeEndpoint),
 		config:     config,
 		portMapper: portmapper.New(d.config.UserlandProxyPath),
+		bridge:     bridgeIface,
 		driver:     d,
 	}
 
@@ -665,35 +670,15 @@
 		}
 	}()
 
-	// Initialize handle when needed
-	d.Lock()
-	if d.nlh == nil {
-		d.nlh = ns.NlHandle()
-	}
-	d.Unlock()
-
-	// Create or retrieve the bridge L3 interface
-	bridgeIface, err := newInterface(d.nlh, config)
-	if err != nil {
-		return err
-	}
-	network.bridge = bridgeIface
-
-	// Verify the network configuration does not conflict with previously installed
-	// networks. This step is needed now because driver might have now set the bridge
-	// name on this config struct. And because we need to check for possible address
-	// conflicts, so we need to check against operationa lnetworks.
-	if err = config.conflictsWithNetworks(config.ID, networkList); err != nil {
-		return err
-	}
-
+	// Add inter-network communication rules.
 	setupNetworkIsolationRules := func(config *networkConfiguration, i *bridgeInterface) error {
 		if err := network.isolateNetwork(networkList, true); err != nil {
-			if err := network.isolateNetwork(networkList, false); err != nil {
+			if err = network.isolateNetwork(networkList, false); err != nil {
 				logrus.Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err)
 			}
 			return err
 		}
+		// register the cleanup function
 		network.registerIptCleanFunc(func() error {
 			nwList := d.getNetworks()
 			return network.isolateNetwork(nwList, false)
@@ -767,10 +752,17 @@
 }
 
 func (d *driver) DeleteNetwork(nid string) error {
+
+	d.configNetwork.Lock()
+	defer d.configNetwork.Unlock()
+
+	return d.deleteNetwork(nid)
+}
+
+func (d *driver) deleteNetwork(nid string) error {
 	var err error
 
 	defer osl.InitOSContext()()
-
 	// Get network handler and remove it from driver
 	d.Lock()
 	n, ok := d.networks[nid]
@@ -814,12 +806,6 @@
 		}
 	}()
 
-	// Sanity check
-	if n == nil {
-		err = driverapi.ErrNoNetwork(nid)
-		return err
-	}
-
 	switch config.BridgeIfaceCreator {
 	case ifaceCreatedByLibnetwork, ifaceCreatorUnknown:
 		// We only delete the bridge if it was created by the bridge driver and
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
index fc45a7e..50cbdb1 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
@@ -9,7 +9,7 @@
 	d.Unlock()
 
 	// Sanity check.
-	if driverConfig.EnableIPTables == false {
+	if !driverConfig.EnableIPTables {
 		return IPTableCfgError(config.BridgeName)
 	}
 
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
index 3fbfccf..1131417 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
@@ -696,6 +696,12 @@
 	var nlSock *nl.NetlinkSocket
 	sbox.InvokeFunc(func() {
 		nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)
+		if err != nil {
+			return
+		}
+		// set the receive timeout to not remain stuck on the RecvFrom if the fd gets closed
+		tv := syscall.NsecToTimeval(soTimeout.Nanoseconds())
+		err = nlSock.SetReceiveTimeout(&tv)
 	})
 	n.setNetlinkSocket(nlSock)
 
@@ -721,6 +727,11 @@
 				// The netlink socket got closed, simply exit to not leak this goroutine
 				return
 			}
+			// When the receive timeout expires the receive will return EAGAIN
+			if err == syscall.EAGAIN {
+				// we continue here to avoid spam for timeouts
+				continue
+			}
 			logrus.Errorf("Failed to receive from netlink: %v ", err)
 			continue
 		}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge.go b/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge.go
deleted file mode 100644
index 558157a..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge.go
+++ /dev/null
@@ -1,1263 +0,0 @@
-// +build solaris
-
-package bridge
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"net"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-	"sync"
-
-	"github.com/docker/libnetwork/datastore"
-	"github.com/docker/libnetwork/discoverapi"
-	"github.com/docker/libnetwork/driverapi"
-	"github.com/docker/libnetwork/iptables"
-	"github.com/docker/libnetwork/netlabel"
-	"github.com/docker/libnetwork/netutils"
-	"github.com/docker/libnetwork/options"
-	"github.com/docker/libnetwork/portmapper"
-	"github.com/docker/libnetwork/types"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	networkType = "bridge"
-
-	// DefaultBridgeName is the default name for the bridge interface managed
-	// by the driver when unspecified by the caller.
-	DefaultBridgeName = "docker0"
-
-	// BridgeName label for bridge driver
-	BridgeName = "com.docker.network.bridge.name"
-
-	// EnableIPMasquerade label for bridge driver
-	EnableIPMasquerade = "com.docker.network.bridge.enable_ip_masquerade"
-
-	// EnableICC label
-	EnableICC = "com.docker.network.bridge.enable_icc"
-
-	// DefaultBindingIP label
-	DefaultBindingIP = "com.docker.network.bridge.host_binding_ipv4"
-
-	// DefaultBridge label
-	DefaultBridge = "com.docker.network.bridge.default_bridge"
-
-	// DefaultGatewayV4AuxKey represents the default-gateway configured by the user
-	DefaultGatewayV4AuxKey = "DefaultGatewayIPv4"
-
-	// DefaultGatewayV6AuxKey represents the ipv6 default-gateway configured by the user
-	DefaultGatewayV6AuxKey = "DefaultGatewayIPv6"
-)
-
-// configuration info for the "bridge" driver.
-type configuration struct {
-	EnableIPForwarding  bool
-	EnableIPTables      bool
-	EnableUserlandProxy bool
-}
-
-// networkConfiguration for network specific configuration
-type networkConfiguration struct {
-	ID                 string
-	BridgeName         string
-	BridgeNameInternal string
-	EnableIPv6         bool
-	EnableIPMasquerade bool
-	EnableICC          bool
-	Mtu                int
-	DefaultBindingIntf string
-	DefaultBindingIP   net.IP
-	DefaultBridge      bool
-	// Internal fields set after ipam data parsing
-	AddressIPv4        *net.IPNet
-	AddressIPv6        *net.IPNet
-	DefaultGatewayIPv4 net.IP
-	DefaultGatewayIPv6 net.IP
-	dbIndex            uint64
-	dbExists           bool
-	Internal           bool
-}
-
-// endpointConfiguration represents the user specified configuration for the sandbox endpoint
-type endpointConfiguration struct {
-	MacAddress   net.HardwareAddr
-	PortBindings []types.PortBinding
-	ExposedPorts []types.TransportPort
-}
-
-// containerConfiguration represents the user specified configuration for a container
-type containerConfiguration struct {
-	ParentEndpoints []string
-	ChildEndpoints  []string
-}
-
-// cnnectivityConfiguration represents the user specified configuration regarding the external connectivity
-type connectivityConfiguration struct {
-	PortBindings []types.PortBinding
-	ExposedPorts []types.TransportPort
-}
-
-type bridgeEndpoint struct {
-	id              string
-	nid             string
-	srcName         string
-	addr            *net.IPNet
-	addrv6          *net.IPNet
-	macAddress      net.HardwareAddr
-	config          *endpointConfiguration // User specified parameters
-	containerConfig *containerConfiguration
-	extConnConfig   *connectivityConfiguration
-	portMapping     []types.PortBinding // Operation port bindings
-	dbIndex         uint64
-	dbExists        bool
-}
-
-type bridgeInterface struct {
-	bridgeIPv4  *net.IPNet
-	bridgeIPv6  *net.IPNet
-	gatewayIPv4 net.IP
-	gatewayIPv6 net.IP
-}
-
-type bridgeNetwork struct {
-	id         string
-	bridge     *bridgeInterface
-	config     *networkConfiguration
-	endpoints  map[string]*bridgeEndpoint // key: endpoint id
-	portMapper *portmapper.PortMapper
-	driver     *driver // The network's driver
-	sync.Mutex
-}
-
-type driver struct {
-	config         *configuration
-	network        *bridgeNetwork
-	natChain       *iptables.ChainInfo
-	filterChain    *iptables.ChainInfo
-	isolationChain *iptables.ChainInfo
-	networks       map[string]*bridgeNetwork
-	store          datastore.DataStore
-	sync.Mutex
-	defrouteIP net.IP
-}
-
-// New constructs a new bridge driver
-func newDriver() *driver {
-	return &driver{networks: map[string]*bridgeNetwork{}}
-}
-
-// Init registers a new instance of bridge driver
-func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
-	d := newDriver()
-	if err := d.configure(config); err != nil {
-		return err
-	}
-
-	c := driverapi.Capability{
-		DataScope:         datastore.LocalScope,
-		ConnectivityScope: datastore.LocalScope,
-	}
-	return dc.RegisterDriver(networkType, d, c)
-}
-
-func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) {
-	return nil, types.NotImplementedErrorf("not implemented")
-}
-
-func (d *driver) NetworkFree(id string) error {
-	return types.NotImplementedErrorf("not implemented")
-}
-
-func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
-}
-
-func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) {
-	return "", nil
-}
-
-func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
-	if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" {
-		return types.BadRequestErrorf("ipv4 pool is empty")
-	}
-	// Sanity checks
-	d.Lock()
-	if _, ok := d.networks[id]; ok {
-		d.Unlock()
-		return types.ForbiddenErrorf("network %s exists", id)
-	}
-	d.Unlock()
-
-	// Parse and validate the config. It should not conflict with existing networks' config
-	config, err := parseNetworkOptions(d, id, option)
-	if err != nil {
-		return err
-	}
-
-	err = config.processIPAM(id, ipV4Data, ipV6Data)
-	if err != nil {
-		return err
-	}
-
-	if err = d.createNetwork(config); err != nil {
-		return err
-	}
-
-	return d.storeUpdate(config)
-}
-
-func newInterface(config *networkConfiguration) *bridgeInterface {
-	i := &bridgeInterface{}
-
-	i.bridgeIPv4 = config.AddressIPv4
-	i.gatewayIPv4 = config.AddressIPv4.IP
-	if config.BridgeName == "" {
-		config.BridgeName = DefaultBridgeName
-	}
-	return i
-}
-
-// This function prunes the pf.conf for the firewall
-// that enable the service successfully.
-func fixPFConf() error {
-	conf := "/etc/firewall/pf.conf"
-	f, err := os.Open("/etc/firewall/pf.conf")
-	if err != nil {
-		return fmt.Errorf("cannot open %s: %v", conf, err)
-	}
-	defer f.Close()
-
-	// Look for line beginning with "REMOVE THIS LINE"
-	modify := false
-	lines := []string{}
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		l := scanner.Text()
-		if strings.Contains(l, "REMOVE THIS LINE") {
-			modify = true
-			continue
-		}
-		lines = append(lines, fmt.Sprintf("%s\n", l))
-	}
-	if err = scanner.Err(); err != nil {
-		return fmt.Errorf("cannot open %s: %v", conf, err)
-	}
-
-	// No changes needed to fix pf.conf
-	if !modify {
-		return nil
-	}
-
-	// Write back the file removing the line found above
-	tmpname := "/etc/firewall/pf.conf.tmp." + strconv.Itoa(os.Getpid())
-	tmp, err := os.OpenFile(tmpname,
-		os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_APPEND, 0644)
-	if err != nil {
-		return fmt.Errorf("cannot open %s: %v", tmpname, err)
-	}
-	defer tmp.Close()
-	for _, l := range lines {
-		_, err = tmp.WriteString(l)
-		if err != nil {
-			return fmt.Errorf("cannot write to %s: %v",
-				tmpname, err)
-		}
-	}
-	if err = tmp.Sync(); err != nil {
-		return fmt.Errorf("cannot sync %s: %v", tmpname, err)
-	}
-	if err = os.Rename(tmpname, conf); err != nil {
-		return fmt.Errorf("cannot rename %s to %s: %v",
-			tmpname, conf, err)
-	}
-	return nil
-}
-
-func (d *driver) initFirewall() error {
-	out, err := exec.Command("/usr/bin/svcs", "-Ho", "state",
-		"firewall").Output()
-	if err != nil {
-		return fmt.Errorf("cannot check firewall state: %v", err)
-	}
-	state := strings.TrimSpace(string(out))
-	if state != "online" {
-		if state != "disabled" {
-			return fmt.Errorf("firewall service is in %s state. "+
-				"please enable service manually.", state)
-		}
-		if err = fixPFConf(); err != nil {
-			return fmt.Errorf("cannot verify pf.conf: %v", err)
-		}
-		err = exec.Command("/usr/sbin/svcadm", "enable", "-ts",
-			"firewall").Run()
-		if err != nil {
-			return fmt.Errorf("cannot enable firewall service: %v", err)
-		}
-	}
-	out, err = exec.Command("/usr/sbin/pfctl", "-sr").Output()
-	if err != nil {
-		return fmt.Errorf("failed to list firewall rules: %v", err)
-	}
-	if strings.Contains(string(out), "anchor \"_auto/docker/*\" all") {
-		return nil
-	}
-	pfctlCmd := "(/usr/sbin/pfctl -sr; " +
-		"/usr/bin/echo \"anchor \\\"_auto/docker/*\\\"\") |" +
-		"/usr/sbin/pfctl -f -"
-	err = exec.Command("/usr/bin/bash", "-c", pfctlCmd).Run()
-	if err != nil {
-		return fmt.Errorf("failed to add docker firewall rules: %v", err)
-	}
-	return nil
-}
-
-func (d *driver) initRouting() error {
-	err := exec.Command("/usr/sbin/ipadm", "set-prop", "-t",
-		"-p", "forwarding=on", "ipv4").Run()
-	if err != nil {
-		return fmt.Errorf("cannot switch-on IP forwarding: %v", err)
-	}
-	routeCmd := "/usr/sbin/ipadm show-addr -p -o addr " +
-		"`/usr/sbin/route get default | /usr/bin/grep interface | " +
-		"/usr/bin/awk '{print $2}'`"
-	out, err := exec.Command("/usr/bin/bash", "-c", routeCmd).Output()
-	if err != nil {
-		return fmt.Errorf("cannot get default route: %v", err)
-	}
-	defroute := strings.SplitN(string(out), "/", 2)
-	d.defrouteIP = net.ParseIP(defroute[0])
-	if d.defrouteIP == nil {
-		return &ErrNoIPAddr{}
-	}
-	return nil
-}
-
-func (d *driver) configure(option map[string]interface{}) error {
-	var err error
-
-	if err = d.initFirewall(); err != nil {
-		return fmt.Errorf("failed to configure firewall: %v", err)
-	}
-	if err = d.initRouting(); err != nil {
-		return fmt.Errorf("failed to configure routing: %v", err)
-	}
-	if err = d.initStore(option); err != nil {
-		return fmt.Errorf("failed to initialize datastore: %v", err)
-	}
-
-	return nil
-}
-
-func (d *driver) getNetwork(id string) (*bridgeNetwork, error) {
-	d.Lock()
-	defer d.Unlock()
-
-	if id == "" {
-		return nil, types.BadRequestErrorf("invalid network id: %s", id)
-	}
-
-	if nw, ok := d.networks[id]; ok {
-		return nw, nil
-	}
-
-	return nil, types.NotFoundErrorf("network not found: %s", id)
-}
-
-// Return a slice of networks over which caller can iterate safely
-func (d *driver) getNetworks() []*bridgeNetwork {
-	d.Lock()
-	defer d.Unlock()
-
-	ls := make([]*bridgeNetwork, 0, len(d.networks))
-	for _, nw := range d.networks {
-		ls = append(ls, nw)
-	}
-	return ls
-}
-
-func bridgeSetup(config *networkConfiguration) error {
-	var err error
-	var bindingIntf string
-
-	bridgeName := config.BridgeName
-	gwName := fmt.Sprintf("%s_gw0", bridgeName)
-	gwIP := config.AddressIPv4.String()
-
-	if config.DefaultBindingIP == nil {
-		// Default to net0 if bindingIP is not provided.
-		bindingIntf = "net0"
-	} else {
-		ipadmCmd := "/usr/sbin/ipadm show-addr -p -o addrobj,addr |" +
-			"/usr/bin/grep " + config.DefaultBindingIP.String()
-		out, err := exec.Command("/usr/bin/bash", "-c", ipadmCmd).Output()
-		if err != nil {
-			logrus.Warn("cannot find binding interface")
-			return err
-		}
-		bindingIntf = strings.SplitN(string(out), "/", 2)[0]
-		if bindingIntf == "" {
-			logrus.Warnf("cannot parse binding interface %s", string(out))
-			return &ErrNoIPAddr{}
-		}
-	}
-	config.DefaultBindingIntf = bindingIntf
-
-	err = exec.Command("/usr/sbin/dladm", "create-etherstub",
-		"-t", config.BridgeNameInternal).Run()
-	if err != nil {
-		logrus.Warnf("cannot create etherstub %s: %+v", config.BridgeNameInternal, err)
-		return err
-	}
-	err = exec.Command("/usr/sbin/dladm", "create-vnic",
-		"-t", "-l", config.BridgeNameInternal, gwName).Run()
-	if err != nil {
-		logrus.Warnf("cannot create vnic %s", gwName)
-		return err
-	}
-	err = exec.Command("/usr/sbin/ifconfig", gwName,
-		"plumb", gwIP, "up").Run()
-	if err != nil {
-		logrus.Warnf("cannot create gateway interface %s on %s",
-			gwIP, gwName)
-		return err
-	}
-
-	tableName := "bridge_nw_subnets"
-	pfAnchor := fmt.Sprintf("_auto/docker/%s", tableName)
-	err = exec.Command("/usr/sbin/pfctl", "-a", pfAnchor, "-t", tableName, "-T", "add", gwIP).Run()
-	if err != nil {
-		logrus.Warnf("cannot add bridge network '%s' to PF table", bridgeName)
-	}
-
-	pfCmd := fmt.Sprintf(
-		"/usr/bin/echo \"pass out on %s from %s:network to any nat-to (%s)\n"+
-			"block in quick from { <%s>, ! %s } to %s\" |"+
-			"/usr/sbin/pfctl -a _auto/docker/%s -f -",
-		bindingIntf, gwName, bindingIntf,
-		tableName, gwIP, gwIP,
-		bridgeName)
-	err = exec.Command("/usr/bin/bash", "-c", pfCmd).Run()
-	if err != nil {
-		logrus.Warnf("cannot add pf rule using: %s", pfCmd)
-		return err
-	}
-
-	return nil
-}
-
-func bridgeCleanup(config *networkConfiguration, logErr bool) {
-	var err error
-
-	bridgeName := config.BridgeName
-	tableName := "bridge_nw_subnets"
-	gwName := fmt.Sprintf("%s_gw0", bridgeName)
-	gwIP := config.AddressIPv4.String()
-	pfAnchor := fmt.Sprintf("_auto/docker/%s", bridgeName)
-	tableAnchor := fmt.Sprintf("_auto/docker/%s", tableName)
-
-	err = exec.Command("/usr/sbin/pfctl", "-a", pfAnchor, "-F", "all").Run()
-	if err != nil && logErr {
-		logrus.Warn("cannot flush firewall rules")
-	}
-	err = exec.Command("/usr/sbin/ifconfig", gwName, "unplumb").Run()
-	if err != nil && logErr {
-		logrus.Warn("cannot remove gateway interface")
-	}
-	err = exec.Command("/usr/sbin/dladm", "delete-vnic",
-		"-t", gwName).Run()
-	if err != nil && logErr {
-		logrus.Warn("cannot delete vnic")
-	}
-	err = exec.Command("/usr/sbin/dladm", "delete-etherstub",
-		"-t", config.BridgeNameInternal).Run()
-	if err != nil && logErr {
-		logrus.Warn("cannot delete etherstub")
-	}
-	err = exec.Command("/usr/sbin/pfctl", "-a", tableAnchor, "-t", tableName, "-T", "delete", gwIP).Run()
-	if err != nil && logErr {
-		logrus.Warnf("cannot remove bridge network '%s' from PF table", bridgeName)
-	}
-}
-
-func (d *driver) createNetwork(config *networkConfiguration) error {
-	var err error
-
-	logrus.Infof("Creating bridge network: %s %s %s", config.ID,
-		config.BridgeName, config.AddressIPv4)
-
-	networkList := d.getNetworks()
-	for i, nw := range networkList {
-		nw.Lock()
-		nwConfig := nw.config
-		nw.Unlock()
-		if err := nwConfig.Conflicts(config); err != nil {
-			if config.DefaultBridge {
-				// We encountered and identified a stale default network
-				// We must delete it as libnetwork is the source of thruth
-				// The default network being created must be the only one
-				// This can happen only from docker 1.12 on ward
-				logrus.Infof("Removing stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
-				if err := d.DeleteNetwork(nwConfig.ID); err != nil {
-					logrus.Warnf("Failed to remove stale default network: %s (%s): %v. Will remove from store.", nwConfig.ID, nwConfig.BridgeName, err)
-					d.storeDelete(nwConfig)
-				}
-				networkList = append(networkList[:i], networkList[i+1:]...)
-			} else {
-				return types.ForbiddenErrorf(
-					"cannot create network %s (%s): "+
-						"conflicts with network %s (%s): %s",
-					nwConfig.BridgeName, config.ID, nw.id,
-					nw.config.BridgeName, err.Error())
-			}
-		}
-	}
-	if config.DefaultBindingIP == nil ||
-		config.DefaultBindingIP.IsUnspecified() {
-		config.DefaultBindingIP = d.defrouteIP
-	}
-
-	// Create and set network handler in driver
-	network := &bridgeNetwork{
-		id:         config.ID,
-		endpoints:  make(map[string]*bridgeEndpoint),
-		config:     config,
-		portMapper: portmapper.New(""),
-		driver:     d,
-	}
-
-	d.Lock()
-	d.networks[config.ID] = network
-	d.Unlock()
-
-	// On failure make sure to reset driver network handler to nil
-	defer func() {
-		if err != nil {
-			d.Lock()
-			delete(d.networks, config.ID)
-			d.Unlock()
-		}
-	}()
-
-	// Create or retrieve the bridge L3 interface
-	bridgeIface := newInterface(config)
-	network.bridge = bridgeIface
-
-	// Verify the network configuration does not conflict with previously installed
-	// networks. This step is needed now because driver might have now set the bridge
-	// name on this config struct. And because we need to check for possible address
-	// conflicts, so we need to check against operational networks.
-	if err = config.conflictsWithNetworks(config.ID, networkList); err != nil {
-		return err
-	}
-
-	// We only attempt to create the bridge when the requested device name is
-	// the default one.
-	if config.BridgeName != DefaultBridgeName && config.DefaultBridge {
-		return NonDefaultBridgeExistError(config.BridgeName)
-	}
-
-	bridgeCleanup(config, false)
-	err = bridgeSetup(config)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (d *driver) DeleteNetwork(nid string) error {
-	var err error
-	// Get network handler and remove it from driver
-	d.Lock()
-	n, ok := d.networks[nid]
-	d.Unlock()
-
-	if !ok {
-		return types.InternalMaskableErrorf("network %s does not exist", nid)
-	}
-	d.Lock()
-	delete(d.networks, nid)
-	d.Unlock()
-
-	// On failure set network handler back in driver, but
-	// only if is not already taken over by some other thread
-	defer func() {
-		if err != nil {
-			d.Lock()
-			if _, ok := d.networks[nid]; !ok {
-				d.networks[nid] = n
-			}
-			d.Unlock()
-		}
-	}()
-
-	// Sanity check
-	if n == nil {
-		err = driverapi.ErrNoNetwork(nid)
-		return err
-	}
-
-	// Cannot remove network if endpoints are still present
-	if len(n.endpoints) != 0 {
-		err = ActiveEndpointsError(n.id)
-		return err
-	}
-	bridgeCleanup(n.config, true)
-	logrus.Infof("Deleting bridge network: %s", nid[:12])
-	return d.storeDelete(n.config)
-}
-
-func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error {
-	if ifInfo == nil {
-		return errors.New("invalid interface passed")
-	}
-
-	// Get the network handler and make sure it exists
-	d.Lock()
-	n, ok := d.networks[nid]
-	d.Unlock()
-
-	if !ok {
-		return types.NotFoundErrorf("network %s does not exist", nid)
-	}
-	if n == nil {
-		return driverapi.ErrNoNetwork(nid)
-	}
-
-	// Sanity check
-	n.Lock()
-	if n.id != nid {
-		n.Unlock()
-		return InvalidNetworkIDError(nid)
-	}
-	n.Unlock()
-
-	// Check if endpoint id is good and retrieve correspondent endpoint
-	ep, err := n.getEndpoint(eid)
-	if err != nil {
-		return err
-	}
-
-	// Endpoint with that id exists either on desired or other sandbox
-	if ep != nil {
-		return driverapi.ErrEndpointExists(eid)
-	}
-
-	// Try to convert the options to endpoint configuration
-	epConfig, err := parseEndpointOptions(epOptions)
-	if err != nil {
-		return err
-	}
-
-	// Create and add the endpoint
-	n.Lock()
-	endpoint := &bridgeEndpoint{id: eid, config: epConfig}
-	n.endpoints[eid] = endpoint
-	n.Unlock()
-
-	// On failure make sure to remove the endpoint
-	defer func() {
-		if err != nil {
-			n.Lock()
-			delete(n.endpoints, eid)
-			n.Unlock()
-		}
-	}()
-
-	// Create the sandbox side pipe interface
-	if ifInfo.MacAddress() == nil {
-		// No MAC address assigned to interface. Generate a random MAC to assign
-		endpoint.macAddress = netutils.GenerateRandomMAC()
-		if err := ifInfo.SetMacAddress(endpoint.macAddress); err != nil {
-			logrus.Warnf("Unable to set mac address: %s to endpoint: %s",
-				endpoint.macAddress.String(), endpoint.id)
-			return err
-		}
-	} else {
-		endpoint.macAddress = ifInfo.MacAddress()
-	}
-	endpoint.addr = ifInfo.Address()
-	endpoint.addrv6 = ifInfo.AddressIPv6()
-	c := n.config
-
-	// Program any required port mapping and store them in the endpoint
-	endpoint.portMapping, err = n.allocatePorts(endpoint, c.DefaultBindingIntf, c.DefaultBindingIP, true)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (d *driver) DeleteEndpoint(nid, eid string) error {
-	var err error
-
-	// Get the network handler and make sure it exists
-	d.Lock()
-	n, ok := d.networks[nid]
-	d.Unlock()
-
-	if !ok {
-		return types.InternalMaskableErrorf("network %s does not exist", nid)
-	}
-	if n == nil {
-		return driverapi.ErrNoNetwork(nid)
-	}
-
-	// Sanity Check
-	n.Lock()
-	if n.id != nid {
-		n.Unlock()
-		return InvalidNetworkIDError(nid)
-	}
-	n.Unlock()
-
-	// Check endpoint id and if an endpoint is actually there
-	ep, err := n.getEndpoint(eid)
-	if err != nil {
-		return err
-	}
-	if ep == nil {
-		return EndpointNotFoundError(eid)
-	}
-
-	// Remove it
-	n.Lock()
-	delete(n.endpoints, eid)
-	n.Unlock()
-
-	// On failure make sure to set back ep in n.endpoints, but only
-	// if it hasn't been taken over already by some other thread.
-	defer func() {
-		if err != nil {
-			n.Lock()
-			if _, ok := n.endpoints[eid]; !ok {
-				n.endpoints[eid] = ep
-			}
-			n.Unlock()
-		}
-	}()
-
-	err = n.releasePorts(ep)
-	if err != nil {
-		logrus.Warn(err)
-	}
-
-	return nil
-}
-
-func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) {
-	// Get the network handler and make sure it exists
-	d.Lock()
-	n, ok := d.networks[nid]
-	d.Unlock()
-	if !ok {
-		return nil, types.NotFoundErrorf("network %s does not exist", nid)
-	}
-	if n == nil {
-		return nil, driverapi.ErrNoNetwork(nid)
-	}
-
-	// Sanity check
-	n.Lock()
-	if n.id != nid {
-		n.Unlock()
-		return nil, InvalidNetworkIDError(nid)
-	}
-	n.Unlock()
-
-	// Check if endpoint id is good and retrieve correspondent endpoint
-	ep, err := n.getEndpoint(eid)
-	if err != nil {
-		return nil, err
-	}
-	if ep == nil {
-		return nil, driverapi.ErrNoEndpoint(eid)
-	}
-
-	m := make(map[string]interface{})
-
-	if ep.extConnConfig != nil && ep.extConnConfig.ExposedPorts != nil {
-		// Return a copy of the config data
-		epc := make([]types.TransportPort, 0, len(ep.extConnConfig.ExposedPorts))
-		for _, tp := range ep.extConnConfig.ExposedPorts {
-			epc = append(epc, tp.GetCopy())
-		}
-		m[netlabel.ExposedPorts] = epc
-	}
-
-	if ep.portMapping != nil {
-		// Return a copy of the operational data
-		pmc := make([]types.PortBinding, 0, len(ep.portMapping))
-		for _, pm := range ep.portMapping {
-			pmc = append(pmc, pm.GetCopy())
-		}
-		m[netlabel.PortMap] = pmc
-	}
-
-	if len(ep.macAddress) != 0 {
-		m[netlabel.MacAddress] = ep.macAddress
-	}
-	return m, nil
-}
-
-// Join method is invoked when a Sandbox is attached to an endpoint.
-func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
-	network, err := d.getNetwork(nid)
-	if err != nil {
-		return err
-	}
-
-	endpoint, err := network.getEndpoint(eid)
-	if err != nil {
-		return err
-	}
-
-	if endpoint == nil {
-		return EndpointNotFoundError(eid)
-	}
-
-	endpoint.containerConfig, err = parseContainerOptions(options)
-	if err != nil {
-		return err
-	}
-
-	err = jinfo.SetGateway(network.bridge.gatewayIPv4)
-	if err != nil {
-		return err
-	}
-
-	err = jinfo.SetGatewayIPv6(network.bridge.gatewayIPv6)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (d *driver) link(network *bridgeNetwork, endpoint *bridgeEndpoint, enable bool) error {
-	return nil
-}
-
-// Leave method is invoked when a Sandbox detaches from an endpoint.
-func (d *driver) Leave(nid, eid string) error {
-	network, err := d.getNetwork(nid)
-	if err != nil {
-		return types.InternalMaskableErrorf("%s", err)
-	}
-
-	endpoint, err := network.getEndpoint(eid)
-	if err != nil {
-		return err
-	}
-
-	if endpoint == nil {
-		return EndpointNotFoundError(eid)
-	}
-
-	return nil
-}
-
-func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
-	network, err := d.getNetwork(nid)
-	if err != nil {
-		return err
-	}
-
-	endpoint, err := network.getEndpoint(eid)
-	if err != nil {
-		return err
-	}
-
-	if endpoint == nil {
-		return EndpointNotFoundError(eid)
-	}
-
-	endpoint.extConnConfig, err = parseConnectivityOptions(options)
-	if err != nil {
-		return err
-	}
-
-	// Program any required port mapping and store them in the endpoint
-	endpoint.portMapping, err = network.allocatePorts(endpoint, network.config.DefaultBindingIntf, network.config.DefaultBindingIP, true)
-	if err != nil {
-		return err
-	}
-
-	if !network.config.EnableICC {
-		return d.link(network, endpoint, true)
-	}
-
-	return nil
-}
-
-func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
-	network, err := d.getNetwork(nid)
-	if err != nil {
-		return err
-	}
-
-	endpoint, err := network.getEndpoint(eid)
-	if err != nil {
-		return err
-	}
-
-	if endpoint == nil {
-		return EndpointNotFoundError(eid)
-	}
-
-	err = network.releasePorts(endpoint)
-	if err != nil {
-		logrus.Warn(err)
-	}
-
-	return nil
-}
-
-func (d *driver) Type() string {
-	return networkType
-}
-
-func (d *driver) IsBuiltIn() bool {
-	return true
-}
-
-// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
-func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
-	return nil
-}
-
-// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
-func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
-	return nil
-}
-
-// Validate performs a static validation on the network configuration parameters.
-// Whatever can be assessed a priori before attempting any programming.
-func (c *networkConfiguration) Validate() error {
-	if c.Mtu < 0 {
-		return ErrInvalidMtu(c.Mtu)
-	}
-
-	// If bridge v4 subnet is specified
-	if c.AddressIPv4 != nil {
-		// If default gw is specified, it must be part of bridge subnet
-		if c.DefaultGatewayIPv4 != nil {
-			if !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) {
-				return &ErrInvalidGateway{}
-			}
-		}
-	}
-
-	// If default v6 gw is specified, AddressIPv6 must be specified and gw must belong to AddressIPv6 subnet
-	if c.EnableIPv6 && c.DefaultGatewayIPv6 != nil {
-		if c.AddressIPv6 == nil || !c.AddressIPv6.Contains(c.DefaultGatewayIPv6) {
-			return &ErrInvalidGateway{}
-		}
-	}
-	return nil
-}
-
-// Checks whether this network's configuration for the network with this id conflicts with any of the passed networks
-func (c *networkConfiguration) conflictsWithNetworks(id string, others []*bridgeNetwork) error {
-	for _, nw := range others {
-
-		nw.Lock()
-		nwID := nw.id
-		nwConfig := nw.config
-		nwBridge := nw.bridge
-		nw.Unlock()
-
-		if nwID == id {
-			continue
-		}
-		// Verify the name (which may have been set by newInterface()) does not conflict with
-		// existing bridge interfaces. Ironically the system chosen name gets stored in the config...
-		// Basically we are checking if the two original configs were both empty.
-		if nwConfig.BridgeName == c.BridgeName {
-			return types.ForbiddenErrorf("conflicts with network %s (%s) by bridge name", nwID, nwConfig.BridgeName)
-		}
-		// If this network config specifies the AddressIPv4, we need
-		// to make sure it does not conflict with any previously allocated
-		// bridges. This could not be completely caught by the config conflict
-		// check, because networks which config does not specify the AddressIPv4
-		// get their address and subnet selected by the driver (see electBridgeIPv4())
-		if c.AddressIPv4 != nil {
-			if nwBridge.bridgeIPv4.Contains(c.AddressIPv4.IP) ||
-				c.AddressIPv4.Contains(nwBridge.bridgeIPv4.IP) {
-				return types.ForbiddenErrorf("conflicts with network %s (%s) by ip network", nwID, nwConfig.BridgeName)
-			}
-		}
-	}
-
-	return nil
-}
-
-// Conflicts check if two NetworkConfiguration objects overlap
-func (c *networkConfiguration) Conflicts(o *networkConfiguration) error {
-	if o == nil {
-		return fmt.Errorf("same configuration")
-	}
-
-	// Also empty, because only one network with empty name is allowed
-	if c.BridgeName == o.BridgeName {
-		return fmt.Errorf("networks have same bridge name")
-	}
-
-	// They must be in different subnets
-	if (c.AddressIPv4 != nil && o.AddressIPv4 != nil) &&
-		(c.AddressIPv4.Contains(o.AddressIPv4.IP) || o.AddressIPv4.Contains(c.AddressIPv4.IP)) {
-		return fmt.Errorf("networks have overlapping IPv4")
-	}
-
-	// They must be in different v6 subnets
-	if (c.AddressIPv6 != nil && o.AddressIPv6 != nil) &&
-		(c.AddressIPv6.Contains(o.AddressIPv6.IP) || o.AddressIPv6.Contains(c.AddressIPv6.IP)) {
-		return fmt.Errorf("networks have overlapping IPv6")
-	}
-
-	return nil
-}
-
-func (c *networkConfiguration) fromLabels(labels map[string]string) error {
-	var err error
-	for label, value := range labels {
-		switch label {
-		case BridgeName:
-			c.BridgeName = value
-		case netlabel.DriverMTU:
-			if c.Mtu, err = strconv.Atoi(value); err != nil {
-				return parseErr(label, value, err.Error())
-			}
-		case netlabel.EnableIPv6:
-			if c.EnableIPv6, err = strconv.ParseBool(value); err != nil {
-				return parseErr(label, value, err.Error())
-			}
-		case EnableIPMasquerade:
-			if c.EnableIPMasquerade, err = strconv.ParseBool(value); err != nil {
-				return parseErr(label, value, err.Error())
-			}
-		case EnableICC:
-			if c.EnableICC, err = strconv.ParseBool(value); err != nil {
-				return parseErr(label, value, err.Error())
-			}
-		case DefaultBridge:
-			if c.DefaultBridge, err = strconv.ParseBool(value); err != nil {
-				return parseErr(label, value, err.Error())
-			}
-		case DefaultBindingIP:
-			if c.DefaultBindingIP = net.ParseIP(value); c.DefaultBindingIP == nil {
-				return parseErr(label, value, "nil ip")
-			}
-		}
-	}
-
-	return nil
-}
-
-func parseErr(label, value, errString string) error {
-	return types.BadRequestErrorf("failed to parse %s value: %v (%s)", label, value, errString)
-}
-
-func parseNetworkGenericOptions(data interface{}) (*networkConfiguration, error) {
-	var (
-		err    error
-		config *networkConfiguration
-	)
-
-	switch opt := data.(type) {
-	case *networkConfiguration:
-		config = opt
-	case map[string]string:
-		config = &networkConfiguration{
-			EnableICC:          true,
-			EnableIPMasquerade: true,
-		}
-		err = config.fromLabels(opt)
-	case options.Generic:
-		var opaqueConfig interface{}
-		if opaqueConfig, err = options.GenerateFromModel(opt, config); err == nil {
-			config = opaqueConfig.(*networkConfiguration)
-		}
-	default:
-		err = types.BadRequestErrorf("do not recognize network configuration format: %T", opt)
-	}
-
-	return config, err
-}
-
-func parseNetworkOptions(d *driver, id string, option options.Generic) (*networkConfiguration, error) {
-	var (
-		err    error
-		config = &networkConfiguration{}
-	)
-
-	// Parse generic label first, config will be re-assigned
-	if genData, ok := option[netlabel.GenericData]; ok && genData != nil {
-		if config, err = parseNetworkGenericOptions(genData); err != nil {
-			return nil, err
-		}
-	}
-
-	// Process well-known labels next
-	if val, ok := option[netlabel.EnableIPv6]; ok {
-		config.EnableIPv6 = val.(bool)
-	}
-
-	if val, ok := option[netlabel.Internal]; ok {
-		if internal, ok := val.(bool); ok && internal {
-			config.Internal = true
-		}
-	}
-
-	// Finally validate the configuration
-	if err = config.Validate(); err != nil {
-		return nil, err
-	}
-
-	if config.BridgeName == "" && config.DefaultBridge == false {
-		config.BridgeName = "br_" + id[:12] + "_0"
-	}
-
-	lastChar := config.BridgeName[len(config.BridgeName)-1:]
-	if _, err = strconv.Atoi(lastChar); err != nil {
-		config.BridgeNameInternal = config.BridgeName + "_0"
-	} else {
-		config.BridgeNameInternal = config.BridgeName
-	}
-
-	config.ID = id
-	return config, nil
-}
-
-func (c *networkConfiguration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error {
-	if len(ipamV4Data) > 1 || len(ipamV6Data) > 1 {
-		return types.ForbiddenErrorf("bridge driver doesnt support multiple subnets")
-	}
-
-	if len(ipamV4Data) == 0 {
-		return types.BadRequestErrorf("bridge network %s requires ipv4 configuration", id)
-	}
-
-	if ipamV4Data[0].Gateway != nil {
-		c.AddressIPv4 = types.GetIPNetCopy(ipamV4Data[0].Gateway)
-	}
-
-	if gw, ok := ipamV4Data[0].AuxAddresses[DefaultGatewayV4AuxKey]; ok {
-		c.DefaultGatewayIPv4 = gw.IP
-	}
-
-	if len(ipamV6Data) > 0 {
-		c.AddressIPv6 = ipamV6Data[0].Pool
-
-		if ipamV6Data[0].Gateway != nil {
-			c.AddressIPv6 = types.GetIPNetCopy(ipamV6Data[0].Gateway)
-		}
-
-		if gw, ok := ipamV6Data[0].AuxAddresses[DefaultGatewayV6AuxKey]; ok {
-			c.DefaultGatewayIPv6 = gw.IP
-		}
-	}
-
-	return nil
-}
-
-func (n *bridgeNetwork) getEndpoint(eid string) (*bridgeEndpoint, error) {
-	n.Lock()
-	defer n.Unlock()
-
-	if eid == "" {
-		return nil, InvalidEndpointIDError(eid)
-	}
-
-	if ep, ok := n.endpoints[eid]; ok {
-		return ep, nil
-	}
-
-	return nil, nil
-}
-
-func parseEndpointOptions(epOptions map[string]interface{}) (*endpointConfiguration, error) {
-	if epOptions == nil {
-		return nil, nil
-	}
-
-	ec := &endpointConfiguration{}
-
-	if opt, ok := epOptions[netlabel.MacAddress]; ok {
-		if mac, ok := opt.(net.HardwareAddr); ok {
-			ec.MacAddress = mac
-		} else {
-			return nil, &ErrInvalidEndpointConfig{}
-		}
-	}
-
-	if opt, ok := epOptions[netlabel.PortMap]; ok {
-		if bs, ok := opt.([]types.PortBinding); ok {
-			ec.PortBindings = bs
-		} else {
-			return nil, &ErrInvalidEndpointConfig{}
-		}
-	}
-
-	if opt, ok := epOptions[netlabel.ExposedPorts]; ok {
-		if ports, ok := opt.([]types.TransportPort); ok {
-			ec.ExposedPorts = ports
-		} else {
-			return nil, &ErrInvalidEndpointConfig{}
-		}
-	}
-
-	return ec, nil
-}
-
-func parseContainerOptions(cOptions map[string]interface{}) (*containerConfiguration, error) {
-	if cOptions == nil {
-		return nil, nil
-	}
-	genericData := cOptions[netlabel.GenericData]
-	if genericData == nil {
-		return nil, nil
-	}
-	switch opt := genericData.(type) {
-	case options.Generic:
-		opaqueConfig, err := options.GenerateFromModel(opt, &containerConfiguration{})
-		if err != nil {
-			return nil, err
-		}
-		return opaqueConfig.(*containerConfiguration), nil
-	case *containerConfiguration:
-		return opt, nil
-	default:
-		return nil, nil
-	}
-}
-
-func parseConnectivityOptions(cOptions map[string]interface{}) (*connectivityConfiguration, error) {
-	if cOptions == nil {
-		return nil, nil
-	}
-
-	cc := &connectivityConfiguration{}
-
-	if opt, ok := cOptions[netlabel.PortMap]; ok {
-		if pb, ok := opt.([]types.PortBinding); ok {
-			cc.PortBindings = pb
-		} else {
-			return nil, types.BadRequestErrorf("Invalid port mapping data in connectivity configuration: %v", opt)
-		}
-	}
-
-	if opt, ok := cOptions[netlabel.ExposedPorts]; ok {
-		if ports, ok := opt.([]types.TransportPort); ok {
-			cc.ExposedPorts = ports
-		} else {
-			return nil, types.BadRequestErrorf("Invalid exposed ports data in connectivity configuration: %v", opt)
-		}
-	}
-
-	return cc, nil
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge_store.go b/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge_store.go
deleted file mode 100644
index 6f5db4f..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/bridge_store.go
+++ /dev/null
@@ -1,384 +0,0 @@
-// +build solaris
-
-package bridge
-
-import (
-	"encoding/json"
-	"fmt"
-	"net"
-
-	"github.com/docker/libnetwork/datastore"
-	"github.com/docker/libnetwork/discoverapi"
-	"github.com/docker/libnetwork/netlabel"
-	"github.com/docker/libnetwork/types"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	// network config prefix was not specific enough.
-	// To be backward compatible, need custom endpoint
-	// prefix with different root
-	bridgePrefix         = "bridge"
-	bridgeEndpointPrefix = "bridge-endpoint"
-)
-
-func (d *driver) initStore(option map[string]interface{}) error {
-	if data, ok := option[netlabel.LocalKVClient]; ok {
-		var err error
-		dsc, ok := data.(discoverapi.DatastoreConfigData)
-		if !ok {
-			return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
-		}
-		d.store, err = datastore.NewDataStoreFromConfig(dsc)
-		if err != nil {
-			return types.InternalErrorf("bridge driver failed to initialize data store: %v", err)
-		}
-
-		err = d.populateNetworks()
-		if err != nil {
-			return err
-		}
-
-		err = d.populateEndpoints()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *driver) populateNetworks() error {
-	kvol, err := d.store.List(datastore.Key(bridgePrefix), &networkConfiguration{})
-	if err != nil && err != datastore.ErrKeyNotFound {
-		return fmt.Errorf("failed to get bridge network configurations from store: %v", err)
-	}
-
-	// It's normal for network configuration state to be empty. Just return.
-	if err == datastore.ErrKeyNotFound {
-		return nil
-	}
-
-	for _, kvo := range kvol {
-		ncfg := kvo.(*networkConfiguration)
-		if err = d.createNetwork(ncfg); err != nil {
-			logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err)
-		}
-		logrus.Debugf("Network (%s) restored", ncfg.ID[0:7])
-	}
-
-	return nil
-}
-
-func (d *driver) populateEndpoints() error {
-	kvol, err := d.store.List(datastore.Key(bridgeEndpointPrefix), &bridgeEndpoint{})
-	if err != nil && err != datastore.ErrKeyNotFound {
-		return fmt.Errorf("failed to get bridge endpoints from store: %v", err)
-	}
-
-	if err == datastore.ErrKeyNotFound {
-		return nil
-	}
-
-	for _, kvo := range kvol {
-		ep := kvo.(*bridgeEndpoint)
-		n, ok := d.networks[ep.nid]
-		if !ok {
-			logrus.Debugf("Network (%s) not found for restored bridge endpoint (%s)", ep.nid[0:7], ep.id[0:7])
-			logrus.Debugf("Deleting stale bridge endpoint (%s) from store", ep.nid[0:7])
-			if err := d.storeDelete(ep); err != nil {
-				logrus.Debugf("Failed to delete stale bridge endpoint (%s) from store", ep.nid[0:7])
-			}
-			continue
-		}
-		n.endpoints[ep.id] = ep
-		n.restorePortAllocations(ep)
-		logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7])
-	}
-
-	return nil
-}
-
-func (d *driver) storeUpdate(kvObject datastore.KVObject) error {
-	if d.store == nil {
-		logrus.Warnf("bridge store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...))
-		return nil
-	}
-
-	if err := d.store.PutObjectAtomic(kvObject); err != nil {
-		return fmt.Errorf("failed to update bridge store for object type %T: %v", kvObject, err)
-	}
-
-	return nil
-}
-
-func (d *driver) storeDelete(kvObject datastore.KVObject) error {
-	if d.store == nil {
-		logrus.Debugf("bridge store not initialized. kv object %s is not deleted from store", datastore.Key(kvObject.Key()...))
-		return nil
-	}
-
-retry:
-	if err := d.store.DeleteObjectAtomic(kvObject); err != nil {
-		if err == datastore.ErrKeyModified {
-			if err := d.store.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {
-				return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err)
-			}
-			goto retry
-		}
-		return err
-	}
-
-	return nil
-}
-
-func (ncfg *networkConfiguration) MarshalJSON() ([]byte, error) {
-	nMap := make(map[string]interface{})
-	nMap["ID"] = ncfg.ID
-	nMap["BridgeName"] = ncfg.BridgeName
-	nMap["BridgeNameInternal"] = ncfg.BridgeNameInternal
-	nMap["EnableIPv6"] = ncfg.EnableIPv6
-	nMap["EnableIPMasquerade"] = ncfg.EnableIPMasquerade
-	nMap["EnableICC"] = ncfg.EnableICC
-	nMap["Mtu"] = ncfg.Mtu
-	nMap["Internal"] = ncfg.Internal
-	nMap["DefaultBridge"] = ncfg.DefaultBridge
-	nMap["DefaultBindingIP"] = ncfg.DefaultBindingIP.String()
-	nMap["DefaultBindingIntf"] = ncfg.DefaultBindingIntf
-	nMap["DefaultGatewayIPv4"] = ncfg.DefaultGatewayIPv4.String()
-	nMap["DefaultGatewayIPv6"] = ncfg.DefaultGatewayIPv6.String()
-
-	if ncfg.AddressIPv4 != nil {
-		nMap["AddressIPv4"] = ncfg.AddressIPv4.String()
-	}
-
-	if ncfg.AddressIPv6 != nil {
-		nMap["AddressIPv6"] = ncfg.AddressIPv6.String()
-	}
-
-	return json.Marshal(nMap)
-}
-
-func (ncfg *networkConfiguration) UnmarshalJSON(b []byte) error {
-	var (
-		err  error
-		nMap map[string]interface{}
-	)
-
-	if err = json.Unmarshal(b, &nMap); err != nil {
-		return err
-	}
-
-	if v, ok := nMap["AddressIPv4"]; ok {
-		if ncfg.AddressIPv4, err = types.ParseCIDR(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode bridge network address IPv4 after json unmarshal: %s", v.(string))
-		}
-	}
-
-	if v, ok := nMap["AddressIPv6"]; ok {
-		if ncfg.AddressIPv6, err = types.ParseCIDR(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode bridge network address IPv6 after json unmarshal: %s", v.(string))
-		}
-	}
-
-	ncfg.DefaultBridge = nMap["DefaultBridge"].(bool)
-	ncfg.DefaultBindingIP = net.ParseIP(nMap["DefaultBindingIP"].(string))
-	ncfg.DefaultBindingIntf = nMap["DefaultBindingIntf"].(string)
-	ncfg.DefaultGatewayIPv4 = net.ParseIP(nMap["DefaultGatewayIPv4"].(string))
-	ncfg.DefaultGatewayIPv6 = net.ParseIP(nMap["DefaultGatewayIPv6"].(string))
-	ncfg.ID = nMap["ID"].(string)
-	ncfg.BridgeName = nMap["BridgeName"].(string)
-	ncfg.BridgeNameInternal = nMap["BridgeNameInternal"].(string)
-	ncfg.EnableIPv6 = nMap["EnableIPv6"].(bool)
-	ncfg.EnableIPMasquerade = nMap["EnableIPMasquerade"].(bool)
-	ncfg.EnableICC = nMap["EnableICC"].(bool)
-	ncfg.Mtu = int(nMap["Mtu"].(float64))
-	if v, ok := nMap["Internal"]; ok {
-		ncfg.Internal = v.(bool)
-	}
-
-	return nil
-}
-
-func (ncfg *networkConfiguration) Key() []string {
-	return []string{bridgePrefix, ncfg.ID}
-}
-
-func (ncfg *networkConfiguration) KeyPrefix() []string {
-	return []string{bridgePrefix}
-}
-
-func (ncfg *networkConfiguration) Value() []byte {
-	b, err := json.Marshal(ncfg)
-	if err != nil {
-		return nil
-	}
-	return b
-}
-
-func (ncfg *networkConfiguration) SetValue(value []byte) error {
-	return json.Unmarshal(value, ncfg)
-}
-
-func (ncfg *networkConfiguration) Index() uint64 {
-	return ncfg.dbIndex
-}
-
-func (ncfg *networkConfiguration) SetIndex(index uint64) {
-	ncfg.dbIndex = index
-	ncfg.dbExists = true
-}
-
-func (ncfg *networkConfiguration) Exists() bool {
-	return ncfg.dbExists
-}
-
-func (ncfg *networkConfiguration) Skip() bool {
-	return false
-}
-
-func (ncfg *networkConfiguration) New() datastore.KVObject {
-	return &networkConfiguration{}
-}
-
-func (ncfg *networkConfiguration) CopyTo(o datastore.KVObject) error {
-	dstNcfg := o.(*networkConfiguration)
-	*dstNcfg = *ncfg
-	return nil
-}
-
-func (ncfg *networkConfiguration) DataScope() string {
-	return datastore.LocalScope
-}
-
-func (ep *bridgeEndpoint) MarshalJSON() ([]byte, error) {
-	epMap := make(map[string]interface{})
-	epMap["id"] = ep.id
-	epMap["nid"] = ep.nid
-	epMap["SrcName"] = ep.srcName
-	epMap["MacAddress"] = ep.macAddress.String()
-	epMap["Addr"] = ep.addr.String()
-	if ep.addrv6 != nil {
-		epMap["Addrv6"] = ep.addrv6.String()
-	}
-	epMap["Config"] = ep.config
-	epMap["ContainerConfig"] = ep.containerConfig
-	epMap["ExternalConnConfig"] = ep.extConnConfig
-	epMap["PortMapping"] = ep.portMapping
-
-	return json.Marshal(epMap)
-}
-
-func (ep *bridgeEndpoint) UnmarshalJSON(b []byte) error {
-	var (
-		err   error
-		epMap map[string]interface{}
-	)
-
-	if err = json.Unmarshal(b, &epMap); err != nil {
-		return fmt.Errorf("Failed to unmarshal to bridge endpoint: %v", err)
-	}
-
-	if v, ok := epMap["MacAddress"]; ok {
-		if ep.macAddress, err = net.ParseMAC(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode bridge endpoint MAC address (%s) after json unmarshal: %v", v.(string), err)
-		}
-	}
-	if v, ok := epMap["Addr"]; ok {
-		if ep.addr, err = types.ParseCIDR(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode bridge endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err)
-		}
-	}
-	if v, ok := epMap["Addrv6"]; ok {
-		if ep.addrv6, err = types.ParseCIDR(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode bridge endpoint IPv6 address (%s) after json unmarshal: %v", v.(string), err)
-		}
-	}
-	ep.id = epMap["id"].(string)
-	ep.nid = epMap["nid"].(string)
-	ep.srcName = epMap["SrcName"].(string)
-	d, _ := json.Marshal(epMap["Config"])
-	if err := json.Unmarshal(d, &ep.config); err != nil {
-		logrus.Warnf("Failed to decode endpoint config %v", err)
-	}
-	d, _ = json.Marshal(epMap["ContainerConfig"])
-	if err := json.Unmarshal(d, &ep.containerConfig); err != nil {
-		logrus.Warnf("Failed to decode endpoint container config %v", err)
-	}
-	d, _ = json.Marshal(epMap["ExternalConnConfig"])
-	if err := json.Unmarshal(d, &ep.extConnConfig); err != nil {
-		logrus.Warnf("Failed to decode endpoint external connectivity configuration %v", err)
-	}
-	d, _ = json.Marshal(epMap["PortMapping"])
-	if err := json.Unmarshal(d, &ep.portMapping); err != nil {
-		logrus.Warnf("Failed to decode endpoint port mapping %v", err)
-	}
-
-	return nil
-}
-
-func (ep *bridgeEndpoint) Key() []string {
-	return []string{bridgeEndpointPrefix, ep.id}
-}
-
-func (ep *bridgeEndpoint) KeyPrefix() []string {
-	return []string{bridgeEndpointPrefix}
-}
-
-func (ep *bridgeEndpoint) Value() []byte {
-	b, err := json.Marshal(ep)
-	if err != nil {
-		return nil
-	}
-	return b
-}
-
-func (ep *bridgeEndpoint) SetValue(value []byte) error {
-	return json.Unmarshal(value, ep)
-}
-
-func (ep *bridgeEndpoint) Index() uint64 {
-	return ep.dbIndex
-}
-
-func (ep *bridgeEndpoint) SetIndex(index uint64) {
-	ep.dbIndex = index
-	ep.dbExists = true
-}
-
-func (ep *bridgeEndpoint) Exists() bool {
-	return ep.dbExists
-}
-
-func (ep *bridgeEndpoint) Skip() bool {
-	return false
-}
-
-func (ep *bridgeEndpoint) New() datastore.KVObject {
-	return &bridgeEndpoint{}
-}
-
-func (ep *bridgeEndpoint) CopyTo(o datastore.KVObject) error {
-	dstEp := o.(*bridgeEndpoint)
-	*dstEp = *ep
-	return nil
-}
-
-func (ep *bridgeEndpoint) DataScope() string {
-	return datastore.LocalScope
-}
-
-func (n *bridgeNetwork) restorePortAllocations(ep *bridgeEndpoint) {
-	if ep.extConnConfig == nil ||
-		ep.extConnConfig.ExposedPorts == nil ||
-		ep.extConnConfig.PortBindings == nil {
-		return
-	}
-	tmp := ep.extConnConfig.PortBindings
-	ep.extConnConfig.PortBindings = ep.portMapping
-	_, err := n.allocatePorts(ep, n.config.DefaultBindingIntf, n.config.DefaultBindingIP, n.driver.config.EnableUserlandProxy)
-	if err != nil {
-		logrus.Warnf("Failed to reserve existing port mapping for endpoint %s:%v", ep.id[0:7], err)
-	}
-	ep.extConnConfig.PortBindings = tmp
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/errors.go b/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/errors.go
deleted file mode 100644
index ceb2388..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/errors.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package bridge
-
-import "fmt"
-
-// ErrInvalidEndpointConfig error is returned when an endpoint create is attempted with an invalid endpoint configuration.
-type ErrInvalidEndpointConfig struct{}
-
-func (eiec *ErrInvalidEndpointConfig) Error() string {
-	return "trying to create an endpoint with an invalid endpoint configuration"
-}
-
-// BadRequest denotes the type of this error
-func (eiec *ErrInvalidEndpointConfig) BadRequest() {}
-
-// ErrNoIPAddr error is returned when bridge has no IPv4 address configured.
-type ErrNoIPAddr struct{}
-
-func (enip *ErrNoIPAddr) Error() string {
-	return "bridge has no IPv4 address configured"
-}
-
-// InternalError denotes the type of this error
-func (enip *ErrNoIPAddr) InternalError() {}
-
-// ErrInvalidGateway is returned when the user provided default gateway (v4/v6) is not not valid.
-type ErrInvalidGateway struct{}
-
-func (eig *ErrInvalidGateway) Error() string {
-	return "default gateway ip must be part of the network"
-}
-
-// BadRequest denotes the type of this error
-func (eig *ErrInvalidGateway) BadRequest() {}
-
-// ErrInvalidMtu is returned when the user provided MTU is not valid.
-type ErrInvalidMtu int
-
-func (eim ErrInvalidMtu) Error() string {
-	return fmt.Sprintf("invalid MTU number: %d", int(eim))
-}
-
-// BadRequest denotes the type of this error
-func (eim ErrInvalidMtu) BadRequest() {}
-
-// ErrUnsupportedAddressType is returned when the specified address type is not supported.
-type ErrUnsupportedAddressType string
-
-func (uat ErrUnsupportedAddressType) Error() string {
-	return fmt.Sprintf("unsupported address type: %s", string(uat))
-}
-
-// BadRequest denotes the type of this error
-func (uat ErrUnsupportedAddressType) BadRequest() {}
-
-// ActiveEndpointsError is returned when there are
-// still active endpoints in the network being deleted.
-type ActiveEndpointsError string
-
-func (aee ActiveEndpointsError) Error() string {
-	return fmt.Sprintf("network %s has active endpoint", string(aee))
-}
-
-// Forbidden denotes the type of this error
-func (aee ActiveEndpointsError) Forbidden() {}
-
-// InvalidNetworkIDError is returned when the passed
-// network id for an existing network is not a known id.
-type InvalidNetworkIDError string
-
-func (inie InvalidNetworkIDError) Error() string {
-	return fmt.Sprintf("invalid network id %s", string(inie))
-}
-
-// NotFound denotes the type of this error
-func (inie InvalidNetworkIDError) NotFound() {}
-
-// InvalidEndpointIDError is returned when the passed
-// endpoint id is not valid.
-type InvalidEndpointIDError string
-
-func (ieie InvalidEndpointIDError) Error() string {
-	return fmt.Sprintf("invalid endpoint id: %s", string(ieie))
-}
-
-// BadRequest denotes the type of this error
-func (ieie InvalidEndpointIDError) BadRequest() {}
-
-// EndpointNotFoundError is returned when the no endpoint
-// with the passed endpoint id is found.
-type EndpointNotFoundError string
-
-func (enfe EndpointNotFoundError) Error() string {
-	return fmt.Sprintf("endpoint not found: %s", string(enfe))
-}
-
-// NotFound denotes the type of this error
-func (enfe EndpointNotFoundError) NotFound() {}
-
-// NonDefaultBridgeExistError is returned when a non-default
-// bridge config is passed but it does not already exist.
-type NonDefaultBridgeExistError string
-
-func (ndbee NonDefaultBridgeExistError) Error() string {
-	return fmt.Sprintf("bridge device with non default name %s must be created manually", string(ndbee))
-}
-
-// Forbidden denotes the type of this error
-func (ndbee NonDefaultBridgeExistError) Forbidden() {}
-
-// NonDefaultBridgeNeedsIPError is returned when a non-default
-// bridge config is passed but it has no ip configured
-type NonDefaultBridgeNeedsIPError string
-
-func (ndbee NonDefaultBridgeNeedsIPError) Error() string {
-	return fmt.Sprintf("bridge device with non default name %s must have a valid IP address", string(ndbee))
-}
-
-// Forbidden denotes the type of this error
-func (ndbee NonDefaultBridgeNeedsIPError) Forbidden() {}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/port_mapping.go b/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/port_mapping.go
deleted file mode 100644
index 3b67db3..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/bridge/port_mapping.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// +build solaris
-
-package bridge
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"net"
-	"os"
-	"os/exec"
-
-	"github.com/docker/libnetwork/types"
-	"github.com/sirupsen/logrus"
-)
-
-var (
-	defaultBindingIP = net.IPv4(0, 0, 0, 0)
-)
-
-const (
-	maxAllocatePortAttempts = 10
-)
-
-func addPFRules(epid, bindIntf string, bs []types.PortBinding) {
-	var id string
-
-	if len(epid) > 12 {
-		id = epid[:12]
-	} else {
-		id = epid
-	}
-
-	fname := "/var/lib/docker/network/files/pf." + id
-
-	f, err := os.OpenFile(fname,
-		os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
-	if err != nil {
-		logrus.Warn("cannot open temp pf file")
-		return
-	}
-	for _, b := range bs {
-		r := fmt.Sprintf(
-			"pass in on %s proto %s from any to (%s) "+
-				"port %d rdr-to %s port %d\n", bindIntf,
-			b.Proto.String(), bindIntf, b.HostPort,
-			b.IP.String(), b.Port)
-		_, err = f.WriteString(r)
-		if err != nil {
-			logrus.Warnf("cannot write firewall rules to %s: %v", fname, err)
-		}
-	}
-	f.Close()
-
-	anchor := fmt.Sprintf("_auto/docker/ep%s", id)
-	err = exec.Command("/usr/sbin/pfctl", "-a", anchor, "-f", fname).Run()
-	if err != nil {
-		logrus.Warnf("failed to add firewall rules: %v", err)
-	}
-	os.Remove(fname)
-}
-
-func removePFRules(epid string) {
-	var id string
-
-	if len(epid) > 12 {
-		id = epid[:12]
-	} else {
-		id = epid
-	}
-
-	anchor := fmt.Sprintf("_auto/docker/ep%s", id)
-	err := exec.Command("/usr/sbin/pfctl", "-a", anchor, "-F", "all").Run()
-	if err != nil {
-		logrus.Warnf("failed to remove firewall rules: %v", err)
-	}
-}
-
-func (n *bridgeNetwork) allocatePorts(ep *bridgeEndpoint, bindIntf string, reqDefBindIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
-	if ep.extConnConfig == nil || ep.extConnConfig.PortBindings == nil {
-		return nil, nil
-	}
-
-	defHostIP := defaultBindingIP
-	if reqDefBindIP != nil {
-		defHostIP = reqDefBindIP
-	}
-
-	bs, err := n.allocatePortsInternal(ep.extConnConfig.PortBindings, bindIntf, ep.addr.IP, defHostIP, ulPxyEnabled)
-	if err != nil {
-		return nil, err
-	}
-
-	// Add PF rules for port bindings, if any
-	if len(bs) > 0 {
-		addPFRules(ep.id, bindIntf, bs)
-	}
-
-	return bs, err
-}
-
-func (n *bridgeNetwork) allocatePortsInternal(bindings []types.PortBinding, bindIntf string, containerIP, defHostIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) {
-	bs := make([]types.PortBinding, 0, len(bindings))
-	for _, c := range bindings {
-		b := c.GetCopy()
-		if err := n.allocatePort(&b, containerIP, defHostIP); err != nil {
-			// On allocation failure,release previously
-			// allocated ports. On cleanup error, just log
-			// a warning message
-			if cuErr := n.releasePortsInternal(bs); cuErr != nil {
-				logrus.Warnf("Upon allocation failure "+
-					"for %v, failed to clear previously "+
-					"allocated port bindings: %v", b, cuErr)
-			}
-			return nil, err
-		}
-		bs = append(bs, b)
-	}
-	return bs, nil
-}
-
-func (n *bridgeNetwork) allocatePort(bnd *types.PortBinding, containerIP, defHostIP net.IP) error {
-	var (
-		host net.Addr
-		err  error
-	)
-
-	// Store the container interface address in the operational binding
-	bnd.IP = containerIP
-
-	// Adjust the host address in the operational binding
-	if len(bnd.HostIP) == 0 {
-		bnd.HostIP = defHostIP
-	}
-
-	// Adjust HostPortEnd if this is not a range.
-	if bnd.HostPortEnd == 0 {
-		bnd.HostPortEnd = bnd.HostPort
-	}
-
-	// Construct the container side transport address
-	container, err := bnd.ContainerAddr()
-	if err != nil {
-		return err
-	}
-
-	// Try up to maxAllocatePortAttempts times to get a port that's
-	// not already allocated.
-	for i := 0; i < maxAllocatePortAttempts; i++ {
-		if host, err = n.portMapper.MapRange(container, bnd.HostIP,
-			int(bnd.HostPort), int(bnd.HostPortEnd), false); err == nil {
-			break
-		}
-		// There is no point in immediately retrying to map an
-		// explicitly chosen port.
-		if bnd.HostPort != 0 {
-			logrus.Warnf(
-				"Failed to allocate and map port %d-%d: %s",
-				bnd.HostPort, bnd.HostPortEnd, err)
-			break
-		}
-		logrus.Warnf("Failed to allocate and map port: %s, retry: %d",
-			err, i+1)
-	}
-	if err != nil {
-		return err
-	}
-
-	// Save the host port (regardless it was or not specified in the
-	// binding)
-	switch netAddr := host.(type) {
-	case *net.TCPAddr:
-		bnd.HostPort = uint16(host.(*net.TCPAddr).Port)
-		return nil
-	case *net.UDPAddr:
-		bnd.HostPort = uint16(host.(*net.UDPAddr).Port)
-		return nil
-	default:
-		// For completeness
-		return ErrUnsupportedAddressType(fmt.Sprintf("%T", netAddr))
-	}
-}
-
-func (n *bridgeNetwork) releasePorts(ep *bridgeEndpoint) error {
-	err := n.releasePortsInternal(ep.portMapping)
-	if err != nil {
-		return nil
-	}
-
-	// remove rules if there are any port mappings
-	if len(ep.portMapping) > 0 {
-		removePFRules(ep.id)
-	}
-
-	return nil
-
-}
-
-func (n *bridgeNetwork) releasePortsInternal(bindings []types.PortBinding) error {
-	var errorBuf bytes.Buffer
-
-	// Attempt to release all port bindings, do not stop on failure
-	for _, m := range bindings {
-		if err := n.releasePort(m); err != nil {
-			errorBuf.WriteString(
-				fmt.Sprintf(
-					"\ncould not release %v because of %v",
-					m, err))
-		}
-	}
-
-	if errorBuf.Len() != 0 {
-		return errors.New(errorBuf.String())
-	}
-	return nil
-}
-
-func (n *bridgeNetwork) releasePort(bnd types.PortBinding) error {
-	// Construct the host side transport address
-	host, err := bnd.HostAddr()
-	if err != nil {
-		return err
-	}
-	return n.portMapper.Unmap(host)
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/encryption.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/encryption.go
deleted file mode 100644
index 0af3474..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/encryption.go
+++ /dev/null
@@ -1,274 +0,0 @@
-package overlay
-
-import (
-	"bytes"
-	"encoding/binary"
-	"encoding/hex"
-	"fmt"
-	"hash/fnv"
-	"net"
-	"sync"
-
-	"github.com/docker/libnetwork/types"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	mark         = uint32(0xD0C4E3)
-	timeout      = 30
-	pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
-)
-
-const (
-	forward = iota + 1
-	reverse
-	bidir
-)
-
-type key struct {
-	value []byte
-	tag   uint32
-}
-
-func (k *key) String() string {
-	if k != nil {
-		return fmt.Sprintf("(key: %s, tag: 0x%x)", hex.EncodeToString(k.value)[0:5], k.tag)
-	}
-	return ""
-}
-
-type spi struct {
-	forward int
-	reverse int
-}
-
-func (s *spi) String() string {
-	return fmt.Sprintf("SPI(FWD: 0x%x, REV: 0x%x)", uint32(s.forward), uint32(s.reverse))
-}
-
-type encrMap struct {
-	nodes map[string][]*spi
-	sync.Mutex
-}
-
-func (e *encrMap) String() string {
-	e.Lock()
-	defer e.Unlock()
-	b := new(bytes.Buffer)
-	for k, v := range e.nodes {
-		b.WriteString("\n")
-		b.WriteString(k)
-		b.WriteString(":")
-		b.WriteString("[")
-		for _, s := range v {
-			b.WriteString(s.String())
-			b.WriteString(",")
-		}
-		b.WriteString("]")
-
-	}
-	return b.String()
-}
-
-func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
-	logrus.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
-
-	n := d.network(nid)
-	if n == nil || !n.secure {
-		return nil
-	}
-
-	if len(d.keys) == 0 {
-		return types.ForbiddenErrorf("encryption key is not present")
-	}
-
-	lIP := net.ParseIP(d.bindAddress)
-	aIP := net.ParseIP(d.advertiseAddress)
-	nodes := map[string]net.IP{}
-
-	switch {
-	case isLocal:
-		if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
-			if !aIP.Equal(pEntry.vtep) {
-				nodes[pEntry.vtep.String()] = pEntry.vtep
-			}
-			return false
-		}); err != nil {
-			logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err)
-		}
-	default:
-		if len(d.network(nid).endpoints) > 0 {
-			nodes[rIP.String()] = rIP
-		}
-	}
-
-	logrus.Debugf("List of nodes: %s", nodes)
-
-	if add {
-		for _, rIP := range nodes {
-			if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil {
-				logrus.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err)
-			}
-		}
-	} else {
-		if len(nodes) == 0 {
-			if err := removeEncryption(lIP, rIP, d.secMap); err != nil {
-				logrus.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err)
-			}
-		}
-	}
-
-	return nil
-}
-
-func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
-	logrus.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
-	rIPs := remoteIP.String()
-
-	indices := make([]*spi, 0, len(keys))
-
-	err := programMangle(vni, true)
-	if err != nil {
-		logrus.Warn(err)
-	}
-
-	em.Lock()
-	em.nodes[rIPs] = indices
-	em.Unlock()
-
-	return nil
-}
-
-func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error {
-	return nil
-}
-
-func programMangle(vni uint32, add bool) (err error) {
-	return
-}
-
-func buildSPI(src, dst net.IP, st uint32) int {
-	b := make([]byte, 4)
-	binary.BigEndian.PutUint32(b, st)
-	h := fnv.New32a()
-	h.Write(src)
-	h.Write(b)
-	h.Write(dst)
-	return int(binary.BigEndian.Uint32(h.Sum(nil)))
-}
-
-func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error {
-	d.secMap.Lock()
-	for node, indices := range d.secMap.nodes {
-		idxs, stop := f(node, indices)
-		if idxs != nil {
-			d.secMap.nodes[node] = idxs
-		}
-		if stop {
-			break
-		}
-	}
-	d.secMap.Unlock()
-	return nil
-}
-
-func (d *driver) setKeys(keys []*key) error {
-	if d.keys != nil {
-		return types.ForbiddenErrorf("initial keys are already present")
-	}
-	d.keys = keys
-	logrus.Debugf("Initial encryption keys: %v", d.keys)
-	return nil
-}
-
-// updateKeys allows to add a new key and/or change the primary key and/or prune an existing key
-// The primary key is the key used in transmission and will go in first position in the list.
-func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
-	logrus.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
-
-	logrus.Debugf("Current: %v", d.keys)
-
-	var (
-		newIdx = -1
-		priIdx = -1
-		delIdx = -1
-		lIP    = net.ParseIP(d.bindAddress)
-	)
-
-	d.Lock()
-	// add new
-	if newKey != nil {
-		d.keys = append(d.keys, newKey)
-		newIdx += len(d.keys)
-	}
-	for i, k := range d.keys {
-		if primary != nil && k.tag == primary.tag {
-			priIdx = i
-		}
-		if pruneKey != nil && k.tag == pruneKey.tag {
-			delIdx = i
-		}
-	}
-	d.Unlock()
-
-	if (newKey != nil && newIdx == -1) ||
-		(primary != nil && priIdx == -1) ||
-		(pruneKey != nil && delIdx == -1) {
-		err := types.BadRequestErrorf("cannot find proper key indices while processing key update:"+
-			"(newIdx,priIdx,delIdx):(%d, %d, %d)", newIdx, priIdx, delIdx)
-		logrus.Warn(err)
-		return err
-	}
-
-	d.secMapWalk(func(rIPs string, spis []*spi) ([]*spi, bool) {
-		rIP := net.ParseIP(rIPs)
-		return updateNodeKey(lIP, rIP, spis, d.keys, newIdx, priIdx, delIdx), false
-	})
-
-	d.Lock()
-	// swap primary
-	if priIdx != -1 {
-		swp := d.keys[0]
-		d.keys[0] = d.keys[priIdx]
-		d.keys[priIdx] = swp
-	}
-	// prune
-	if delIdx != -1 {
-		if delIdx == 0 {
-			delIdx = priIdx
-		}
-		d.keys = append(d.keys[:delIdx], d.keys[delIdx+1:]...)
-	}
-	d.Unlock()
-
-	logrus.Debugf("Updated: %v", d.keys)
-
-	return nil
-}
-
-/********************************************************
- * Steady state: rSA0, rSA1, rSA2, fSA1, fSP1
- * Rotation --> -rSA0, +rSA3, +fSA2, +fSP2/-fSP1, -fSA1
- * Steady state: rSA1, rSA2, rSA3, fSA2, fSP2
- *********************************************************/
-
-// Spis and keys are sorted in such away the one in position 0 is the primary
-func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi {
-	logrus.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
-	return nil
-}
-
-func (n *network) maxMTU() int {
-	mtu := 1500
-	if n.mtu != 0 {
-		mtu = n.mtu
-	}
-	mtu -= vxlanEncap
-	if n.secure {
-		// In case of encryption account for the
-		// esp packet espansion and padding
-		mtu -= pktExpansion
-		mtu -= (mtu % 4)
-	}
-	return mtu
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/joinleave.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/joinleave.go
deleted file mode 100644
index ff03f3c..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/joinleave.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package overlay
-
-import (
-	"fmt"
-	"net"
-
-	"github.com/docker/libnetwork/driverapi"
-	"github.com/docker/libnetwork/types"
-	"github.com/gogo/protobuf/proto"
-	"github.com/sirupsen/logrus"
-)
-
-// Join method is invoked when a Sandbox is attached to an endpoint.
-func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error {
-	if err := validateID(nid, eid); err != nil {
-		return err
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return fmt.Errorf("could not find network with id %s", nid)
-	}
-
-	ep := n.endpoint(eid)
-	if ep == nil {
-		return fmt.Errorf("could not find endpoint with id %s", eid)
-	}
-
-	if n.secure && len(d.keys) == 0 {
-		return fmt.Errorf("cannot join secure network: encryption keys not present")
-	}
-
-	s := n.getSubnetforIP(ep.addr)
-	if s == nil {
-		return fmt.Errorf("could not find subnet for endpoint %s", eid)
-	}
-
-	if err := n.obtainVxlanID(s); err != nil {
-		return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
-	}
-
-	if err := n.joinSandbox(false); err != nil {
-		return fmt.Errorf("network sandbox join failed: %v", err)
-	}
-
-	if err := n.joinSubnetSandbox(s, false); err != nil {
-		return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
-	}
-
-	// joinSubnetSandbox gets called when an endpoint comes up on a new subnet in the
-	// overlay network. Hence the Endpoint count should be updated outside joinSubnetSandbox
-	n.incEndpointCount()
-
-	// Add creating a veth Pair for Solaris
-
-	containerIfName := "solaris-if"
-	ep.ifName = containerIfName
-
-	if err := d.writeEndpointToStore(ep); err != nil {
-		return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
-	}
-
-	// Add solaris plumbing to add veth (with ep mac addr) to sandbox
-
-	for _, sub := range n.subnets {
-		if sub == s {
-			continue
-		}
-		if err := jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
-			logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
-		}
-	}
-
-	if iNames := jinfo.InterfaceName(); iNames != nil {
-		err := iNames.SetNames(containerIfName, "eth")
-		if err != nil {
-			return err
-		}
-	}
-
-	d.peerDbAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac,
-		net.ParseIP(d.advertiseAddress), true)
-
-	if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
-		logrus.Warn(err)
-	}
-
-	buf, err := proto.Marshal(&PeerRecord{
-		EndpointIP:       ep.addr.String(),
-		EndpointMAC:      ep.mac.String(),
-		TunnelEndpointIP: d.advertiseAddress,
-	})
-	if err != nil {
-		return err
-	}
-
-	if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
-		logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
-	}
-
-	d.pushLocalEndpointEvent("join", nid, eid)
-
-	return nil
-}
-
-func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) {
-	if tableName != ovPeerTable {
-		logrus.Errorf("Unexpected table notification for table %s received", tableName)
-		return
-	}
-
-	eid := key
-
-	var peer PeerRecord
-	if err := proto.Unmarshal(value, &peer); err != nil {
-		logrus.Errorf("Failed to unmarshal peer record: %v", err)
-		return
-	}
-
-	// Ignore local peers. We already know about them and they
-	// should not be added to vxlan fdb.
-	if peer.TunnelEndpointIP == d.advertiseAddress {
-		return
-	}
-
-	addr, err := types.ParseCIDR(peer.EndpointIP)
-	if err != nil {
-		logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP)
-		return
-	}
-
-	mac, err := net.ParseMAC(peer.EndpointMAC)
-	if err != nil {
-		logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC)
-		return
-	}
-
-	vtep := net.ParseIP(peer.TunnelEndpointIP)
-	if vtep == nil {
-		logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP)
-		return
-	}
-
-	if etype == driverapi.Delete {
-		d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
-		return
-	}
-
-	d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true)
-}
-
-func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) {
-	return "", nil
-}
-
-// Leave method is invoked when a Sandbox detaches from an endpoint.
-func (d *driver) Leave(nid, eid string) error {
-	if err := validateID(nid, eid); err != nil {
-		return err
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return fmt.Errorf("could not find network with id %s", nid)
-	}
-
-	ep := n.endpoint(eid)
-
-	if ep == nil {
-		return types.InternalMaskableErrorf("could not find endpoint with id %s", eid)
-	}
-
-	if d.notifyCh != nil {
-		d.notifyCh <- ovNotify{
-			action: "leave",
-			nw:     n,
-			ep:     ep,
-		}
-	}
-
-	n.leaveSandbox()
-
-	if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
-		logrus.Warn(err)
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
deleted file mode 100644
index 9df7a58..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package overlay
-
-import (
-	"encoding/json"
-	"fmt"
-	"net"
-
-	"github.com/docker/libnetwork/datastore"
-	"github.com/docker/libnetwork/driverapi"
-	"github.com/docker/libnetwork/netutils"
-	"github.com/docker/libnetwork/types"
-	"github.com/sirupsen/logrus"
-)
-
-type endpointTable map[string]*endpoint
-
-const overlayEndpointPrefix = "overlay/endpoint"
-
-type endpoint struct {
-	id       string
-	nid      string
-	ifName   string
-	mac      net.HardwareAddr
-	addr     *net.IPNet
-	dbExists bool
-	dbIndex  uint64
-}
-
-func (n *network) endpoint(eid string) *endpoint {
-	n.Lock()
-	defer n.Unlock()
-
-	return n.endpoints[eid]
-}
-
-func (n *network) addEndpoint(ep *endpoint) {
-	n.Lock()
-	n.endpoints[ep.id] = ep
-	n.Unlock()
-}
-
-func (n *network) deleteEndpoint(eid string) {
-	n.Lock()
-	delete(n.endpoints, eid)
-	n.Unlock()
-}
-
-func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
-	epOptions map[string]interface{}) error {
-	var err error
-
-	if err = validateID(nid, eid); err != nil {
-		return err
-	}
-
-	// Since we perform lazy configuration make sure we try
-	// configuring the driver when we enter CreateEndpoint since
-	// CreateNetwork may not be called in every node.
-	if err := d.configure(); err != nil {
-		return err
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return fmt.Errorf("network id %q not found", nid)
-	}
-
-	ep := &endpoint{
-		id:   eid,
-		nid:  n.id,
-		addr: ifInfo.Address(),
-		mac:  ifInfo.MacAddress(),
-	}
-	if ep.addr == nil {
-		return fmt.Errorf("create endpoint was not passed interface IP address")
-	}
-
-	if s := n.getSubnetforIP(ep.addr); s == nil {
-		return fmt.Errorf("no matching subnet for IP %q in network %q", ep.addr, nid)
-	}
-
-	if ep.mac == nil {
-		ep.mac = netutils.GenerateMACFromIP(ep.addr.IP)
-		if err := ifInfo.SetMacAddress(ep.mac); err != nil {
-			return err
-		}
-	}
-
-	n.addEndpoint(ep)
-
-	if err := d.writeEndpointToStore(ep); err != nil {
-		return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err)
-	}
-
-	return nil
-}
-
-func (d *driver) DeleteEndpoint(nid, eid string) error {
-	if err := validateID(nid, eid); err != nil {
-		return err
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return fmt.Errorf("network id %q not found", nid)
-	}
-
-	ep := n.endpoint(eid)
-	if ep == nil {
-		return fmt.Errorf("endpoint id %q not found", eid)
-	}
-
-	n.deleteEndpoint(eid)
-
-	if err := d.deleteEndpointFromStore(ep); err != nil {
-		logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
-	}
-
-	if ep.ifName == "" {
-		return nil
-	}
-
-	// OVERLAY_SOLARIS: Add Solaris unplumbing for removing the interface endpoint
-
-	return nil
-}
-
-func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) {
-	return make(map[string]interface{}, 0), nil
-}
-
-func (d *driver) deleteEndpointFromStore(e *endpoint) error {
-	if d.localStore == nil {
-		return fmt.Errorf("overlay local store not initialized, ep not deleted")
-	}
-
-	return d.localStore.DeleteObjectAtomic(e)
-}
-
-func (d *driver) writeEndpointToStore(e *endpoint) error {
-	if d.localStore == nil {
-		return fmt.Errorf("overlay local store not initialized, ep not added")
-	}
-
-	return d.localStore.PutObjectAtomic(e)
-}
-
-func (ep *endpoint) DataScope() string {
-	return datastore.LocalScope
-}
-
-func (ep *endpoint) New() datastore.KVObject {
-	return &endpoint{}
-}
-
-func (ep *endpoint) CopyTo(o datastore.KVObject) error {
-	dstep := o.(*endpoint)
-	*dstep = *ep
-	return nil
-}
-
-func (ep *endpoint) Key() []string {
-	return []string{overlayEndpointPrefix, ep.id}
-}
-
-func (ep *endpoint) KeyPrefix() []string {
-	return []string{overlayEndpointPrefix}
-}
-
-func (ep *endpoint) Index() uint64 {
-	return ep.dbIndex
-}
-
-func (ep *endpoint) SetIndex(index uint64) {
-	ep.dbIndex = index
-	ep.dbExists = true
-}
-
-func (ep *endpoint) Exists() bool {
-	return ep.dbExists
-}
-
-func (ep *endpoint) Skip() bool {
-	return false
-}
-
-func (ep *endpoint) Value() []byte {
-	b, err := json.Marshal(ep)
-	if err != nil {
-		return nil
-	}
-	return b
-}
-
-func (ep *endpoint) SetValue(value []byte) error {
-	return json.Unmarshal(value, ep)
-}
-
-func (ep *endpoint) MarshalJSON() ([]byte, error) {
-	epMap := make(map[string]interface{})
-
-	epMap["id"] = ep.id
-	epMap["nid"] = ep.nid
-	if ep.ifName != "" {
-		epMap["ifName"] = ep.ifName
-	}
-	if ep.addr != nil {
-		epMap["addr"] = ep.addr.String()
-	}
-	if len(ep.mac) != 0 {
-		epMap["mac"] = ep.mac.String()
-	}
-
-	return json.Marshal(epMap)
-}
-
-func (ep *endpoint) UnmarshalJSON(value []byte) error {
-	var (
-		err   error
-		epMap map[string]interface{}
-	)
-
-	json.Unmarshal(value, &epMap)
-
-	ep.id = epMap["id"].(string)
-	ep.nid = epMap["nid"].(string)
-	if v, ok := epMap["mac"]; ok {
-		if ep.mac, err = net.ParseMAC(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string))
-		}
-	}
-	if v, ok := epMap["addr"]; ok {
-		if ep.addr, err = types.ParseCIDR(v.(string)); err != nil {
-			return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err)
-		}
-	}
-	if v, ok := epMap["ifName"]; ok {
-		ep.ifName = v.(string)
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
deleted file mode 100644
index 039dbd5..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
+++ /dev/null
@@ -1,786 +0,0 @@
-package overlay
-
-import (
-	"encoding/json"
-	"fmt"
-	"net"
-	"os"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-
-	"github.com/docker/libnetwork/datastore"
-	"github.com/docker/libnetwork/driverapi"
-	"github.com/docker/libnetwork/netlabel"
-	"github.com/docker/libnetwork/netutils"
-	"github.com/docker/libnetwork/osl"
-	"github.com/docker/libnetwork/resolvconf"
-	"github.com/docker/libnetwork/types"
-	"github.com/sirupsen/logrus"
-)
-
-var (
-	hostMode    bool
-	networkOnce sync.Once
-	networkMu   sync.Mutex
-	vniTbl      = make(map[uint32]string)
-)
-
-type networkTable map[string]*network
-
-type subnet struct {
-	once      *sync.Once
-	vxlanName string
-	brName    string
-	vni       uint32
-	initErr   error
-	subnetIP  *net.IPNet
-	gwIP      *net.IPNet
-}
-
-type subnetJSON struct {
-	SubnetIP string
-	GwIP     string
-	Vni      uint32
-}
-
-type network struct {
-	id        string
-	dbIndex   uint64
-	dbExists  bool
-	sbox      osl.Sandbox
-	endpoints endpointTable
-	driver    *driver
-	joinCnt   int
-	once      *sync.Once
-	initEpoch int
-	initErr   error
-	subnets   []*subnet
-	secure    bool
-	mtu       int
-	sync.Mutex
-}
-
-func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) {
-	return nil, types.NotImplementedErrorf("not implemented")
-}
-
-func (d *driver) NetworkFree(id string) error {
-	return types.NotImplementedErrorf("not implemented")
-}
-
-func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error {
-	if id == "" {
-		return fmt.Errorf("invalid network id")
-	}
-	if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" {
-		return types.BadRequestErrorf("ipv4 pool is empty")
-	}
-
-	// Since we perform lazy configuration make sure we try
-	// configuring the driver when we enter CreateNetwork
-	if err := d.configure(); err != nil {
-		return err
-	}
-
-	n := &network{
-		id:        id,
-		driver:    d,
-		endpoints: endpointTable{},
-		once:      &sync.Once{},
-		subnets:   []*subnet{},
-	}
-
-	vnis := make([]uint32, 0, len(ipV4Data))
-	if gval, ok := option[netlabel.GenericData]; ok {
-		optMap := gval.(map[string]string)
-		if val, ok := optMap[netlabel.OverlayVxlanIDList]; ok {
-			logrus.Debugf("overlay: Received vxlan IDs: %s", val)
-			vniStrings := strings.Split(val, ",")
-			for _, vniStr := range vniStrings {
-				vni, err := strconv.Atoi(vniStr)
-				if err != nil {
-					return fmt.Errorf("invalid vxlan id value %q passed", vniStr)
-				}
-
-				vnis = append(vnis, uint32(vni))
-			}
-		}
-		if _, ok := optMap[secureOption]; ok {
-			n.secure = true
-		}
-		if val, ok := optMap[netlabel.DriverMTU]; ok {
-			var err error
-			if n.mtu, err = strconv.Atoi(val); err != nil {
-				return fmt.Errorf("failed to parse %v: %v", val, err)
-			}
-			if n.mtu < 0 {
-				return fmt.Errorf("invalid MTU value: %v", n.mtu)
-			}
-		}
-	}
-
-	// If we are getting vnis from libnetwork, either we get for
-	// all subnets or none.
-	if len(vnis) != 0 && len(vnis) < len(ipV4Data) {
-		return fmt.Errorf("insufficient vnis(%d) passed to overlay", len(vnis))
-	}
-
-	for i, ipd := range ipV4Data {
-		s := &subnet{
-			subnetIP: ipd.Pool,
-			gwIP:     ipd.Gateway,
-			once:     &sync.Once{},
-		}
-
-		if len(vnis) != 0 {
-			s.vni = vnis[i]
-		}
-
-		n.subnets = append(n.subnets, s)
-	}
-
-	if err := n.writeToStore(); err != nil {
-		return fmt.Errorf("failed to update data store for network %v: %v", n.id, err)
-	}
-
-	// Make sure no rule is on the way from any stale secure network
-	if !n.secure {
-		for _, vni := range vnis {
-			programMangle(vni, false)
-		}
-	}
-
-	if nInfo != nil {
-		if err := nInfo.TableEventRegister(ovPeerTable, driverapi.EndpointObject); err != nil {
-			return err
-		}
-	}
-
-	d.addNetwork(n)
-	return nil
-}
-
-func (d *driver) DeleteNetwork(nid string) error {
-	if nid == "" {
-		return fmt.Errorf("invalid network id")
-	}
-
-	// Make sure driver resources are initialized before proceeding
-	if err := d.configure(); err != nil {
-		return err
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return fmt.Errorf("could not find network with id %s", nid)
-	}
-
-	d.deleteNetwork(nid)
-
-	vnis, err := n.releaseVxlanID()
-	if err != nil {
-		return err
-	}
-
-	if n.secure {
-		for _, vni := range vnis {
-			programMangle(vni, false)
-		}
-	}
-
-	return nil
-}
-
-func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error {
-	return nil
-}
-
-func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
-	return nil
-}
-
-func (n *network) incEndpointCount() {
-	n.Lock()
-	defer n.Unlock()
-	n.joinCnt++
-}
-
-func (n *network) joinSandbox(restore bool) error {
-	// If there is a race between two go routines here only one will win
-	// the other will wait.
-	n.once.Do(func() {
-		// save the error status of initSandbox in n.initErr so that
-		// all the racing go routines are able to know the status.
-		n.initErr = n.initSandbox(restore)
-	})
-
-	return n.initErr
-}
-
-func (n *network) joinSubnetSandbox(s *subnet, restore bool) error {
-	s.once.Do(func() {
-		s.initErr = n.initSubnetSandbox(s, restore)
-	})
-	return s.initErr
-}
-
-func (n *network) leaveSandbox() {
-	n.Lock()
-	defer n.Unlock()
-	n.joinCnt--
-	if n.joinCnt != 0 {
-		return
-	}
-
-	// We are about to destroy sandbox since the container is leaving the network
-	// Reinitialize the once variable so that we will be able to trigger one time
-	// sandbox initialization(again) when another container joins subsequently.
-	n.once = &sync.Once{}
-	for _, s := range n.subnets {
-		s.once = &sync.Once{}
-	}
-
-	n.destroySandbox()
-}
-
-// to be called while holding network lock
-func (n *network) destroySandbox() {
-	if n.sbox != nil {
-		for _, iface := range n.sbox.Info().Interfaces() {
-			if err := iface.Remove(); err != nil {
-				logrus.Debugf("Remove interface %s failed: %v", iface.SrcName(), err)
-			}
-		}
-
-		for _, s := range n.subnets {
-			if s.vxlanName != "" {
-				err := deleteInterface(s.vxlanName)
-				if err != nil {
-					logrus.Warnf("could not cleanup sandbox properly: %v", err)
-				}
-			}
-		}
-
-		n.sbox.Destroy()
-		n.sbox = nil
-	}
-}
-
-func networkOnceInit() {
-	if os.Getenv("_OVERLAY_HOST_MODE") != "" {
-		hostMode = true
-		return
-	}
-
-	err := createVxlan("testvxlan1", 1, 0)
-	if err != nil {
-		logrus.Errorf("Failed to create testvxlan1 interface: %v", err)
-		return
-	}
-
-	defer deleteInterface("testvxlan1")
-}
-
-func (n *network) generateVxlanName(s *subnet) string {
-	id := n.id
-	if len(n.id) > 12 {
-		id = n.id[:12]
-	}
-
-	return "vx_" + id + "_0"
-}
-
-func (n *network) generateBridgeName(s *subnet) string {
-	id := n.id
-	if len(n.id) > 5 {
-		id = n.id[:5]
-	}
-
-	return n.getBridgeNamePrefix(s) + "_" + id + "_0"
-}
-
-func (n *network) getBridgeNamePrefix(s *subnet) string {
-	return "ov_" + fmt.Sprintf("%06x", n.vxlanID(s))
-}
-
-func isOverlap(nw *net.IPNet) bool {
-	var nameservers []string
-
-	if rc, err := resolvconf.Get(); err == nil {
-		nameservers = resolvconf.GetNameserversAsCIDR(rc.Content)
-	}
-
-	if err := netutils.CheckNameserverOverlaps(nameservers, nw); err != nil {
-		return true
-	}
-
-	if err := netutils.CheckRouteOverlaps(nw); err != nil {
-		return true
-	}
-
-	return false
-}
-
-func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) error {
-	sbox := n.sandbox()
-
-	// restore overlay osl sandbox
-	Ifaces := make(map[string][]osl.IfaceOption)
-	brIfaceOption := make([]osl.IfaceOption, 2)
-	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
-	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
-	Ifaces[brName+"+br"] = brIfaceOption
-
-	err := sbox.Restore(Ifaces, nil, nil, nil)
-	if err != nil {
-		return err
-	}
-
-	Ifaces = make(map[string][]osl.IfaceOption)
-	vxlanIfaceOption := make([]osl.IfaceOption, 1)
-	vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
-	Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption
-	err = sbox.Restore(Ifaces, nil, nil, nil)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (n *network) addInterface(srcName, dstPrefix, name string, isBridge bool) error {
-	return nil
-}
-
-func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error {
-
-	if hostMode {
-		// Try to delete stale bridge interface if it exists
-		if err := deleteInterface(brName); err != nil {
-			deleteInterfaceBySubnet(n.getBridgeNamePrefix(s), s)
-		}
-
-		if isOverlap(s.subnetIP) {
-			return fmt.Errorf("overlay subnet %s has conflicts in the host while running in host mode", s.subnetIP.String())
-		}
-	}
-
-	if !hostMode {
-		// Try to find this subnet's vni is being used in some
-		// other namespace by looking at vniTbl that we just
-		// populated in the once init. If a hit is found then
-		// it must a stale namespace from previous
-		// life. Destroy it completely and reclaim resourced.
-		networkMu.Lock()
-		path, ok := vniTbl[n.vxlanID(s)]
-		networkMu.Unlock()
-
-		if ok {
-			os.Remove(path)
-
-			networkMu.Lock()
-			delete(vniTbl, n.vxlanID(s))
-			networkMu.Unlock()
-		}
-	}
-
-	err := createVxlan(vxlanName, n.vxlanID(s), n.maxMTU())
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (n *network) initSubnetSandbox(s *subnet, restore bool) error {
-	brName := n.generateBridgeName(s)
-	vxlanName := n.generateVxlanName(s)
-
-	if restore {
-		n.restoreSubnetSandbox(s, brName, vxlanName)
-	} else {
-		n.setupSubnetSandbox(s, brName, vxlanName)
-	}
-
-	n.Lock()
-	s.vxlanName = vxlanName
-	s.brName = brName
-	n.Unlock()
-
-	return nil
-}
-
-func (n *network) cleanupStaleSandboxes() {
-	filepath.Walk(filepath.Dir(osl.GenerateKey("walk")),
-		func(path string, info os.FileInfo, err error) error {
-			_, fname := filepath.Split(path)
-
-			pList := strings.Split(fname, "-")
-			if len(pList) <= 1 {
-				return nil
-			}
-
-			pattern := pList[1]
-			if strings.Contains(n.id, pattern) {
-				// Now that we have destroyed this
-				// sandbox, remove all references to
-				// it in vniTbl so that we don't
-				// inadvertently destroy the sandbox
-				// created in this life.
-				networkMu.Lock()
-				for vni, tblPath := range vniTbl {
-					if tblPath == path {
-						delete(vniTbl, vni)
-					}
-				}
-				networkMu.Unlock()
-			}
-
-			return nil
-		})
-}
-
-func (n *network) initSandbox(restore bool) error {
-	n.Lock()
-	n.initEpoch++
-	n.Unlock()
-
-	networkOnce.Do(networkOnceInit)
-
-	if !restore {
-		// If there are any stale sandboxes related to this network
-		// from previous daemon life clean it up here
-		n.cleanupStaleSandboxes()
-	}
-
-	// In the restore case network sandbox already exist; but we don't know
-	// what epoch number it was created with. It has to be retrieved by
-	// searching the net namespaces.
-	var key string
-	if restore {
-		key = osl.GenerateKey("-" + n.id)
-	} else {
-		key = osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch) + n.id)
-	}
-
-	sbox, err := osl.NewSandbox(key, !hostMode, restore)
-	if err != nil {
-		return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err)
-	}
-
-	n.setSandbox(sbox)
-
-	if !restore {
-		n.driver.peerDbUpdateSandbox(n.id)
-	}
-
-	return nil
-}
-
-func (d *driver) addNetwork(n *network) {
-	d.Lock()
-	d.networks[n.id] = n
-	d.Unlock()
-}
-
-func (d *driver) deleteNetwork(nid string) {
-	d.Lock()
-	delete(d.networks, nid)
-	d.Unlock()
-}
-
-func (d *driver) network(nid string) *network {
-	d.Lock()
-	networks := d.networks
-	d.Unlock()
-
-	n, ok := networks[nid]
-	if !ok {
-		n = d.getNetworkFromStore(nid)
-		if n != nil {
-			n.driver = d
-			n.endpoints = endpointTable{}
-			n.once = &sync.Once{}
-			networks[nid] = n
-		}
-	}
-
-	return n
-}
-
-func (d *driver) getNetworkFromStore(nid string) *network {
-	if d.store == nil {
-		return nil
-	}
-
-	n := &network{id: nid}
-	if err := d.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
-		return nil
-	}
-
-	return n
-}
-
-func (n *network) sandbox() osl.Sandbox {
-	n.Lock()
-	defer n.Unlock()
-
-	return n.sbox
-}
-
-func (n *network) setSandbox(sbox osl.Sandbox) {
-	n.Lock()
-	n.sbox = sbox
-	n.Unlock()
-}
-
-func (n *network) vxlanID(s *subnet) uint32 {
-	n.Lock()
-	defer n.Unlock()
-
-	return s.vni
-}
-
-func (n *network) setVxlanID(s *subnet, vni uint32) {
-	n.Lock()
-	s.vni = vni
-	n.Unlock()
-}
-
-func (n *network) Key() []string {
-	return []string{"overlay", "network", n.id}
-}
-
-func (n *network) KeyPrefix() []string {
-	return []string{"overlay", "network"}
-}
-
-func (n *network) Value() []byte {
-	m := map[string]interface{}{}
-
-	netJSON := []*subnetJSON{}
-
-	for _, s := range n.subnets {
-		sj := &subnetJSON{
-			SubnetIP: s.subnetIP.String(),
-			GwIP:     s.gwIP.String(),
-			Vni:      s.vni,
-		}
-		netJSON = append(netJSON, sj)
-	}
-
-	m["secure"] = n.secure
-	m["subnets"] = netJSON
-	m["mtu"] = n.mtu
-	b, err := json.Marshal(m)
-	if err != nil {
-		return []byte{}
-	}
-
-	return b
-}
-
-func (n *network) Index() uint64 {
-	return n.dbIndex
-}
-
-func (n *network) SetIndex(index uint64) {
-	n.dbIndex = index
-	n.dbExists = true
-}
-
-func (n *network) Exists() bool {
-	return n.dbExists
-}
-
-func (n *network) Skip() bool {
-	return false
-}
-
-func (n *network) SetValue(value []byte) error {
-	var (
-		m       map[string]interface{}
-		newNet  bool
-		isMap   = true
-		netJSON = []*subnetJSON{}
-	)
-
-	if err := json.Unmarshal(value, &m); err != nil {
-		err := json.Unmarshal(value, &netJSON)
-		if err != nil {
-			return err
-		}
-		isMap = false
-	}
-
-	if len(n.subnets) == 0 {
-		newNet = true
-	}
-
-	if isMap {
-		if val, ok := m["secure"]; ok {
-			n.secure = val.(bool)
-		}
-		if val, ok := m["mtu"]; ok {
-			n.mtu = int(val.(float64))
-		}
-		bytes, err := json.Marshal(m["subnets"])
-		if err != nil {
-			return err
-		}
-		if err := json.Unmarshal(bytes, &netJSON); err != nil {
-			return err
-		}
-	}
-
-	for _, sj := range netJSON {
-		subnetIPstr := sj.SubnetIP
-		gwIPstr := sj.GwIP
-		vni := sj.Vni
-
-		subnetIP, _ := types.ParseCIDR(subnetIPstr)
-		gwIP, _ := types.ParseCIDR(gwIPstr)
-
-		if newNet {
-			s := &subnet{
-				subnetIP: subnetIP,
-				gwIP:     gwIP,
-				vni:      vni,
-				once:     &sync.Once{},
-			}
-			n.subnets = append(n.subnets, s)
-		} else {
-			sNet := n.getMatchingSubnet(subnetIP)
-			if sNet != nil {
-				sNet.vni = vni
-			}
-		}
-	}
-	return nil
-}
-
-func (n *network) DataScope() string {
-	return datastore.GlobalScope
-}
-
-func (n *network) writeToStore() error {
-	if n.driver.store == nil {
-		return nil
-	}
-
-	return n.driver.store.PutObjectAtomic(n)
-}
-
-func (n *network) releaseVxlanID() ([]uint32, error) {
-	if len(n.subnets) == 0 {
-		return nil, nil
-	}
-
-	if n.driver.store != nil {
-		if err := n.driver.store.DeleteObjectAtomic(n); err != nil {
-			if err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound {
-				// In both the above cases we can safely assume that the key has been removed by some other
-				// instance and so simply get out of here
-				return nil, nil
-			}
-
-			return nil, fmt.Errorf("failed to delete network to vxlan id map: %v", err)
-		}
-	}
-	var vnis []uint32
-	for _, s := range n.subnets {
-		if n.driver.vxlanIdm != nil {
-			vni := n.vxlanID(s)
-			vnis = append(vnis, vni)
-			n.driver.vxlanIdm.Release(uint64(vni))
-		}
-
-		n.setVxlanID(s, 0)
-	}
-
-	return vnis, nil
-}
-
-func (n *network) obtainVxlanID(s *subnet) error {
-	//return if the subnet already has a vxlan id assigned
-	if s.vni != 0 {
-		return nil
-	}
-
-	if n.driver.store == nil {
-		return fmt.Errorf("no valid vxlan id and no datastore configured, cannot obtain vxlan id")
-	}
-
-	for {
-		if err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil {
-			return fmt.Errorf("getting network %q from datastore failed %v", n.id, err)
-		}
-
-		if s.vni == 0 {
-			vxlanID, err := n.driver.vxlanIdm.GetID(true)
-			if err != nil {
-				return fmt.Errorf("failed to allocate vxlan id: %v", err)
-			}
-
-			n.setVxlanID(s, uint32(vxlanID))
-			if err := n.writeToStore(); err != nil {
-				n.driver.vxlanIdm.Release(uint64(n.vxlanID(s)))
-				n.setVxlanID(s, 0)
-				if err == datastore.ErrKeyModified {
-					continue
-				}
-				return fmt.Errorf("network %q failed to update data store: %v", n.id, err)
-			}
-			return nil
-		}
-		return nil
-	}
-}
-
-// contains return true if the passed ip belongs to one the network's
-// subnets
-func (n *network) contains(ip net.IP) bool {
-	for _, s := range n.subnets {
-		if s.subnetIP.Contains(ip) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// getSubnetforIP returns the subnet to which the given IP belongs
-func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
-	for _, s := range n.subnets {
-		// first check if the mask lengths are the same
-		i, _ := s.subnetIP.Mask.Size()
-		j, _ := ip.Mask.Size()
-		if i != j {
-			continue
-		}
-		if s.subnetIP.Contains(ip.IP) {
-			return s
-		}
-	}
-	return nil
-}
-
-// getMatchingSubnet return the network's subnet that matches the input
-func (n *network) getMatchingSubnet(ip *net.IPNet) *subnet {
-	if ip == nil {
-		return nil
-	}
-	for _, s := range n.subnets {
-		// first check if the mask lengths are the same
-		i, _ := s.subnetIP.Mask.Size()
-		j, _ := ip.Mask.Size()
-		if i != j {
-			continue
-		}
-		if s.subnetIP.IP.Equal(ip.IP) {
-			return s
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_serf.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_serf.go
deleted file mode 100644
index ddc0509..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_serf.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package overlay
-
-import (
-	"fmt"
-	"net"
-	"strings"
-	"time"
-
-	"github.com/hashicorp/serf/serf"
-	"github.com/sirupsen/logrus"
-)
-
-type ovNotify struct {
-	action string
-	ep     *endpoint
-	nw     *network
-}
-
-type logWriter struct{}
-
-func (l *logWriter) Write(p []byte) (int, error) {
-	str := string(p)
-
-	switch {
-	case strings.Contains(str, "[WARN]"):
-		logrus.Warn(str)
-	case strings.Contains(str, "[DEBUG]"):
-		logrus.Debug(str)
-	case strings.Contains(str, "[INFO]"):
-		logrus.Info(str)
-	case strings.Contains(str, "[ERR]"):
-		logrus.Error(str)
-	}
-
-	return len(p), nil
-}
-
-func (d *driver) serfInit() error {
-	var err error
-
-	config := serf.DefaultConfig()
-	config.Init()
-	config.MemberlistConfig.BindAddr = d.advertiseAddress
-
-	d.eventCh = make(chan serf.Event, 4)
-	config.EventCh = d.eventCh
-	config.UserCoalescePeriod = 1 * time.Second
-	config.UserQuiescentPeriod = 50 * time.Millisecond
-
-	config.LogOutput = &logWriter{}
-	config.MemberlistConfig.LogOutput = config.LogOutput
-
-	s, err := serf.Create(config)
-	if err != nil {
-		return fmt.Errorf("failed to create cluster node: %v", err)
-	}
-	defer func() {
-		if err != nil {
-			s.Shutdown()
-		}
-	}()
-
-	d.serfInstance = s
-
-	d.notifyCh = make(chan ovNotify)
-	d.exitCh = make(chan chan struct{})
-
-	go d.startSerfLoop(d.eventCh, d.notifyCh, d.exitCh)
-	return nil
-}
-
-func (d *driver) serfJoin(neighIP string) error {
-	if neighIP == "" {
-		return fmt.Errorf("no neighbor to join")
-	}
-	if _, err := d.serfInstance.Join([]string{neighIP}, false); err != nil {
-		return fmt.Errorf("Failed to join the cluster at neigh IP %s: %v",
-			neighIP, err)
-	}
-	return nil
-}
-
-func (d *driver) notifyEvent(event ovNotify) {
-	ep := event.ep
-
-	ePayload := fmt.Sprintf("%s %s %s %s", event.action, ep.addr.IP.String(),
-		net.IP(ep.addr.Mask).String(), ep.mac.String())
-	eName := fmt.Sprintf("jl %s %s %s", d.serfInstance.LocalMember().Addr.String(),
-		event.nw.id, ep.id)
-
-	if err := d.serfInstance.UserEvent(eName, []byte(ePayload), true); err != nil {
-		logrus.Errorf("Sending user event failed: %v\n", err)
-	}
-}
-
-func (d *driver) processEvent(u serf.UserEvent) {
-	logrus.Debugf("Received user event name:%s, payload:%s\n", u.Name,
-		string(u.Payload))
-
-	var dummy, action, vtepStr, nid, eid, ipStr, maskStr, macStr string
-	if _, err := fmt.Sscan(u.Name, &dummy, &vtepStr, &nid, &eid); err != nil {
-		fmt.Printf("Failed to scan name string: %v\n", err)
-	}
-
-	if _, err := fmt.Sscan(string(u.Payload), &action,
-		&ipStr, &maskStr, &macStr); err != nil {
-		fmt.Printf("Failed to scan value string: %v\n", err)
-	}
-
-	logrus.Debugf("Parsed data = %s/%s/%s/%s/%s/%s\n", nid, eid, vtepStr, ipStr, maskStr, macStr)
-
-	mac, err := net.ParseMAC(macStr)
-	if err != nil {
-		logrus.Errorf("Failed to parse mac: %v\n", err)
-	}
-
-	if d.serfInstance.LocalMember().Addr.String() == vtepStr {
-		return
-	}
-
-	switch action {
-	case "join":
-		if err := d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
-			net.ParseIP(vtepStr), true); err != nil {
-			logrus.Errorf("Peer add failed in the driver: %v\n", err)
-		}
-	case "leave":
-		if err := d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac,
-			net.ParseIP(vtepStr), true); err != nil {
-			logrus.Errorf("Peer delete failed in the driver: %v\n", err)
-		}
-	}
-}
-
-func (d *driver) processQuery(q *serf.Query) {
-	logrus.Debugf("Received query name:%s, payload:%s\n", q.Name,
-		string(q.Payload))
-
-	var nid, ipStr string
-	if _, err := fmt.Sscan(string(q.Payload), &nid, &ipStr); err != nil {
-		fmt.Printf("Failed to scan query payload string: %v\n", err)
-	}
-
-	peerMac, peerIPMask, vtep, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
-	if err != nil {
-		return
-	}
-
-	q.Respond([]byte(fmt.Sprintf("%s %s %s", peerMac.String(), net.IP(peerIPMask).String(), vtep.String())))
-}
-
-func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
-	if d.serfInstance == nil {
-		return nil, nil, nil, fmt.Errorf("could not resolve peer: serf instance not initialized")
-	}
-
-	qPayload := fmt.Sprintf("%s %s", string(nid), peerIP.String())
-	resp, err := d.serfInstance.Query("peerlookup", []byte(qPayload), nil)
-	if err != nil {
-		return nil, nil, nil, fmt.Errorf("resolving peer by querying the cluster failed: %v", err)
-	}
-
-	respCh := resp.ResponseCh()
-	select {
-	case r := <-respCh:
-		var macStr, maskStr, vtepStr string
-		if _, err := fmt.Sscan(string(r.Payload), &macStr, &maskStr, &vtepStr); err != nil {
-			return nil, nil, nil, fmt.Errorf("bad response %q for the resolve query: %v", string(r.Payload), err)
-		}
-
-		mac, err := net.ParseMAC(macStr)
-		if err != nil {
-			return nil, nil, nil, fmt.Errorf("failed to parse mac: %v", err)
-		}
-
-		return mac, net.IPMask(net.ParseIP(maskStr).To4()), net.ParseIP(vtepStr), nil
-
-	case <-time.After(time.Second):
-		return nil, nil, nil, fmt.Errorf("timed out resolving peer by querying the cluster")
-	}
-}
-
-func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify,
-	exitCh chan chan struct{}) {
-
-	for {
-		select {
-		case notify, ok := <-notifyCh:
-			if !ok {
-				break
-			}
-
-			d.notifyEvent(notify)
-		case ch, ok := <-exitCh:
-			if !ok {
-				break
-			}
-
-			if err := d.serfInstance.Leave(); err != nil {
-				logrus.Errorf("failed leaving the cluster: %v\n", err)
-			}
-
-			d.serfInstance.Shutdown()
-			close(ch)
-			return
-		case e, ok := <-eventCh:
-			if !ok {
-				break
-			}
-
-			if e.EventType() == serf.EventQuery {
-				d.processQuery(e.(*serf.Query))
-				break
-			}
-
-			u, ok := e.(serf.UserEvent)
-			if !ok {
-				break
-			}
-			d.processEvent(u)
-		}
-	}
-}
-
-func (d *driver) isSerfAlive() bool {
-	d.Lock()
-	serfInstance := d.serfInstance
-	d.Unlock()
-	if serfInstance == nil || serfInstance.State() != serf.SerfAlive {
-		return false
-	}
-	return true
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_utils.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_utils.go
deleted file mode 100644
index 5e315e7..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_utils.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package overlay
-
-import (
-	"fmt"
-	"os/exec"
-	"strings"
-
-	"github.com/docker/libnetwork/osl"
-)
-
-func validateID(nid, eid string) error {
-	if nid == "" {
-		return fmt.Errorf("invalid network id")
-	}
-
-	if eid == "" {
-		return fmt.Errorf("invalid endpoint id")
-	}
-
-	return nil
-}
-
-func createVxlan(name string, vni uint32, mtu int) error {
-	defer osl.InitOSContext()()
-
-	// Get default interface to plumb the vxlan on
-	routeCmd := "/usr/sbin/ipadm show-addr -p -o addrobj " +
-		"`/usr/sbin/route get default | /usr/bin/grep interface | " +
-		"/usr/bin/awk '{print $2}'`"
-	out, err := exec.Command("/usr/bin/bash", "-c", routeCmd).Output()
-	if err != nil {
-		return fmt.Errorf("cannot get default route: %v", err)
-	}
-
-	defaultInterface := strings.SplitN(string(out), "/", 2)
-	propList := fmt.Sprintf("interface=%s,vni=%d", defaultInterface[0], vni)
-
-	out, err = exec.Command("/usr/sbin/dladm", "create-vxlan", "-t", "-p", propList,
-		name).Output()
-	if err != nil {
-		return fmt.Errorf("error creating vxlan interface: %v %s", err, out)
-	}
-
-	return nil
-}
-
-func deleteInterfaceBySubnet(brPrefix string, s *subnet) error {
-	return nil
-
-}
-
-func deleteInterface(name string) error {
-	defer osl.InitOSContext()()
-
-	out, err := exec.Command("/usr/sbin/dladm", "delete-vxlan", name).Output()
-	if err != nil {
-		return fmt.Errorf("error creating vxlan interface: %v %s", err, out)
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
deleted file mode 100644
index 92b0a4e..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package overlay
-
-//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf  --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto
-
-import (
-	"fmt"
-	"net"
-	"sync"
-
-	"github.com/docker/libnetwork/datastore"
-	"github.com/docker/libnetwork/discoverapi"
-	"github.com/docker/libnetwork/driverapi"
-	"github.com/docker/libnetwork/idm"
-	"github.com/docker/libnetwork/netlabel"
-	"github.com/docker/libnetwork/osl"
-	"github.com/docker/libnetwork/types"
-	"github.com/hashicorp/serf/serf"
-	"github.com/sirupsen/logrus"
-)
-
-// XXX OVERLAY_SOLARIS
-// Might need changes for names/constant values in solaris
-const (
-	networkType  = "overlay"
-	vethPrefix   = "veth"
-	vethLen      = 7
-	vxlanIDStart = 256
-	vxlanIDEnd   = (1 << 24) - 1
-	vxlanPort    = 4789
-	vxlanEncap   = 50
-	secureOption = "encrypted"
-)
-
-var initVxlanIdm = make(chan (bool), 1)
-
-type driver struct {
-	eventCh          chan serf.Event
-	notifyCh         chan ovNotify
-	exitCh           chan chan struct{}
-	bindAddress      string
-	advertiseAddress string
-	neighIP          string
-	config           map[string]interface{}
-	peerDb           peerNetworkMap
-	secMap           *encrMap
-	serfInstance     *serf.Serf
-	networks         networkTable
-	store            datastore.DataStore
-	localStore       datastore.DataStore
-	vxlanIdm         *idm.Idm
-	once             sync.Once
-	joinOnce         sync.Once
-	keys             []*key
-	sync.Mutex
-}
-
-// Init registers a new instance of overlay driver
-func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
-	c := driverapi.Capability{
-		DataScope:         datastore.GlobalScope,
-		ConnectivityScope: datastore.GlobalScope,
-	}
-	d := &driver{
-		networks: networkTable{},
-		peerDb: peerNetworkMap{
-			mp: map[string]*peerMap{},
-		},
-		secMap: &encrMap{nodes: map[string][]*spi{}},
-		config: config,
-	}
-
-	if data, ok := config[netlabel.GlobalKVClient]; ok {
-		var err error
-		dsc, ok := data.(discoverapi.DatastoreConfigData)
-		if !ok {
-			return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
-		}
-		d.store, err = datastore.NewDataStoreFromConfig(dsc)
-		if err != nil {
-			return types.InternalErrorf("failed to initialize data store: %v", err)
-		}
-	}
-
-	if data, ok := config[netlabel.LocalKVClient]; ok {
-		var err error
-		dsc, ok := data.(discoverapi.DatastoreConfigData)
-		if !ok {
-			return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
-		}
-		d.localStore, err = datastore.NewDataStoreFromConfig(dsc)
-		if err != nil {
-			return types.InternalErrorf("failed to initialize local data store: %v", err)
-		}
-	}
-
-	d.restoreEndpoints()
-
-	return dc.RegisterDriver(networkType, d, c)
-}
-
-// Endpoints are stored in the local store. Restore them and reconstruct the overlay sandbox
-func (d *driver) restoreEndpoints() error {
-	if d.localStore == nil {
-		logrus.Warnf("Cannot restore overlay endpoints because local datastore is missing")
-		return nil
-	}
-	kvol, err := d.localStore.List(datastore.Key(overlayEndpointPrefix), &endpoint{})
-	if err != nil && err != datastore.ErrKeyNotFound {
-		return fmt.Errorf("failed to read overlay endpoint from store: %v", err)
-	}
-
-	if err == datastore.ErrKeyNotFound {
-		return nil
-	}
-	for _, kvo := range kvol {
-		ep := kvo.(*endpoint)
-		n := d.network(ep.nid)
-		if n == nil {
-			logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7])
-			logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7])
-			if err := d.deleteEndpointFromStore(ep); err != nil {
-				logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7])
-			}
-			continue
-		}
-		n.addEndpoint(ep)
-
-		s := n.getSubnetforIP(ep.addr)
-		if s == nil {
-			return fmt.Errorf("could not find subnet for endpoint %s", ep.id)
-		}
-
-		if err := n.joinSandbox(true); err != nil {
-			return fmt.Errorf("restore network sandbox failed: %v", err)
-		}
-
-		if err := n.joinSubnetSandbox(s, true); err != nil {
-			return fmt.Errorf("restore subnet sandbox failed for %q: %v", s.subnetIP.String(), err)
-		}
-
-		Ifaces := make(map[string][]osl.IfaceOption)
-		vethIfaceOption := make([]osl.IfaceOption, 1)
-		vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
-		Ifaces["veth+veth"] = vethIfaceOption
-
-		err := n.sbox.Restore(Ifaces, nil, nil, nil)
-		if err != nil {
-			return fmt.Errorf("failed to restore overlay sandbox: %v", err)
-		}
-
-		n.incEndpointCount()
-		d.peerDbAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
-	}
-	return nil
-}
-
-// Fini cleans up the driver resources
-func Fini(drv driverapi.Driver) {
-	d := drv.(*driver)
-
-	if d.exitCh != nil {
-		waitCh := make(chan struct{})
-
-		d.exitCh <- waitCh
-
-		<-waitCh
-	}
-}
-
-func (d *driver) configure() error {
-	if d.store == nil {
-		return nil
-	}
-
-	if d.vxlanIdm == nil {
-		return d.initializeVxlanIdm()
-	}
-
-	return nil
-}
-
-func (d *driver) initializeVxlanIdm() error {
-	var err error
-
-	initVxlanIdm <- true
-	defer func() { <-initVxlanIdm }()
-
-	if d.vxlanIdm != nil {
-		return nil
-	}
-
-	d.vxlanIdm, err = idm.New(d.store, "vxlan-id", vxlanIDStart, vxlanIDEnd)
-	if err != nil {
-		return fmt.Errorf("failed to initialize vxlan id manager: %v", err)
-	}
-
-	return nil
-}
-
-func (d *driver) Type() string {
-	return networkType
-}
-
-func (d *driver) IsBuiltIn() bool {
-	return true
-}
-
-func validateSelf(node string) error {
-	advIP := net.ParseIP(node)
-	if advIP == nil {
-		return fmt.Errorf("invalid self address (%s)", node)
-	}
-
-	addrs, err := net.InterfaceAddrs()
-	if err != nil {
-		return fmt.Errorf("Unable to get interface addresses %v", err)
-	}
-	for _, addr := range addrs {
-		ip, _, err := net.ParseCIDR(addr.String())
-		if err == nil && ip.Equal(advIP) {
-			return nil
-		}
-	}
-	return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String())
-}
-
-func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) {
-	if self && !d.isSerfAlive() {
-		d.Lock()
-		d.advertiseAddress = advertiseAddress
-		d.bindAddress = bindAddress
-		d.Unlock()
-
-		// If there is no cluster store there is no need to start serf.
-		if d.store != nil {
-			if err := validateSelf(advertiseAddress); err != nil {
-				logrus.Warn(err.Error())
-			}
-			err := d.serfInit()
-			if err != nil {
-				logrus.Errorf("initializing serf instance failed: %v", err)
-				d.Lock()
-				d.advertiseAddress = ""
-				d.bindAddress = ""
-				d.Unlock()
-				return
-			}
-		}
-	}
-
-	d.Lock()
-	if !self {
-		d.neighIP = advertiseAddress
-	}
-	neighIP := d.neighIP
-	d.Unlock()
-
-	if d.serfInstance != nil && neighIP != "" {
-		var err error
-		d.joinOnce.Do(func() {
-			err = d.serfJoin(neighIP)
-			if err == nil {
-				d.pushLocalDb()
-			}
-		})
-		if err != nil {
-			logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err)
-			d.Lock()
-			d.joinOnce = sync.Once{}
-			d.Unlock()
-			return
-		}
-	}
-}
-
-func (d *driver) pushLocalEndpointEvent(action, nid, eid string) {
-	n := d.network(nid)
-	if n == nil {
-		logrus.Debugf("Error pushing local endpoint event for network %s", nid)
-		return
-	}
-	ep := n.endpoint(eid)
-	if ep == nil {
-		logrus.Debugf("Error pushing local endpoint event for ep %s / %s", nid, eid)
-		return
-	}
-
-	if !d.isSerfAlive() {
-		return
-	}
-	d.notifyCh <- ovNotify{
-		action: "join",
-		nw:     n,
-		ep:     ep,
-	}
-}
-
-// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
-func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
-	var err error
-	switch dType {
-	case discoverapi.NodeDiscovery:
-		nodeData, ok := data.(discoverapi.NodeDiscoveryData)
-		if !ok || nodeData.Address == "" {
-			return fmt.Errorf("invalid discovery data")
-		}
-		d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self)
-	case discoverapi.DatastoreConfig:
-		if d.store != nil {
-			return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already")
-		}
-		dsc, ok := data.(discoverapi.DatastoreConfigData)
-		if !ok {
-			return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
-		}
-		d.store, err = datastore.NewDataStoreFromConfig(dsc)
-		if err != nil {
-			return types.InternalErrorf("failed to initialize data store: %v", err)
-		}
-	case discoverapi.EncryptionKeysConfig:
-		encrData, ok := data.(discoverapi.DriverEncryptionConfig)
-		if !ok {
-			return fmt.Errorf("invalid encryption key notification data")
-		}
-		keys := make([]*key, 0, len(encrData.Keys))
-		for i := 0; i < len(encrData.Keys); i++ {
-			k := &key{
-				value: encrData.Keys[i],
-				tag:   uint32(encrData.Tags[i]),
-			}
-			keys = append(keys, k)
-		}
-		d.setKeys(keys)
-	case discoverapi.EncryptionKeysUpdate:
-		var newKey, delKey, priKey *key
-		encrData, ok := data.(discoverapi.DriverEncryptionUpdate)
-		if !ok {
-			return fmt.Errorf("invalid encryption key notification data")
-		}
-		if encrData.Key != nil {
-			newKey = &key{
-				value: encrData.Key,
-				tag:   uint32(encrData.Tag),
-			}
-		}
-		if encrData.Primary != nil {
-			priKey = &key{
-				value: encrData.Primary,
-				tag:   uint32(encrData.PrimaryTag),
-			}
-		}
-		if encrData.Prune != nil {
-			delKey = &key{
-				value: encrData.Prune,
-				tag:   uint32(encrData.PruneTag),
-			}
-		}
-		d.updateKeys(newKey, priKey, delKey)
-	default:
-	}
-	return nil
-}
-
-// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
-func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
-	return nil
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.pb.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.pb.go
deleted file mode 100644
index cfa0eee..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.pb.go
+++ /dev/null
@@ -1,468 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: overlay.proto
-// DO NOT EDIT!
-
-/*
-	Package overlay is a generated protocol buffer package.
-
-	It is generated from these files:
-		overlay.proto
-
-	It has these top-level messages:
-		PeerRecord
-*/
-package overlay
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-
-import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
-
-// PeerRecord defines the information corresponding to a peer
-// container in the overlay network.
-type PeerRecord struct {
-	// Endpoint IP is the IP of the container attachment on the
-	// given overlay network.
-	EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
-	// Endpoint MAC is the mac address of the container attachment
-	// on the given overlay network.
-	EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"`
-	// Tunnel Endpoint IP defines the host IP for the host in
-	// which this container is running and can be reached by
-	// building a tunnel to that host IP.
-	TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"`
-}
-
-func (m *PeerRecord) Reset()                    { *m = PeerRecord{} }
-func (*PeerRecord) ProtoMessage()               {}
-func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} }
-
-func init() {
-	proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord")
-}
-func (this *PeerRecord) GoString() string {
-	if this == nil {
-		return "nil"
-	}
-	s := make([]string, 0, 7)
-	s = append(s, "&overlay.PeerRecord{")
-	s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n")
-	s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n")
-	s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n")
-	s = append(s, "}")
-	return strings.Join(s, "")
-}
-func valueToGoStringOverlay(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
-	if e == nil {
-		return "nil"
-	}
-	s := "map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "}"
-	return s
-}
-func (m *PeerRecord) Marshal() (data []byte, err error) {
-	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
-	if err != nil {
-		return nil, err
-	}
-	return data[:n], nil
-}
-
-func (m *PeerRecord) MarshalTo(data []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m.EndpointIP) > 0 {
-		data[i] = 0xa
-		i++
-		i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP)))
-		i += copy(data[i:], m.EndpointIP)
-	}
-	if len(m.EndpointMAC) > 0 {
-		data[i] = 0x12
-		i++
-		i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC)))
-		i += copy(data[i:], m.EndpointMAC)
-	}
-	if len(m.TunnelEndpointIP) > 0 {
-		data[i] = 0x1a
-		i++
-		i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP)))
-		i += copy(data[i:], m.TunnelEndpointIP)
-	}
-	return i, nil
-}
-
-func encodeFixed64Overlay(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Overlay(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
-func encodeVarintOverlay(data []byte, offset int, v uint64) int {
-	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	data[offset] = uint8(v)
-	return offset + 1
-}
-func (m *PeerRecord) Size() (n int) {
-	var l int
-	_ = l
-	l = len(m.EndpointIP)
-	if l > 0 {
-		n += 1 + l + sovOverlay(uint64(l))
-	}
-	l = len(m.EndpointMAC)
-	if l > 0 {
-		n += 1 + l + sovOverlay(uint64(l))
-	}
-	l = len(m.TunnelEndpointIP)
-	if l > 0 {
-		n += 1 + l + sovOverlay(uint64(l))
-	}
-	return n
-}
-
-func sovOverlay(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
-}
-func sozOverlay(x uint64) (n int) {
-	return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (this *PeerRecord) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&PeerRecord{`,
-		`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
-		`EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`,
-		`TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func valueToStringOverlay(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("*%v", pv)
-}
-func (m *PeerRecord) Unmarshal(data []byte) error {
-	l := len(data)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowOverlay
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := data[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOverlay
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := data[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthOverlay
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.EndpointIP = string(data[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOverlay
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := data[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthOverlay
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.EndpointMAC = string(data[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowOverlay
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := data[iNdEx]
-				iNdEx++
-				stringLen |= (uint64(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthOverlay
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.TunnelEndpointIP = string(data[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipOverlay(data[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthOverlay
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipOverlay(data []byte) (n int, err error) {
-	l := len(data)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowOverlay
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := data[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowOverlay
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if data[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowOverlay
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := data[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			iNdEx += length
-			if length < 0 {
-				return 0, ErrInvalidLengthOverlay
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowOverlay
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := data[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipOverlay(data[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowOverlay   = fmt.Errorf("proto: integer overflow")
-)
-
-var fileDescriptorOverlay = []byte{
-	// 195 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d,
-	0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2,
-	0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40,
-	0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a,
-	0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf,
-	0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21,
-	0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e,
-	0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25,
-	0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84,
-	0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1,
-	0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2,
-	0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08,
-	0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.proto b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.proto
deleted file mode 100644
index 45b8c9d..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.proto
+++ /dev/null
@@ -1,27 +0,0 @@
-syntax = "proto3";
-
-import "gogoproto/gogo.proto";
-
-package overlay;
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.stringer_all) = true;
-option (gogoproto.gostring_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.goproto_stringer_all) = false;
-
-// PeerRecord defines the information corresponding to a peer
-// container in the overlay network.
-message PeerRecord {
-	// Endpoint IP is the IP of the container attachment on the
-	// given overlay network.
-	string endpoint_ip = 1 [(gogoproto.customname) = "EndpointIP"];
-	// Endpoint MAC is the mac address of the container attachment
-	// on the given overlay network.
-	string endpoint_mac = 2 [(gogoproto.customname) = "EndpointMAC"];
-	// Tunnel Endpoint IP defines the host IP for the host in
-	// which this container is running and can be reached by
-	// building a tunnel to that host IP.
-	string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
-}
\ No newline at end of file
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/peerdb.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/peerdb.go
deleted file mode 100644
index 23d9a97..0000000
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/peerdb.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package overlay
-
-import (
-	"fmt"
-	"net"
-	"sync"
-
-	"github.com/sirupsen/logrus"
-)
-
-const ovPeerTable = "overlay_peer_table"
-
-type peerKey struct {
-	peerIP  net.IP
-	peerMac net.HardwareAddr
-}
-
-type peerEntry struct {
-	eid        string
-	vtep       net.IP
-	peerIPMask net.IPMask
-	inSandbox  bool
-	isLocal    bool
-}
-
-type peerMap struct {
-	mp map[string]peerEntry
-	sync.Mutex
-}
-
-type peerNetworkMap struct {
-	mp map[string]*peerMap
-	sync.Mutex
-}
-
-func (pKey peerKey) String() string {
-	return fmt.Sprintf("%s %s", pKey.peerIP, pKey.peerMac)
-}
-
-func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error {
-	ipB, err := state.Token(true, nil)
-	if err != nil {
-		return err
-	}
-
-	pKey.peerIP = net.ParseIP(string(ipB))
-
-	macB, err := state.Token(true, nil)
-	if err != nil {
-		return err
-	}
-
-	pKey.peerMac, err = net.ParseMAC(string(macB))
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-var peerDbWg sync.WaitGroup
-
-func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
-	d.peerDb.Lock()
-	nids := []string{}
-	for nid := range d.peerDb.mp {
-		nids = append(nids, nid)
-	}
-	d.peerDb.Unlock()
-
-	for _, nid := range nids {
-		d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
-			return f(nid, pKey, pEntry)
-		})
-	}
-	return nil
-}
-
-func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error {
-	d.peerDb.Lock()
-	pMap, ok := d.peerDb.mp[nid]
-	if !ok {
-		d.peerDb.Unlock()
-		return nil
-	}
-	d.peerDb.Unlock()
-
-	pMap.Lock()
-	for pKeyStr, pEntry := range pMap.mp {
-		var pKey peerKey
-		if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
-			logrus.Warnf("Peer key scan on network %s failed: %v", nid, err)
-		}
-
-		if f(&pKey, &pEntry) {
-			pMap.Unlock()
-			return nil
-		}
-	}
-	pMap.Unlock()
-
-	return nil
-}
-
-func (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
-	var (
-		peerMac    net.HardwareAddr
-		vtep       net.IP
-		peerIPMask net.IPMask
-		found      bool
-	)
-
-	err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
-		if pKey.peerIP.Equal(peerIP) {
-			peerMac = pKey.peerMac
-			peerIPMask = pEntry.peerIPMask
-			vtep = pEntry.vtep
-			found = true
-			return found
-		}
-
-		return found
-	})
-
-	if err != nil {
-		return nil, nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
-	}
-
-	if !found {
-		return nil, nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
-	}
-
-	return peerMac, peerIPMask, vtep, nil
-}
-
-func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP, isLocal bool) {
-
-	peerDbWg.Wait()
-
-	d.peerDb.Lock()
-	pMap, ok := d.peerDb.mp[nid]
-	if !ok {
-		d.peerDb.mp[nid] = &peerMap{
-			mp: make(map[string]peerEntry),
-		}
-
-		pMap = d.peerDb.mp[nid]
-	}
-	d.peerDb.Unlock()
-
-	pKey := peerKey{
-		peerIP:  peerIP,
-		peerMac: peerMac,
-	}
-
-	pEntry := peerEntry{
-		eid:        eid,
-		vtep:       vtep,
-		peerIPMask: peerIPMask,
-		isLocal:    isLocal,
-	}
-
-	pMap.Lock()
-	pMap.mp[pKey.String()] = pEntry
-	pMap.Unlock()
-}
-
-func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP) {
-	peerDbWg.Wait()
-
-	d.peerDb.Lock()
-	pMap, ok := d.peerDb.mp[nid]
-	if !ok {
-		d.peerDb.Unlock()
-		return
-	}
-	d.peerDb.Unlock()
-
-	pKey := peerKey{
-		peerIP:  peerIP,
-		peerMac: peerMac,
-	}
-
-	pMap.Lock()
-	delete(pMap.mp, pKey.String())
-	pMap.Unlock()
-}
-
-func (d *driver) peerDbUpdateSandbox(nid string) {
-	d.peerDb.Lock()
-	pMap, ok := d.peerDb.mp[nid]
-	if !ok {
-		d.peerDb.Unlock()
-		return
-	}
-	d.peerDb.Unlock()
-
-	peerDbWg.Add(1)
-
-	var peerOps []func()
-	pMap.Lock()
-	for pKeyStr, pEntry := range pMap.mp {
-		var pKey peerKey
-		if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil {
-			fmt.Printf("peer key scan failed: %v", err)
-		}
-
-		if pEntry.isLocal {
-			continue
-		}
-
-		// Go captures variables by reference. The pEntry could be
-		// pointing to the same memory location for every iteration. Make
-		// a copy of pEntry before capturing it in the following closure.
-		entry := pEntry
-		op := func() {
-			if err := d.peerAdd(nid, entry.eid, pKey.peerIP, entry.peerIPMask,
-				pKey.peerMac, entry.vtep,
-				false); err != nil {
-				fmt.Printf("peerdbupdate in sandbox failed for ip %s and mac %s: %v",
-					pKey.peerIP, pKey.peerMac, err)
-			}
-		}
-
-		peerOps = append(peerOps, op)
-	}
-	pMap.Unlock()
-
-	for _, op := range peerOps {
-		op()
-	}
-
-	peerDbWg.Done()
-}
-
-func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
-
-	if err := validateID(nid, eid); err != nil {
-		return err
-	}
-
-	if updateDb {
-		d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return nil
-	}
-
-	sbox := n.sandbox()
-	if sbox == nil {
-		return nil
-	}
-
-	IP := &net.IPNet{
-		IP:   peerIP,
-		Mask: peerIPMask,
-	}
-
-	s := n.getSubnetforIP(IP)
-	if s == nil {
-		return fmt.Errorf("couldn't find the subnet %q in network %q", IP.String(), n.id)
-	}
-
-	if err := n.obtainVxlanID(s); err != nil {
-		return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
-	}
-
-	if err := n.joinSubnetSandbox(s, false); err != nil {
-		return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
-	}
-
-	if err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {
-		logrus.Warn(err)
-	}
-
-	// Add neighbor entry for the peer IP
-	if err := sbox.AddNeighbor(peerIP, peerMac, false, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
-		return fmt.Errorf("could not add neigbor entry into the sandbox: %v", err)
-	}
-
-	// XXX Add fdb entry to the bridge for the peer mac
-
-	return nil
-}
-
-func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error {
-
-	if err := validateID(nid, eid); err != nil {
-		return err
-	}
-
-	if updateDb {
-		d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)
-	}
-
-	n := d.network(nid)
-	if n == nil {
-		return nil
-	}
-
-	sbox := n.sandbox()
-	if sbox == nil {
-		return nil
-	}
-
-	// Delete fdb entry to the bridge for the peer mac
-	if err := sbox.DeleteNeighbor(vtep, peerMac, true); err != nil {
-		return fmt.Errorf("could not delete fdb entry into the sandbox: %v", err)
-	}
-
-	// Delete neighbor entry for the peer IP
-	if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
-		return fmt.Errorf("could not delete neigbor entry into the sandbox: %v", err)
-	}
-
-	if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
-		logrus.Warn(err)
-	}
-
-	return nil
-}
-
-func (d *driver) pushLocalDb() {
-	d.peerDbWalk(func(nid string, pKey *peerKey, pEntry *peerEntry) bool {
-		if pEntry.isLocal {
-			d.pushLocalEndpointEvent("join", nid, pEntry.eid)
-		}
-		return false
-	})
-}
diff --git a/vendor/github.com/docker/libnetwork/drivers_solaris.go b/vendor/github.com/docker/libnetwork/drivers_solaris.go
deleted file mode 100644
index c4fcd96..0000000
--- a/vendor/github.com/docker/libnetwork/drivers_solaris.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package libnetwork
-
-import (
-	"github.com/docker/libnetwork/drivers/null"
-	"github.com/docker/libnetwork/drivers/solaris/bridge"
-	"github.com/docker/libnetwork/drivers/solaris/overlay"
-)
-
-func getInitializers(experimental bool) []initializer {
-	return []initializer{
-		{overlay.Init, "overlay"},
-		{bridge.Init, "bridge"},
-		{null.Init, "null"},
-	}
-}
diff --git a/vendor/github.com/docker/libnetwork/endpoint_cnt.go b/vendor/github.com/docker/libnetwork/endpoint_cnt.go
index c63d06a..7b75274 100644
--- a/vendor/github.com/docker/libnetwork/endpoint_cnt.go
+++ b/vendor/github.com/docker/libnetwork/endpoint_cnt.go
@@ -138,6 +138,15 @@
 }
 
 func (ec *endpointCnt) atomicIncDecEpCnt(inc bool) error {
+	store := ec.n.getController().getStore(ec.DataScope())
+	if store == nil {
+		return fmt.Errorf("store not found for scope %s", ec.DataScope())
+	}
+
+	tmp := &endpointCnt{n: ec.n}
+	if err := store.GetObject(datastore.Key(ec.Key()...), tmp); err != nil {
+		return err
+	}
 retry:
 	ec.Lock()
 	if inc {
@@ -149,11 +158,6 @@
 	}
 	ec.Unlock()
 
-	store := ec.n.getController().getStore(ec.DataScope())
-	if store == nil {
-		return fmt.Errorf("store not found for scope %s", ec.DataScope())
-	}
-
 	if err := ec.n.getController().updateToStore(ec); err != nil {
 		if err == datastore.ErrKeyModified {
 			if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil {
diff --git a/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go b/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
index 321448a..e245327 100644
--- a/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
+++ b/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris darwin
+// +build linux freebsd darwin
 
 package builtin
 
diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
index ebcdd80..effbb71 100644
--- a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
+++ b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
@@ -5,12 +5,19 @@
 import (
 	"net"
 	"syscall"
+	"time"
 
 	"fmt"
+
 	"github.com/vishvananda/netlink/nl"
 	"github.com/vishvananda/netns"
 )
 
+const (
+	netlinkRecvSocketsTimeout = 3 * time.Second
+	netlinkSendSocketTimeout  = 30 * time.Second
+)
+
 // Service defines an IPVS service in its entirety.
 type Service struct {
 	// Virtual service address.
@@ -82,6 +89,15 @@
 	if err != nil {
 		return nil, err
 	}
+	// Add operation timeout to avoid deadlocks
+	tv := syscall.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds())
+	if err := sock.SetSendTimeout(&tv); err != nil {
+		return nil, err
+	}
+	tv = syscall.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds())
+	if err := sock.SetReceiveTimeout(&tv); err != nil {
+		return nil, err
+	}
 
 	return &Handle{sock: sock}, nil
 }
diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
index 2089283..c062a17 100644
--- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go
+++ b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
@@ -203,10 +203,6 @@
 }
 
 func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
-	var (
-		err error
-	)
-
 	if err := s.Send(req); err != nil {
 		return nil, err
 	}
@@ -222,6 +218,13 @@
 	for {
 		msgs, err := s.Receive()
 		if err != nil {
+			if s.GetFd() == -1 {
+				return nil, fmt.Errorf("Socket got closed on receive")
+			}
+			if err == syscall.EAGAIN {
+				// timeout fired
+				continue
+			}
 			return nil, err
 		}
 		for _, m := range msgs {
diff --git a/vendor/github.com/docker/libnetwork/netutils/utils_solaris.go b/vendor/github.com/docker/libnetwork/netutils/utils_solaris.go
deleted file mode 100644
index dc67101..0000000
--- a/vendor/github.com/docker/libnetwork/netutils/utils_solaris.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// +build solaris
-
-package netutils
-
-import (
-	"fmt"
-	"net"
-	"os/exec"
-	"strings"
-
-	"github.com/docker/libnetwork/ipamutils"
-	"github.com/vishvananda/netlink"
-)
-
-var (
-	networkGetRoutesFct func(netlink.Link, int) ([]netlink.Route, error)
-)
-
-// CheckRouteOverlaps checks whether the passed network overlaps with any existing routes
-func CheckRouteOverlaps(toCheck *net.IPNet) error {
-	return nil
-}
-
-// ElectInterfaceAddresses looks for an interface on the OS with the specified name
-// and returns returns all its IPv4 and IPv6 addresses in CIDR notation.
-// If a failure in retrieving the addresses or no IPv4 address is found, an error is returned.
-// If the interface does not exist, it chooses from a predefined
-// list the first IPv4 address which does not conflict with other
-// interfaces on the system.
-func ElectInterfaceAddresses(name string) ([]*net.IPNet, []*net.IPNet, error) {
-	var (
-		v4Net *net.IPNet
-	)
-
-	out, err := exec.Command("/usr/sbin/ipadm", "show-addr",
-		"-p", "-o", "addrobj,addr").Output()
-	if err != nil {
-		fmt.Println("failed to list interfaces on system")
-		return nil, nil, err
-	}
-	alist := strings.Fields(string(out))
-	for _, a := range alist {
-		linkandaddr := strings.SplitN(a, ":", 2)
-		if len(linkandaddr) != 2 {
-			fmt.Println("failed to check interfaces on system: ", a)
-			continue
-		}
-		gw := fmt.Sprintf("%s_gw0", name)
-		link := strings.Split(linkandaddr[0], "/")[0]
-		addr := linkandaddr[1]
-		if gw != link {
-			continue
-		}
-		_, ipnet, err := net.ParseCIDR(addr)
-		if err != nil {
-			fmt.Println("failed to parse address: ", addr)
-			continue
-		}
-		v4Net = ipnet
-		break
-	}
-	if v4Net == nil {
-		v4Net, err = FindAvailableNetwork(ipamutils.PredefinedBroadNetworks)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-	return []*net.IPNet{v4Net}, nil, nil
-}
-
-// FindAvailableNetwork returns a network from the passed list which does not
-// overlap with existing interfaces in the system
-func FindAvailableNetwork(list []*net.IPNet) (*net.IPNet, error) {
-	out, err := exec.Command("/usr/sbin/ipadm", "show-addr",
-		"-p", "-o", "addr").Output()
-
-	if err != nil {
-		fmt.Println("failed to list interfaces on system")
-		return nil, err
-	}
-	ipaddrs := strings.Fields(string(out))
-	inuse := []*net.IPNet{}
-	for _, ip := range ipaddrs {
-		_, ipnet, err := net.ParseCIDR(ip)
-		if err != nil {
-			fmt.Println("failed to check interfaces on system: ", ip)
-			continue
-		}
-		inuse = append(inuse, ipnet)
-	}
-	for _, avail := range list {
-		is_avail := true
-		for _, ipnet := range inuse {
-			if NetworkOverlaps(avail, ipnet) {
-				is_avail = false
-				break
-			}
-		}
-		if is_avail {
-			return avail, nil
-		}
-	}
-	return nil, fmt.Errorf("no available network")
-}
diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go
index 1ad4706..318c395 100644
--- a/vendor/github.com/docker/libnetwork/network.go
+++ b/vendor/github.com/docker/libnetwork/network.go
@@ -199,39 +199,40 @@
 }
 
 type network struct {
-	ctrlr        *controller
-	name         string
-	networkType  string
-	id           string
-	created      time.Time
-	scope        string // network data scope
-	labels       map[string]string
-	ipamType     string
-	ipamOptions  map[string]string
-	addrSpace    string
-	ipamV4Config []*IpamConf
-	ipamV6Config []*IpamConf
-	ipamV4Info   []*IpamInfo
-	ipamV6Info   []*IpamInfo
-	enableIPv6   bool
-	postIPv6     bool
-	epCnt        *endpointCnt
-	generic      options.Generic
-	dbIndex      uint64
-	dbExists     bool
-	persist      bool
-	stopWatchCh  chan struct{}
-	drvOnce      *sync.Once
-	resolverOnce sync.Once
-	resolver     []Resolver
-	internal     bool
-	attachable   bool
-	inDelete     bool
-	ingress      bool
-	driverTables []networkDBTable
-	dynamic      bool
-	configOnly   bool
-	configFrom   string
+	ctrlr          *controller
+	name           string
+	networkType    string
+	id             string
+	created        time.Time
+	scope          string // network data scope
+	labels         map[string]string
+	ipamType       string
+	ipamOptions    map[string]string
+	addrSpace      string
+	ipamV4Config   []*IpamConf
+	ipamV6Config   []*IpamConf
+	ipamV4Info     []*IpamInfo
+	ipamV6Info     []*IpamInfo
+	enableIPv6     bool
+	postIPv6       bool
+	epCnt          *endpointCnt
+	generic        options.Generic
+	dbIndex        uint64
+	dbExists       bool
+	persist        bool
+	stopWatchCh    chan struct{}
+	drvOnce        *sync.Once
+	resolverOnce   sync.Once
+	resolver       []Resolver
+	internal       bool
+	attachable     bool
+	inDelete       bool
+	ingress        bool
+	driverTables   []networkDBTable
+	dynamic        bool
+	configOnly     bool
+	configFrom     string
+	loadBalancerIP net.IP
 	sync.Mutex
 }
 
@@ -473,6 +474,7 @@
 	dstN.ingress = n.ingress
 	dstN.configOnly = n.configOnly
 	dstN.configFrom = n.configFrom
+	dstN.loadBalancerIP = n.loadBalancerIP
 
 	// copy labels
 	if dstN.labels == nil {
@@ -589,6 +591,7 @@
 	netMap["ingress"] = n.ingress
 	netMap["configOnly"] = n.configOnly
 	netMap["configFrom"] = n.configFrom
+	netMap["loadBalancerIP"] = n.loadBalancerIP
 	return json.Marshal(netMap)
 }
 
@@ -699,6 +702,9 @@
 	if v, ok := netMap["configFrom"]; ok {
 		n.configFrom = v.(string)
 	}
+	if v, ok := netMap["loadBalancerIP"]; ok {
+		n.loadBalancerIP = net.ParseIP(v.(string))
+	}
 	// Reconcile old networks with the recently added `--ipv6` flag
 	if !n.enableIPv6 {
 		n.enableIPv6 = len(n.ipamV6Info) > 0
@@ -799,6 +805,13 @@
 	}
 }
 
+// NetworkOptionLBEndpoint function returns an option setter for the configuration of the load balancer endpoint for this network
+func NetworkOptionLBEndpoint(ip net.IP) NetworkOption {
+	return func(n *network) {
+		n.loadBalancerIP = ip
+	}
+}
+
 // NetworkOptionDriverOpts function returns an option setter for any driver parameter described by a map
 func NetworkOptionDriverOpts(opts map[string]string) NetworkOption {
 	return func(n *network) {
@@ -944,6 +957,18 @@
 		return &UnknownNetworkError{name: name, id: id}
 	}
 
+	if len(n.loadBalancerIP) != 0 {
+		endpoints := n.Endpoints()
+		if force || len(endpoints) == 1 {
+			n.deleteLoadBalancerSandbox()
+		}
+		//Reload the network from the store to update the epcnt.
+		n, err = c.getNetworkFromStore(id)
+		if err != nil {
+			return &UnknownNetworkError{name: name, id: id}
+		}
+	}
+
 	if !force && n.getEpCnt().EndpointCnt() != 0 {
 		if n.configOnly {
 			return types.ForbiddenErrorf("configuration network %q is in use", n.Name())
@@ -1071,12 +1096,19 @@
 		return nil, types.ForbiddenErrorf("endpoint with name %s already exists in network %s", name, n.Name())
 	}
 
-	ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
-	ep.id = stringid.GenerateRandomID()
-
 	n.ctrlr.networkLocker.Lock(n.id)
 	defer n.ctrlr.networkLocker.Unlock(n.id)
 
+	return n.createEndpoint(name, options...)
+
+}
+
+func (n *network) createEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
+	var err error
+
+	ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
+	ep.id = stringid.GenerateRandomID()
+
 	// Initialize ep.network with a possibly stale copy of n. We need this to get network from
 	// store. But once we get it from store we will have the most uptodate copy possibly.
 	ep.network = n
@@ -2021,3 +2053,80 @@
 
 	return n.(*network), nil
 }
+
+func (n *network) createLoadBalancerSandbox() error {
+	sandboxName := n.name + "-sbox"
+	sbOptions := []SandboxOption{}
+	if n.ingress {
+		sbOptions = append(sbOptions, OptionIngress())
+	}
+	sb, err := n.ctrlr.NewSandbox(sandboxName, sbOptions...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if e := n.ctrlr.SandboxDestroy(sandboxName); e != nil {
+				logrus.Warnf("could not delete sandbox %s on failure (%v): %v", sandboxName, err, e)
+			}
+		}
+	}()
+
+	endpointName := n.name + "-endpoint"
+	epOptions := []EndpointOption{
+		CreateOptionIpam(n.loadBalancerIP, nil, nil, nil),
+		CreateOptionLoadBalancer(),
+	}
+	ep, err := n.createEndpoint(endpointName, epOptions...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if err != nil {
+			if e := ep.Delete(true); e != nil {
+				logrus.Warnf("could not delete endpoint %s on failure (%v): %v", endpointName, err, e)
+			}
+		}
+	}()
+
+	if err := ep.Join(sb, nil); err != nil {
+		return err
+	}
+	return sb.EnableService()
+}
+
+func (n *network) deleteLoadBalancerSandbox() {
+	n.Lock()
+	c := n.ctrlr
+	name := n.name
+	n.Unlock()
+
+	endpointName := name + "-endpoint"
+	sandboxName := name + "-sbox"
+
+	endpoint, err := n.EndpointByName(endpointName)
+	if err != nil {
+		logrus.Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err)
+	} else {
+
+		info := endpoint.Info()
+		if info != nil {
+			sb := info.Sandbox()
+			if sb != nil {
+				if err := sb.DisableService(); err != nil {
+					logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
+					//Ignore error and attempt to delete the load balancer endpoint
+				}
+			}
+		}
+
+		if err := endpoint.Delete(true); err != nil {
+			logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoint.Name(), endpoint.ID(), sandboxName, err)
+			//Ignore error and attempt to delete the sandbox.
+		}
+	}
+
+	if err := c.SandboxDestroy(sandboxName); err != nil {
+		logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
+	}
+}
diff --git a/vendor/github.com/docker/libnetwork/networkdb/cluster.go b/vendor/github.com/docker/libnetwork/networkdb/cluster.go
index 06a7aff..198cace 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/cluster.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/cluster.go
@@ -17,15 +17,10 @@
 )
 
 const (
-	// The garbage collection logic for entries leverage the presence of the network.
-	// For this reason the expiration time of the network is put slightly higher than the entry expiration so that
-	// there is at least 5 extra cycle to make sure that all the entries are properly deleted before deleting the network.
-	reapEntryInterval   = 30 * time.Minute
-	reapNetworkInterval = reapEntryInterval + 5*reapPeriod
-	reapPeriod          = 5 * time.Second
-	retryInterval       = 1 * time.Second
-	nodeReapInterval    = 24 * time.Hour
-	nodeReapPeriod      = 2 * time.Hour
+	reapPeriod       = 5 * time.Second
+	retryInterval    = 1 * time.Second
+	nodeReapInterval = 24 * time.Hour
+	nodeReapPeriod   = 2 * time.Hour
 )
 
 type logWriter struct{}
@@ -260,13 +255,18 @@
 func (nDB *NetworkDB) reapDeadNode() {
 	nDB.Lock()
 	defer nDB.Unlock()
-	for id, n := range nDB.failedNodes {
-		if n.reapTime > 0 {
-			n.reapTime -= nodeReapPeriod
-			continue
+	for _, nodeMap := range []map[string]*node{
+		nDB.failedNodes,
+		nDB.leftNodes,
+	} {
+		for id, n := range nodeMap {
+			if n.reapTime > nodeReapPeriod {
+				n.reapTime -= nodeReapPeriod
+				continue
+			}
+			logrus.Debugf("Garbage collect node %v", n.Name)
+			delete(nodeMap, id)
 		}
-		logrus.Debugf("Removing failed node %v from gossip cluster", n.Name)
-		delete(nDB.failedNodes, id)
 	}
 }
 
@@ -379,7 +379,6 @@
 	thisNodeNetworks := nDB.networks[nDB.config.NodeID]
 	for nid := range thisNodeNetworks {
 		networkNodes[nid] = nDB.networkNodes[nid]
-
 	}
 	printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod
 	printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod
diff --git a/vendor/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
index 6553810..21c3bc0 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
@@ -16,9 +16,12 @@
 	return []byte{}
 }
 
-func (nDB *NetworkDB) getNode(nEvent *NodeEvent) *node {
-	nDB.Lock()
-	defer nDB.Unlock()
+// getNode searches the node inside the tables
+// returns true if the node was respectively in the active list, explicit node leave list or failed list
+func (nDB *NetworkDB) getNode(nEvent *NodeEvent, extract bool) (bool, bool, bool, *node) {
+	var active bool
+	var left bool
+	var failed bool
 
 	for _, nodes := range []map[string]*node{
 		nDB.failedNodes,
@@ -26,35 +29,19 @@
 		nDB.nodes,
 	} {
 		if n, ok := nodes[nEvent.NodeName]; ok {
+			active = &nodes == &nDB.nodes
+			left = &nodes == &nDB.leftNodes
+			failed = &nodes == &nDB.failedNodes
 			if n.ltime >= nEvent.LTime {
-				return nil
+				return active, left, failed, nil
 			}
-			return n
+			if extract {
+				delete(nodes, n.Name)
+			}
+			return active, left, failed, n
 		}
 	}
-	return nil
-}
-
-func (nDB *NetworkDB) checkAndGetNode(nEvent *NodeEvent) *node {
-	nDB.Lock()
-	defer nDB.Unlock()
-
-	for _, nodes := range []map[string]*node{
-		nDB.failedNodes,
-		nDB.leftNodes,
-		nDB.nodes,
-	} {
-		if n, ok := nodes[nEvent.NodeName]; ok {
-			if n.ltime >= nEvent.LTime {
-				return nil
-			}
-
-			delete(nodes, n.Name)
-			return n
-		}
-	}
-
-	return nil
+	return active, left, failed, nil
 }
 
 func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
@@ -62,11 +49,14 @@
 	// time.
 	nDB.networkClock.Witness(nEvent.LTime)
 
-	n := nDB.getNode(nEvent)
+	nDB.RLock()
+	active, left, _, n := nDB.getNode(nEvent, false)
 	if n == nil {
+		nDB.RUnlock()
 		return false
 	}
-	// If its a node leave event for a manager and this is the only manager we
+	nDB.RUnlock()
+	// If it is a node leave event for a manager and this is the only manager we
 	// know of we want the reconnect logic to kick in. In a single manager
 	// cluster manager's gossip can't be bootstrapped unless some other node
 	// connects to it.
@@ -79,28 +69,38 @@
 		}
 	}
 
-	n = nDB.checkAndGetNode(nEvent)
-	if n == nil {
-		return false
-	}
-
 	n.ltime = nEvent.LTime
 
 	switch nEvent.Type {
 	case NodeEventTypeJoin:
-		nDB.Lock()
-		_, found := nDB.nodes[n.Name]
-		nDB.nodes[n.Name] = n
-		nDB.Unlock()
-		if !found {
-			logrus.Infof("Node join event for %s/%s", n.Name, n.Addr)
+		if active {
+			// the node is already marked as active nothing to do
+			return false
 		}
+		nDB.Lock()
+		// Because the lock got released on the previous check we have to do it again and re-verify the status of the node
+		// All of this is to avoid a big lock on the function
+		if active, _, _, n = nDB.getNode(nEvent, true); !active && n != nil {
+			n.reapTime = 0
+			nDB.nodes[n.Name] = n
+			logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
+		}
+		nDB.Unlock()
 		return true
 	case NodeEventTypeLeave:
+		if left {
+			// the node is already marked as left nothing to do.
+			return false
+		}
 		nDB.Lock()
-		nDB.leftNodes[n.Name] = n
+		// Because the lock got released on the previous check we have to do it again and re-verify the status of the node
+		// All of this is to avoid a big lock on the function
+		if _, left, _, n = nDB.getNode(nEvent, true); !left && n != nil {
+			n.reapTime = nodeReapInterval
+			nDB.leftNodes[n.Name] = n
+			logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
+		}
 		nDB.Unlock()
-		logrus.Infof("Node leave event for %s/%s", n.Name, n.Addr)
 		return true
 	}
 
@@ -140,7 +140,7 @@
 		n.ltime = nEvent.LTime
 		n.leaving = nEvent.Type == NetworkEventTypeLeave
 		if n.leaving {
-			n.reapTime = reapNetworkInterval
+			n.reapTime = nDB.config.reapNetworkInterval
 
 			// The remote node is leaving the network, but not the gossip cluster.
 			// Mark all its entries in deleted state, this will guarantee that
@@ -162,6 +162,12 @@
 		return false
 	}
 
+	// If the node is not known to memberlist we cannot save any state for it, because if it actually
+	// dies we won't receive any notification and we would remain stuck with it
+	if _, ok := nDB.nodes[nEvent.NodeName]; !ok {
+		return false
+	}
+
 	// This remote network join is being seen the first time.
 	nodeNetworks[nEvent.NetworkID] = &network{
 		id:    nEvent.NetworkID,
@@ -216,8 +222,9 @@
 	// This case can happen if the cluster is running different versions of the engine where the old version does not have the
 	// field. If that is not the case, this can be a BUG
 	if e.deleting && e.reapTime == 0 {
-		logrus.Warnf("handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", tEvent)
-		e.reapTime = reapEntryInterval
+		logrus.Warnf("%v(%v) handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?",
+			nDB.config.Hostname, nDB.config.NodeID, tEvent)
+		e.reapTime = nDB.config.reapEntryInterval
 	}
 
 	nDB.Lock()
@@ -229,7 +236,7 @@
 		// If the residual reapTime is lower or equal to 1/6 of the total reapTime don't bother broadcasting it around
 		// most likely the cluster is already aware of it, if not who will sync with this node will catch the state too.
 		// This also avoids that deletion of entries close to their garbage collection ends up circuling around forever
-		return e.reapTime > reapEntryInterval/6
+		return e.reapTime > nDB.config.reapEntryInterval/6
 	}
 
 	var op opType
@@ -465,7 +472,7 @@
 	var gMsg GossipMessage
 	err := proto.Unmarshal(buf, &gMsg)
 	if err != nil {
-		logrus.Errorf("Error unmarshalling push pull messsage: %v", err)
+		logrus.Errorf("Error unmarshalling push pull message: %v", err)
 		return
 	}
 
diff --git a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
index 74aa465..6e11805 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
@@ -21,10 +21,29 @@
 	}
 }
 
+func (e *eventDelegate) purgeReincarnation(mn *memberlist.Node) {
+	for name, node := range e.nDB.failedNodes {
+		if node.Addr.Equal(mn.Addr) {
+			logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+			delete(e.nDB.failedNodes, name)
+			return
+		}
+	}
+
+	for name, node := range e.nDB.leftNodes {
+		if node.Addr.Equal(mn.Addr) {
+			logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+			delete(e.nDB.leftNodes, name)
+			return
+		}
+	}
+}
+
 func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
 	logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr)
 	e.broadcastNodeEvent(mn.Addr, opCreate)
 	e.nDB.Lock()
+	defer e.nDB.Unlock()
 	// In case the node is rejoining after a failure or leave,
 	// wait until an explicit join message arrives before adding
 	// it to the nodes just to make sure this is not a stale
@@ -32,12 +51,15 @@
 	_, fOk := e.nDB.failedNodes[mn.Name]
 	_, lOk := e.nDB.leftNodes[mn.Name]
 	if fOk || lOk {
-		e.nDB.Unlock()
 		return
 	}
 
+	// Every node has a unique ID
+	// Check on the base of the IP address if the new node that joined is actually a new incarnation of a previous
+	// failed or shutdown one
+	e.purgeReincarnation(mn)
+
 	e.nDB.nodes[mn.Name] = &node{Node: *mn}
-	e.nDB.Unlock()
 	logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr)
 }
 
@@ -49,18 +71,28 @@
 	// If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted
 	// If the node instead left because was going down, then it makes sense to just delete all its state
 	e.nDB.Lock()
-	e.nDB.deleteNetworkEntriesForNode(mn.Name)
+	defer e.nDB.Unlock()
+	e.nDB.deleteNodeFromNetworks(mn.Name)
 	e.nDB.deleteNodeTableEntries(mn.Name)
 	if n, ok := e.nDB.nodes[mn.Name]; ok {
 		delete(e.nDB.nodes, mn.Name)
 
+		// Check if a new incarnation of the same node already joined
+		// In that case this node can simply be removed and no further action are needed
+		for name, node := range e.nDB.nodes {
+			if node.Addr.Equal(mn.Addr) {
+				logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", name, node.Addr, mn.Name, mn.Addr)
+				return
+			}
+		}
+
 		// In case of node failure, keep retrying to reconnect every retryInterval (1sec) for nodeReapInterval (24h)
 		// Explicit leave will have already removed the node from the list of nodes (nDB.nodes) and put it into the leftNodes map
 		n.reapTime = nodeReapInterval
 		e.nDB.failedNodes[mn.Name] = n
 		failed = true
 	}
-	e.nDB.Unlock()
+
 	if failed {
 		logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
 	}
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
index 45bd9cc..9ec6bec 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
@@ -181,6 +181,13 @@
 	// be able to increase this to get more content into each gossip packet.
 	PacketBufferSize int
 
+	// reapEntryInterval duration of a deleted entry before being garbage collected
+	reapEntryInterval time.Duration
+
+	// reapNetworkInterval duration of a deleted network before being garbage collected
+	// NOTE this MUST always be higher than reapEntryInterval
+	reapNetworkInterval time.Duration
+
 	// StatsPrintPeriod the period to use to print queue stats
 	// Default is 5min
 	StatsPrintPeriod time.Duration
@@ -220,12 +227,18 @@
 		PacketBufferSize:  1400,
 		StatsPrintPeriod:  5 * time.Minute,
 		HealthPrintPeriod: 1 * time.Minute,
+		reapEntryInterval: 30 * time.Minute,
 	}
 }
 
 // New creates a new instance of NetworkDB using the Config passed by
 // the caller.
 func New(c *Config) (*NetworkDB, error) {
+	// The garbage collection logic for entries leverage the presence of the network.
+	// For this reason the expiration time of the network is put slightly higher than the entry expiration so that
+	// there is at least 5 extra cycle to make sure that all the entries are properly deleted before deleting the network.
+	c.reapNetworkInterval = c.reapEntryInterval + 5*reapPeriod
+
 	nDB := &NetworkDB{
 		config:         c,
 		indexes:        make(map[int]*radix.Tree),
@@ -241,7 +254,7 @@
 	nDB.indexes[byTable] = radix.New()
 	nDB.indexes[byNetwork] = radix.New()
 
-	logrus.Debugf("New memberlist node - Node:%v will use memberlist nodeID:%v", c.Hostname, c.NodeID)
+	logrus.Infof("New memberlist node - Node:%v will use memberlist nodeID:%v with config:%+v", c.Hostname, c.NodeID, c)
 	if err := nDB.clusterInit(); err != nil {
 		return nil, err
 	}
@@ -297,6 +310,10 @@
 				Name: node.Name,
 				IP:   node.Addr.String(),
 			})
+		} else {
+			// Added for testing purposes, this condition should never happen else mean that the network list
+			// is out of sync with the node list
+			peers = append(peers, PeerInfo{})
 		}
 	}
 	return peers
@@ -384,17 +401,23 @@
 	return nil
 }
 
+// TableElem elem
+type TableElem struct {
+	Value []byte
+	owner string
+}
+
 // GetTableByNetwork walks the networkdb by the give table and network id and
 // returns a map of keys and values
-func (nDB *NetworkDB) GetTableByNetwork(tname, nid string) map[string]interface{} {
-	entries := make(map[string]interface{})
+func (nDB *NetworkDB) GetTableByNetwork(tname, nid string) map[string]*TableElem {
+	entries := make(map[string]*TableElem)
 	nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s/%s", tname, nid), func(k string, v interface{}) bool {
 		entry := v.(*entry)
 		if entry.deleting {
 			return false
 		}
 		key := k[strings.LastIndex(k, "/")+1:]
-		entries[key] = entry.value
+		entries[key] = &TableElem{Value: entry.value, owner: entry.node}
 		return false
 	})
 	return entries
@@ -414,7 +437,7 @@
 		node:     nDB.config.NodeID,
 		value:    value,
 		deleting: true,
-		reapTime: reapEntryInterval,
+		reapTime: nDB.config.reapEntryInterval,
 	}
 
 	if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil {
@@ -428,7 +451,7 @@
 	return nil
 }
 
-func (nDB *NetworkDB) deleteNetworkEntriesForNode(deletedNode string) {
+func (nDB *NetworkDB) deleteNodeFromNetworks(deletedNode string) {
 	for nid, nodes := range nDB.networkNodes {
 		updatedNodes := make([]string, 0, len(nodes))
 		for _, node := range nodes {
@@ -487,7 +510,7 @@
 				node:     oldEntry.node,
 				value:    oldEntry.value,
 				deleting: true,
-				reapTime: reapEntryInterval,
+				reapTime: nDB.config.reapEntryInterval,
 			}
 
 			// we arrived at this point in 2 cases:
@@ -530,7 +553,9 @@
 
 		nDB.deleteEntry(nid, tname, key)
 
-		nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
+		if !oldEntry.deleting {
+			nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
+		}
 		return false
 	})
 }
@@ -580,6 +605,9 @@
 	nodeNetworks[nid] = &network{id: nid, ltime: ltime, entriesNumber: entries}
 	nodeNetworks[nid].tableBroadcasts = &memberlist.TransmitLimitedQueue{
 		NumNodes: func() int {
+			//TODO fcrisciani this can be optimized maybe avoiding the lock?
+			// this call is done each GetBroadcasts call to evaluate the number of
+			// replicas for the message
 			nDB.RLock()
 			defer nDB.RUnlock()
 			return len(nDB.networkNodes[nid])
@@ -635,7 +663,7 @@
 
 	logrus.Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid)
 	n.ltime = ltime
-	n.reapTime = reapNetworkInterval
+	n.reapTime = nDB.config.reapNetworkInterval
 	n.leaving = true
 	return nil
 }
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
index d70cec7..3c6032b 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
@@ -1,15 +1,19 @@
 package networkdb
 
 import (
+	"encoding/base64"
 	"fmt"
 	"net/http"
 	"strings"
 
+	"github.com/docker/libnetwork/common"
 	"github.com/docker/libnetwork/diagnose"
+	"github.com/sirupsen/logrus"
 )
 
 const (
 	missingParameter = "missing parameter"
+	dbNotAvailable   = "database not available"
 )
 
 // NetDbPaths2Func TODO
@@ -29,8 +33,16 @@
 func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	_, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("join cluster")
+
 	if len(r.Form["members"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?members=ip1,ip2,...", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?members=ip1,ip2,...", r.URL.Path))
+		log.Error("join cluster failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -38,51 +50,88 @@
 	if ok {
 		err := nDB.Join(strings.Split(r.Form["members"][0], ","))
 		if err != nil {
-			fmt.Fprintf(w, "%s error in the DB join %s\n", r.URL.Path, err)
+			rsp := diagnose.FailCommand(fmt.Errorf("%s error in the DB join %s", r.URL.Path, err))
+			log.WithError(err).Error("join cluster failed")
+			diagnose.HTTPReply(w, rsp, json)
 			return
 		}
 
-		fmt.Fprintf(w, "OK\n")
+		log.Info("join cluster done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	_, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("network peers")
+
 	if len(r.Form["nid"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path))
+		log.Error("network peers failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
 		peers := nDB.Peers(r.Form["nid"][0])
-		fmt.Fprintf(w, "Network:%s Total peers: %d\n", r.Form["nid"], len(peers))
+		rsp := &diagnose.TableObj{Length: len(peers)}
 		for i, peerInfo := range peers {
-			fmt.Fprintf(w, "%d) %s -> %s\n", i, peerInfo.Name, peerInfo.IP)
+			rsp.Elements = append(rsp.Elements, &diagnose.PeerEntryObj{Index: i, Name: peerInfo.Name, IP: peerInfo.IP})
 		}
+		log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("network peers done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	diagnose.DebugHTTPForm(r)
+	_, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("cluster peers")
+
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
 		peers := nDB.ClusterPeers()
-		fmt.Fprintf(w, "Total peers: %d\n", len(peers))
+		rsp := &diagnose.TableObj{Length: len(peers)}
 		for i, peerInfo := range peers {
-			fmt.Fprintf(w, "%d) %s -> %s\n", i, peerInfo.Name, peerInfo.IP)
+			rsp.Elements = append(rsp.Elements, &diagnose.PeerEntryObj{Index: i, Name: peerInfo.Name, IP: peerInfo.IP})
 		}
+		log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("cluster peers done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("create entry")
+
 	if len(r.Form["tname"]) < 1 ||
 		len(r.Form["nid"]) < 1 ||
 		len(r.Form["key"]) < 1 ||
 		len(r.Form["value"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+		log.Error("create entry failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -90,25 +139,48 @@
 	nid := r.Form["nid"][0]
 	key := r.Form["key"][0]
 	value := r.Form["value"][0]
+	decodedValue := []byte(value)
+	if !unsafe {
+		var err error
+		decodedValue, err = base64.StdEncoding.DecodeString(value)
+		if err != nil {
+			log.WithError(err).Error("create entry failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
+			return
+		}
+	}
 
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
-		if err := nDB.CreateEntry(tname, nid, key, []byte(value)); err != nil {
-			diagnose.HTTPReplyError(w, err.Error(), "")
+		if err := nDB.CreateEntry(tname, nid, key, decodedValue); err != nil {
+			rsp := diagnose.FailCommand(err)
+			diagnose.HTTPReply(w, rsp, json)
+			log.WithError(err).Error("create entry failed")
 			return
 		}
-		fmt.Fprintf(w, "OK\n")
+		log.Info("create entry done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("update entry")
+
 	if len(r.Form["tname"]) < 1 ||
 		len(r.Form["nid"]) < 1 ||
 		len(r.Form["key"]) < 1 ||
 		len(r.Form["value"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+		log.Error("update entry failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -116,24 +188,46 @@
 	nid := r.Form["nid"][0]
 	key := r.Form["key"][0]
 	value := r.Form["value"][0]
+	decodedValue := []byte(value)
+	if !unsafe {
+		var err error
+		decodedValue, err = base64.StdEncoding.DecodeString(value)
+		if err != nil {
+			log.WithError(err).Error("update entry failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
+			return
+		}
+	}
 
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
-		if err := nDB.UpdateEntry(tname, nid, key, []byte(value)); err != nil {
-			diagnose.HTTPReplyError(w, err.Error(), "")
+		if err := nDB.UpdateEntry(tname, nid, key, decodedValue); err != nil {
+			log.WithError(err).Error("update entry failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
 			return
 		}
-		fmt.Fprintf(w, "OK\n")
+		log.Info("update entry done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	_, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("delete entry")
+
 	if len(r.Form["tname"]) < 1 ||
 		len(r.Form["nid"]) < 1 ||
 		len(r.Form["key"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+		log.Error("delete entry failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -145,20 +239,32 @@
 	if ok {
 		err := nDB.DeleteEntry(tname, nid, key)
 		if err != nil {
-			diagnose.HTTPReplyError(w, err.Error(), "")
+			log.WithError(err).Error("delete entry failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
 			return
 		}
-		fmt.Fprintf(w, "OK\n")
+		log.Info("delete entry done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("get entry")
+
 	if len(r.Form["tname"]) < 1 ||
 		len(r.Form["nid"]) < 1 ||
 		len(r.Form["key"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+		log.Error("get entry failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -170,18 +276,39 @@
 	if ok {
 		value, err := nDB.GetEntry(tname, nid, key)
 		if err != nil {
-			diagnose.HTTPReplyError(w, err.Error(), "")
+			log.WithError(err).Error("get entry failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
 			return
 		}
-		fmt.Fprintf(w, "key:`%s` value:`%s`\n", key, string(value))
+
+		var encodedValue string
+		if unsafe {
+			encodedValue = string(value)
+		} else {
+			encodedValue = base64.StdEncoding.EncodeToString(value)
+		}
+
+		rsp := &diagnose.TableEntryObj{Key: key, Value: encodedValue}
+		log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("update entry done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	_, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("join network")
+
 	if len(r.Form["nid"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+		log.Error("join network failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -190,18 +317,30 @@
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
 		if err := nDB.JoinNetwork(nid); err != nil {
-			diagnose.HTTPReplyError(w, err.Error(), "")
+			log.WithError(err).Error("join network failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
 			return
 		}
-		fmt.Fprintf(w, "OK\n")
+		log.Info("join network done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	_, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("leave network")
+
 	if len(r.Form["nid"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+		log.Error("leave network failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -210,19 +349,31 @@
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
 		if err := nDB.LeaveNetwork(nid); err != nil {
-			diagnose.HTTPReplyError(w, err.Error(), "")
+			log.WithError(err).Error("leave network failed")
+			diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
 			return
 		}
-		fmt.Fprintf(w, "OK\n")
+		log.Info("leave network done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
 
 func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
 	r.ParseForm()
 	diagnose.DebugHTTPForm(r)
+	unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+	// audit logs
+	log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+	log.Info("get table")
+
 	if len(r.Form["tname"]) < 1 ||
 		len(r.Form["nid"]) < 1 {
-		diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id", r.URL.Path))
+		rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id", r.URL.Path))
+		log.Error("get table failed, wrong input")
+		diagnose.HTTPReply(w, rsp, json)
 		return
 	}
 
@@ -232,11 +383,26 @@
 	nDB, ok := ctx.(*NetworkDB)
 	if ok {
 		table := nDB.GetTableByNetwork(tname, nid)
-		fmt.Fprintf(w, "total elements: %d\n", len(table))
-		i := 0
+		rsp := &diagnose.TableObj{Length: len(table)}
+		var i = 0
 		for k, v := range table {
-			fmt.Fprintf(w, "%d) k:`%s` -> v:`%s`\n", i, k, string(v.([]byte)))
-			i++
+			var encodedValue string
+			if unsafe {
+				encodedValue = string(v.Value)
+			} else {
+				encodedValue = base64.StdEncoding.EncodeToString(v.Value)
+			}
+			rsp.Elements = append(rsp.Elements,
+				&diagnose.TableEntryObj{
+					Index: i,
+					Key:   k,
+					Value: encodedValue,
+					Owner: v.owner,
+				})
 		}
+		log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("get table done")
+		diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+		return
 	}
+	diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
 }
diff --git a/vendor/github.com/docker/libnetwork/osl/interface_solaris.go b/vendor/github.com/docker/libnetwork/osl/interface_solaris.go
deleted file mode 100644
index 9c0141f..0000000
--- a/vendor/github.com/docker/libnetwork/osl/interface_solaris.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package osl
-
-// IfaceOption is a function option type to set interface options
-type IfaceOption func()
diff --git a/vendor/github.com/docker/libnetwork/osl/neigh_solaris.go b/vendor/github.com/docker/libnetwork/osl/neigh_solaris.go
deleted file mode 100644
index ffa8d75..0000000
--- a/vendor/github.com/docker/libnetwork/osl/neigh_solaris.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package osl
-
-// NeighOption is a function option type to set interface options
-type NeighOption func()
diff --git a/vendor/github.com/docker/libnetwork/osl/sandbox_solaris.go b/vendor/github.com/docker/libnetwork/osl/sandbox_solaris.go
deleted file mode 100644
index 9de44e5..0000000
--- a/vendor/github.com/docker/libnetwork/osl/sandbox_solaris.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package osl
-
-// NewSandbox provides a new sandbox instance created in an os specific way
-// provided a key which uniquely identifies the sandbox
-func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) {
-	return nil, nil
-}
-
-// GenerateKey generates a sandbox key based on the passed
-// container id.
-func GenerateKey(containerID string) string {
-	maxLen := 12
-
-	if len(containerID) < maxLen {
-		maxLen = len(containerID)
-	}
-
-	return containerID[:maxLen]
-}
-
-// InitOSContext initializes OS context while configuring network resources
-func InitOSContext() func() {
-	return func() {}
-}
diff --git a/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go b/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go
index 49184d6..51a656c 100644
--- a/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go
+++ b/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris
+// +build !linux,!windows,!freebsd
 
 package osl
 
diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator_solaris.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator_solaris.go
deleted file mode 100644
index ccc20b1..0000000
--- a/vendor/github.com/docker/libnetwork/portallocator/portallocator_solaris.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package portallocator
-
-func getDynamicPortRange() (start int, end int, err error) {
-	return 32768, 65535, nil
-}
diff --git a/vendor/github.com/docker/libnetwork/portmapper/proxy.go b/vendor/github.com/docker/libnetwork/portmapper/proxy.go
index a5bdc55..45df200 100644
--- a/vendor/github.com/docker/libnetwork/portmapper/proxy.go
+++ b/vendor/github.com/docker/libnetwork/portmapper/proxy.go
@@ -10,7 +10,7 @@
 	"time"
 )
 
-const userlandProxyCommandName = "docker-proxy"
+var userlandProxyCommandName = "docker-proxy"
 
 type userlandProxy interface {
 	Start() error
diff --git a/vendor/github.com/docker/libnetwork/portmapper/proxy_solaris.go b/vendor/github.com/docker/libnetwork/portmapper/proxy_solaris.go
deleted file mode 100644
index dc70b5e..0000000
--- a/vendor/github.com/docker/libnetwork/portmapper/proxy_solaris.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package portmapper
-
-import (
-	"net"
-	"os/exec"
-	"strconv"
-)
-
-func newProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int, proxyPath string) (userlandProxy, error) {
-	path := proxyPath
-	if proxyPath == "" {
-		cmd, err := exec.LookPath(userlandProxyCommandName)
-		if err != nil {
-			return nil, err
-		}
-		path = cmd
-	}
-
-	args := []string{
-		path,
-		"-proto", proto,
-		"-host-ip", hostIP.String(),
-		"-host-port", strconv.Itoa(hostPort),
-		"-container-ip", containerIP.String(),
-		"-container-port", strconv.Itoa(containerPort),
-	}
-
-	return &proxyCommand{
-		cmd: &exec.Cmd{
-			Path: path,
-			Args: args,
-		},
-	}, nil
-}
diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_solaris.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_solaris.go
deleted file mode 100644
index 7569e46..0000000
--- a/vendor/github.com/docker/libnetwork/sandbox_externalkey_solaris.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// +build solaris
-
-package libnetwork
-
-import (
-	"io"
-	"net"
-
-	"github.com/docker/libnetwork/types"
-)
-
-// processSetKeyReexec is a private function that must be called only on an reexec path
-// It expects 3 args { [0] = "libnetwork-setkey", [1] = <container-id>, [2] = <controller-id> }
-// It also expects libcontainer.State as a json string in <stdin>
-// Refer to https://github.com/opencontainers/runc/pull/160/ for more information
-func processSetKeyReexec() {
-}
-
-// SetExternalKey provides a convenient way to set an External key to a sandbox
-func SetExternalKey(controllerID string, containerID string, key string) error {
-	return types.NotImplementedErrorf("SetExternalKey isn't supported on non linux systems")
-}
-
-func sendKey(c net.Conn, data setKeyData) error {
-	return types.NotImplementedErrorf("sendKey isn't supported on non linux systems")
-}
-
-func processReturn(r io.Reader) error {
-	return types.NotImplementedErrorf("processReturn isn't supported on non linux systems")
-}
-
-// no-op on non linux systems
-func (c *controller) startExternalKeyListener() error {
-	return nil
-}
-
-func (c *controller) acceptClientConnections(sock string, l net.Listener) {
-}
-
-func (c *controller) processExternalKey(conn net.Conn) error {
-	return types.NotImplementedErrorf("processExternalKey isn't supported on non linux systems")
-}
-
-func (c *controller) stopExternalKeyListener() {
-}
diff --git a/vendor/github.com/docker/libnetwork/store.go b/vendor/github.com/docker/libnetwork/store.go
index 1a897bf..95943f6 100644
--- a/vendor/github.com/docker/libnetwork/store.go
+++ b/vendor/github.com/docker/libnetwork/store.go
@@ -256,6 +256,7 @@
 			if err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {
 				return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err)
 			}
+			logrus.Warnf("Error (%v) deleting object %v, retrying....", err, kvObject.Key())
 			goto retry
 		}
 		return err
diff --git a/vendor/github.com/docker/libnetwork/vendor.conf b/vendor/github.com/docker/libnetwork/vendor.conf
index c97f551..bdcb5a1 100644
--- a/vendor/github.com/docker/libnetwork/vendor.conf
+++ b/vendor/github.com/docker/libnetwork/vendor.conf
@@ -1,44 +1,53 @@
-github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
-github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
-github.com/Microsoft/hcsshim v0.6.3
+github.com/Microsoft/go-winio v0.4.5
+github.com/Microsoft/hcsshim v0.6.5
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
-github.com/boltdb/bolt c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631
+github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
 github.com/codegangsta/cli a65b733b303f0055f8d324d805f393cd3e7a7904
-github.com/coreos/etcd 925d1d74cec8c3b169c52fd4b2dc234a35934fce
-github.com/coreos/go-systemd b4a58d95188dd092ae20072bac14cece0e67c388
+github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
+github.com/containerd/continuity 22694c680ee48fb8f50015b44618517e2bde77e8
+github.com/coreos/etcd v3.2.1
+github.com/coreos/go-semver v0.2.0
+github.com/coreos/go-systemd v4
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
 
-github.com/docker/docker 2cac43e3573893cf8fd816e0ad5615426acb87f4 https://github.com/dmcgowan/docker.git
+github.com/docker/docker a3efe9722f34af5cf4443fe3a5c4e4e3e0457b54
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
-github.com/docker/go-units 8e2d4523730c73120e10d4652f36ad6010998f4e
+github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
 
-github.com/godbus/dbus 5f6efc7ef2759c81b7ba876593971bfce311eab3
-github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2
-github.com/golang/protobuf f7137ae6b19afbfd61a94b746fda3b3fe0491874
-github.com/gorilla/context 215affda49addc4c8ef7e2534915df2c8c35c6cd
-github.com/gorilla/mux 8096f47503459bcc74d1f4c487b7e6e42e5746b5
-github.com/hashicorp/consul 954aec66231b79c161a4122b023fbcad13047f79
+github.com/godbus/dbus v4.0.0
+github.com/gogo/protobuf v0.4
+github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
+github.com/gorilla/context v1.1
+github.com/gorilla/mux v1.1
+github.com/hashicorp/consul v0.5.2
 github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
-github.com/hashicorp/go-multierror 2167c8ec40776024589f483a6b836489e47e1049
+github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e
 github.com/hashicorp/memberlist v0.1.0
 github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372
 github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d
 github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
-github.com/mattn/go-shellwords 525bedee691b5a8df547cb5cf9f86b7fb1883e24
-github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
-github.com/opencontainers/runc 8694d576ea3ce3c9e2c804b7f91b4e1e9a575d1c https://github.com/dmcgowan/runc.git
+github.com/mattn/go-shellwords v1.0.3
+github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7
+github.com/mrunalp/fileutils ed869b029674c0e9ce4c0dfa781405c2d9946d08
+github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
+github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
+github.com/opencontainers/runtime-spec v1.0.0
+github.com/opencontainers/selinux v1.0.0-rc1
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
-github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
-github.com/sirupsen/logrus v1.0.1
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
+github.com/sirupsen/logrus v1.0.3
 github.com/stretchr/testify dab07ac62d4905d3e48d17dc549c684ac3b7c15a
-github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
+github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
-github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
 github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674
-golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
 github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
diff --git a/vendor/github.com/fluent/fluent-logger-golang/README.md b/vendor/github.com/fluent/fluent-logger-golang/README.md
index a6b9902..cbb8bdc 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/README.md
+++ b/vendor/github.com/fluent/fluent-logger-golang/README.md
@@ -21,7 +21,7 @@
 
 GoDoc: http://godoc.org/github.com/fluent/fluent-logger-golang/fluent
 
-##Example
+## Example
 
 ```go
 package main
@@ -44,14 +44,14 @@
     "hoge": "hoge",
   }
   error := logger.Post(tag, data)
-  // error := logger.Post(tag, time.Time.Now(), data)
+  // error := logger.PostWithTime(tag, time.Now(), data)
   if error != nil {
     panic(error)
   }
 }
 ```
 
-`data` must be a value like `map[string]literal`, `map[string]interface{}` or `struct`. Logger refers tags `msg` or `codec` of each fields of structs.
+`data` must be a value like `map[string]literal`, `map[string]interface{}`, `struct` or [`msgp.Marshaler`](http://godoc.org/github.com/tinylib/msgp/msgp#Marshaler). Logger refers tags `msg` or `codec` of each fields of structs.
 
 ## Setting config values
 
@@ -59,6 +59,11 @@
 f := fluent.New(fluent.Config{FluentPort: 80, FluentHost: "example.com"})
 ```
 
+### WriteTimeout
+
+Sets the timeout for Write call of logger.Post.
+Since the default is zero value, Write will not time out.
+
 ## Tests
 ```
 go test
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
index 655f623..4693c5c 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
@@ -4,13 +4,14 @@
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io"
 	"math"
 	"net"
 	"reflect"
 	"strconv"
 	"sync"
 	"time"
+
+	"github.com/tinylib/msgp/msgp"
 )
 
 const (
@@ -19,10 +20,14 @@
 	defaultSocketPath             = ""
 	defaultPort                   = 24224
 	defaultTimeout                = 3 * time.Second
+	defaultWriteTimeout           = time.Duration(0) // Write() will not time out
 	defaultBufferLimit            = 8 * 1024 * 1024
 	defaultRetryWait              = 500
 	defaultMaxRetry               = 13
 	defaultReconnectWaitIncreRate = 1.5
+	// Default sub-second precision value to false since it is only compatible
+	// with fluentd versions v0.14 and above.
+	defaultSubSecondPrecision = false
 )
 
 type Config struct {
@@ -31,12 +36,17 @@
 	FluentNetwork    string        `json:"fluent_network"`
 	FluentSocketPath string        `json:"fluent_socket_path"`
 	Timeout          time.Duration `json:"timeout"`
+	WriteTimeout     time.Duration `json:"write_timeout"`
 	BufferLimit      int           `json:"buffer_limit"`
 	RetryWait        int           `json:"retry_wait"`
 	MaxRetry         int           `json:"max_retry"`
 	TagPrefix        string        `json:"tag_prefix"`
 	AsyncConnect     bool          `json:"async_connect"`
 	MarshalAsJSON    bool          `json:"marshal_as_json"`
+
+	// Sub-second precision timestamps are only possible for those using fluentd
+	// v0.14+ and serializing their messages with msgpack.
+	SubSecondPrecision bool `json:"sub_second_precision"`
 }
 
 type Fluent struct {
@@ -46,7 +56,7 @@
 	pending []byte
 
 	muconn       sync.Mutex
-	conn         io.WriteCloser
+	conn         net.Conn
 	reconnecting bool
 }
 
@@ -67,6 +77,9 @@
 	if config.Timeout == 0 {
 		config.Timeout = defaultTimeout
 	}
+	if config.WriteTimeout == 0 {
+		config.WriteTimeout = defaultWriteTimeout
+	}
 	if config.BufferLimit == 0 {
 		config.BufferLimit = defaultBufferLimit
 	}
@@ -90,9 +103,6 @@
 //
 // Examples:
 //
-//  // send string
-//  f.Post("tag_name", "data")
-//
 //  // send map[string]
 //  mapStringData := map[string]string{
 //  	"foo":  "bar",
@@ -124,6 +134,10 @@
 		tag = f.TagPrefix + "." + tag
 	}
 
+	if m, ok := message.(msgp.Marshaler); ok {
+		return f.EncodeAndPostData(tag, tm, m)
+	}
+
 	msg := reflect.ValueOf(message)
 	msgtype := msg.Type()
 
@@ -203,6 +217,9 @@
 		msg := Message{Tag: tag, Time: timeUnix, Record: message}
 		chunk := &MessageChunk{message: msg}
 		data, err = json.Marshal(chunk)
+	} else if f.Config.SubSecondPrecision {
+		msg := &MessageExt{Tag: tag, Time: EventTime(tm), Record: message}
+		data, err = msg.MarshalMsg(nil)
 	} else {
 		msg := &Message{Tag: tag, Time: timeUnix, Record: message}
 		data, err = msg.MarshalMsg(nil)
@@ -297,6 +314,12 @@
 
 	var err error
 	if len(f.pending) > 0 {
+		t := f.Config.WriteTimeout
+		if time.Duration(0) < t {
+			f.conn.SetWriteDeadline(time.Now().Add(t))
+		} else {
+			f.conn.SetWriteDeadline(time.Time{})
+		}
 		_, err = f.conn.Write(f.pending)
 		if err != nil {
 			f.conn.Close()
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
index 268d614..158e22d 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
@@ -2,6 +2,12 @@
 
 package fluent
 
+import (
+	"time"
+
+	"github.com/tinylib/msgp/msgp"
+)
+
 //msgp:tuple Entry
 type Entry struct {
 	Time   int64       `msg:"time"`
@@ -22,3 +28,69 @@
 	Record interface{} `msg:"record"`
 	Option interface{} `msg:"option"`
 }
+
+//msgp:tuple MessageExt
+type MessageExt struct {
+	Tag    string      `msg:"tag"`
+	Time   EventTime   `msg:"time,extension"`
+	Record interface{} `msg:"record"`
+	Option interface{} `msg:"option"`
+}
+
+// EventTime is an extension to the serialized time value. It builds in support
+// for sub-second (nanosecond) precision in serialized timestamps.
+//
+// You can find the full specification for the msgpack message payload here:
+// https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1.
+//
+// You can find more information on msgpack extension types here:
+// https://github.com/tinylib/msgp/wiki/Using-Extensions.
+type EventTime time.Time
+
+const (
+	extensionType = 0
+	length        = 8
+)
+
+func init() {
+	msgp.RegisterExtension(extensionType, func() msgp.Extension { return new(EventTime) })
+}
+
+func (t *EventTime) ExtensionType() int8 { return extensionType }
+
+func (t *EventTime) Len() int { return length }
+
+func (t *EventTime) MarshalBinaryTo(b []byte) error {
+	// Unwrap to Golang time
+	goTime := time.Time(*t)
+
+	// There's no support for timezones in fluentd's protocol for EventTime.
+	// Convert to UTC.
+	utc := goTime.UTC()
+
+	// Warning! Converting seconds to an int32 is a lossy operation. This code
+	// will hit the "Year 2038" problem.
+	sec := int32(utc.Unix())
+	nsec := utc.Nanosecond()
+
+	// Fill the buffer with 4 bytes for the second component of the timestamp.
+	b[0] = byte(sec >> 24)
+	b[1] = byte(sec >> 16)
+	b[2] = byte(sec >> 8)
+	b[3] = byte(sec)
+
+	// Fill the buffer with 4 bytes for the nanosecond component of the
+	// timestamp.
+	b[4] = byte(nsec >> 24)
+	b[5] = byte(nsec >> 16)
+	b[6] = byte(nsec >> 8)
+	b[7] = byte(nsec)
+
+	return nil
+}
+
+// UnmarshalBinary is not implemented since decoding messages is not supported
+// by this library.
+func (t *EventTime) UnmarshalBinary(b []byte) error {
+	return nil
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
index afb9d6d..5b88a68 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
@@ -10,13 +10,13 @@
 
 // DecodeMsg implements msgp.Decodable
 func (z *Entry) DecodeMsg(dc *msgp.Reader) (err error) {
-	var ssz uint32
-	ssz, err = dc.ReadArrayHeader()
+	var zxvk uint32
+	zxvk, err = dc.ReadArrayHeader()
 	if err != nil {
 		return
 	}
-	if ssz != 2 {
-		err = msgp.ArrayError{Wanted: 2, Got: ssz}
+	if zxvk != 2 {
+		err = msgp.ArrayError{Wanted: 2, Got: zxvk}
 		return
 	}
 	z.Time, err = dc.ReadInt64()
@@ -32,9 +32,10 @@
 
 // EncodeMsg implements msgp.Encodable
 func (z Entry) EncodeMsg(en *msgp.Writer) (err error) {
-	err = en.WriteArrayHeader(2)
+	// array header, size 2
+	err = en.Append(0x92)
 	if err != nil {
-		return
+		return err
 	}
 	err = en.WriteInt64(z.Time)
 	if err != nil {
@@ -50,7 +51,8 @@
 // MarshalMsg implements msgp.Marshaler
 func (z Entry) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	o = msgp.AppendArrayHeader(o, 2)
+	// array header, size 2
+	o = append(o, 0x92)
 	o = msgp.AppendInt64(o, z.Time)
 	o, err = msgp.AppendIntf(o, z.Record)
 	if err != nil {
@@ -61,16 +63,14 @@
 
 // UnmarshalMsg implements msgp.Unmarshaler
 func (z *Entry) UnmarshalMsg(bts []byte) (o []byte, err error) {
-	{
-		var ssz uint32
-		ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
-		if err != nil {
-			return
-		}
-		if ssz != 2 {
-			err = msgp.ArrayError{Wanted: 2, Got: ssz}
-			return
-		}
+	var zbzg uint32
+	zbzg, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	if err != nil {
+		return
+	}
+	if zbzg != 2 {
+		err = msgp.ArrayError{Wanted: 2, Got: zbzg}
+		return
 	}
 	z.Time, bts, err = msgp.ReadInt64Bytes(bts)
 	if err != nil {
@@ -84,51 +84,52 @@
 	return
 }
 
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z Entry) Msgsize() (s int) {
-	s = msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Record)
+	s = 1 + msgp.Int64Size + msgp.GuessSize(z.Record)
 	return
 }
 
 // DecodeMsg implements msgp.Decodable
 func (z *Forward) DecodeMsg(dc *msgp.Reader) (err error) {
-	var ssz uint32
-	ssz, err = dc.ReadArrayHeader()
+	var zcmr uint32
+	zcmr, err = dc.ReadArrayHeader()
 	if err != nil {
 		return
 	}
-	if ssz != 3 {
-		err = msgp.ArrayError{Wanted: 3, Got: ssz}
+	if zcmr != 3 {
+		err = msgp.ArrayError{Wanted: 3, Got: zcmr}
 		return
 	}
 	z.Tag, err = dc.ReadString()
 	if err != nil {
 		return
 	}
-	var xsz uint32
-	xsz, err = dc.ReadArrayHeader()
+	var zajw uint32
+	zajw, err = dc.ReadArrayHeader()
 	if err != nil {
 		return
 	}
-	if cap(z.Entries) >= int(xsz) {
-		z.Entries = z.Entries[:xsz]
+	if cap(z.Entries) >= int(zajw) {
+		z.Entries = (z.Entries)[:zajw]
 	} else {
-		z.Entries = make([]Entry, xsz)
+		z.Entries = make([]Entry, zajw)
 	}
-	for xvk := range z.Entries {
-		var ssz uint32
-		ssz, err = dc.ReadArrayHeader()
+	for zbai := range z.Entries {
+		var zwht uint32
+		zwht, err = dc.ReadArrayHeader()
 		if err != nil {
 			return
 		}
-		if ssz != 2 {
-			err = msgp.ArrayError{Wanted: 2, Got: ssz}
+		if zwht != 2 {
+			err = msgp.ArrayError{Wanted: 2, Got: zwht}
 			return
 		}
-		z.Entries[xvk].Time, err = dc.ReadInt64()
+		z.Entries[zbai].Time, err = dc.ReadInt64()
 		if err != nil {
 			return
 		}
-		z.Entries[xvk].Record, err = dc.ReadIntf()
+		z.Entries[zbai].Record, err = dc.ReadIntf()
 		if err != nil {
 			return
 		}
@@ -142,9 +143,10 @@
 
 // EncodeMsg implements msgp.Encodable
 func (z *Forward) EncodeMsg(en *msgp.Writer) (err error) {
-	err = en.WriteArrayHeader(3)
+	// array header, size 3
+	err = en.Append(0x93)
 	if err != nil {
-		return
+		return err
 	}
 	err = en.WriteString(z.Tag)
 	if err != nil {
@@ -154,16 +156,17 @@
 	if err != nil {
 		return
 	}
-	for xvk := range z.Entries {
-		err = en.WriteArrayHeader(2)
+	for zbai := range z.Entries {
+		// array header, size 2
+		err = en.Append(0x92)
+		if err != nil {
+			return err
+		}
+		err = en.WriteInt64(z.Entries[zbai].Time)
 		if err != nil {
 			return
 		}
-		err = en.WriteInt64(z.Entries[xvk].Time)
-		if err != nil {
-			return
-		}
-		err = en.WriteIntf(z.Entries[xvk].Record)
+		err = en.WriteIntf(z.Entries[zbai].Record)
 		if err != nil {
 			return
 		}
@@ -178,13 +181,15 @@
 // MarshalMsg implements msgp.Marshaler
 func (z *Forward) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	o = msgp.AppendArrayHeader(o, 3)
+	// array header, size 3
+	o = append(o, 0x93)
 	o = msgp.AppendString(o, z.Tag)
 	o = msgp.AppendArrayHeader(o, uint32(len(z.Entries)))
-	for xvk := range z.Entries {
-		o = msgp.AppendArrayHeader(o, 2)
-		o = msgp.AppendInt64(o, z.Entries[xvk].Time)
-		o, err = msgp.AppendIntf(o, z.Entries[xvk].Record)
+	for zbai := range z.Entries {
+		// array header, size 2
+		o = append(o, 0x92)
+		o = msgp.AppendInt64(o, z.Entries[zbai].Time)
+		o, err = msgp.AppendIntf(o, z.Entries[zbai].Record)
 		if err != nil {
 			return
 		}
@@ -198,48 +203,44 @@
 
 // UnmarshalMsg implements msgp.Unmarshaler
 func (z *Forward) UnmarshalMsg(bts []byte) (o []byte, err error) {
-	{
-		var ssz uint32
-		ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
-		if err != nil {
-			return
-		}
-		if ssz != 3 {
-			err = msgp.ArrayError{Wanted: 3, Got: ssz}
-			return
-		}
+	var zhct uint32
+	zhct, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	if err != nil {
+		return
+	}
+	if zhct != 3 {
+		err = msgp.ArrayError{Wanted: 3, Got: zhct}
+		return
 	}
 	z.Tag, bts, err = msgp.ReadStringBytes(bts)
 	if err != nil {
 		return
 	}
-	var xsz uint32
-	xsz, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	var zcua uint32
+	zcua, bts, err = msgp.ReadArrayHeaderBytes(bts)
 	if err != nil {
 		return
 	}
-	if cap(z.Entries) >= int(xsz) {
-		z.Entries = z.Entries[:xsz]
+	if cap(z.Entries) >= int(zcua) {
+		z.Entries = (z.Entries)[:zcua]
 	} else {
-		z.Entries = make([]Entry, xsz)
+		z.Entries = make([]Entry, zcua)
 	}
-	for xvk := range z.Entries {
-		{
-			var ssz uint32
-			ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
-			if err != nil {
-				return
-			}
-			if ssz != 2 {
-				err = msgp.ArrayError{Wanted: 2, Got: ssz}
-				return
-			}
-		}
-		z.Entries[xvk].Time, bts, err = msgp.ReadInt64Bytes(bts)
+	for zbai := range z.Entries {
+		var zxhx uint32
+		zxhx, bts, err = msgp.ReadArrayHeaderBytes(bts)
 		if err != nil {
 			return
 		}
-		z.Entries[xvk].Record, bts, err = msgp.ReadIntfBytes(bts)
+		if zxhx != 2 {
+			err = msgp.ArrayError{Wanted: 2, Got: zxhx}
+			return
+		}
+		z.Entries[zbai].Time, bts, err = msgp.ReadInt64Bytes(bts)
+		if err != nil {
+			return
+		}
+		z.Entries[zbai].Record, bts, err = msgp.ReadIntfBytes(bts)
 		if err != nil {
 			return
 		}
@@ -252,10 +253,11 @@
 	return
 }
 
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *Forward) Msgsize() (s int) {
-	s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize
-	for xvk := range z.Entries {
-		s += msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Entries[xvk].Record)
+	s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize
+	for zbai := range z.Entries {
+		s += 1 + msgp.Int64Size + msgp.GuessSize(z.Entries[zbai].Record)
 	}
 	s += msgp.GuessSize(z.Option)
 	return
@@ -263,13 +265,13 @@
 
 // DecodeMsg implements msgp.Decodable
 func (z *Message) DecodeMsg(dc *msgp.Reader) (err error) {
-	var ssz uint32
-	ssz, err = dc.ReadArrayHeader()
+	var zlqf uint32
+	zlqf, err = dc.ReadArrayHeader()
 	if err != nil {
 		return
 	}
-	if ssz != 4 {
-		err = msgp.ArrayError{Wanted: 4, Got: ssz}
+	if zlqf != 4 {
+		err = msgp.ArrayError{Wanted: 4, Got: zlqf}
 		return
 	}
 	z.Tag, err = dc.ReadString()
@@ -293,9 +295,10 @@
 
 // EncodeMsg implements msgp.Encodable
 func (z *Message) EncodeMsg(en *msgp.Writer) (err error) {
-	err = en.WriteArrayHeader(4)
+	// array header, size 4
+	err = en.Append(0x94)
 	if err != nil {
-		return
+		return err
 	}
 	err = en.WriteString(z.Tag)
 	if err != nil {
@@ -319,7 +322,8 @@
 // MarshalMsg implements msgp.Marshaler
 func (z *Message) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	o = msgp.AppendArrayHeader(o, 4)
+	// array header, size 4
+	o = append(o, 0x94)
 	o = msgp.AppendString(o, z.Tag)
 	o = msgp.AppendInt64(o, z.Time)
 	o, err = msgp.AppendIntf(o, z.Record)
@@ -335,16 +339,14 @@
 
 // UnmarshalMsg implements msgp.Unmarshaler
 func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) {
-	{
-		var ssz uint32
-		ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
-		if err != nil {
-			return
-		}
-		if ssz != 4 {
-			err = msgp.ArrayError{Wanted: 4, Got: ssz}
-			return
-		}
+	var zdaf uint32
+	zdaf, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	if err != nil {
+		return
+	}
+	if zdaf != 4 {
+		err = msgp.ArrayError{Wanted: 4, Got: zdaf}
+		return
 	}
 	z.Tag, bts, err = msgp.ReadStringBytes(bts)
 	if err != nil {
@@ -366,7 +368,122 @@
 	return
 }
 
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *Message) Msgsize() (s int) {
-	s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
+	s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
+	return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *MessageExt) DecodeMsg(dc *msgp.Reader) (err error) {
+	var zpks uint32
+	zpks, err = dc.ReadArrayHeader()
+	if err != nil {
+		return
+	}
+	if zpks != 4 {
+		err = msgp.ArrayError{Wanted: 4, Got: zpks}
+		return
+	}
+	z.Tag, err = dc.ReadString()
+	if err != nil {
+		return
+	}
+	err = dc.ReadExtension(&z.Time)
+	if err != nil {
+		return
+	}
+	z.Record, err = dc.ReadIntf()
+	if err != nil {
+		return
+	}
+	z.Option, err = dc.ReadIntf()
+	if err != nil {
+		return
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *MessageExt) EncodeMsg(en *msgp.Writer) (err error) {
+	// array header, size 4
+	err = en.Append(0x94)
+	if err != nil {
+		return err
+	}
+	err = en.WriteString(z.Tag)
+	if err != nil {
+		return
+	}
+	err = en.WriteExtension(&z.Time)
+	if err != nil {
+		return
+	}
+	err = en.WriteIntf(z.Record)
+	if err != nil {
+		return
+	}
+	err = en.WriteIntf(z.Option)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *MessageExt) MarshalMsg(b []byte) (o []byte, err error) {
+	o = msgp.Require(b, z.Msgsize())
+	// array header, size 4
+	o = append(o, 0x94)
+	o = msgp.AppendString(o, z.Tag)
+	o, err = msgp.AppendExtension(o, &z.Time)
+	if err != nil {
+		return
+	}
+	o, err = msgp.AppendIntf(o, z.Record)
+	if err != nil {
+		return
+	}
+	o, err = msgp.AppendIntf(o, z.Option)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *MessageExt) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var zjfb uint32
+	zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts)
+	if err != nil {
+		return
+	}
+	if zjfb != 4 {
+		err = msgp.ArrayError{Wanted: 4, Got: zjfb}
+		return
+	}
+	z.Tag, bts, err = msgp.ReadStringBytes(bts)
+	if err != nil {
+		return
+	}
+	bts, err = msgp.ReadExtensionBytes(bts, &z.Time)
+	if err != nil {
+		return
+	}
+	z.Record, bts, err = msgp.ReadIntfBytes(bts)
+	if err != nil {
+		return
+	}
+	z.Option, bts, err = msgp.ReadIntfBytes(bts)
+	if err != nil {
+		return
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *MessageExt) Msgsize() (s int) {
+	s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.ExtensionPrefixSize + z.Time.Len() + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
 	return
 }
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go
new file mode 100644
index 0000000..dcf5baa
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go
@@ -0,0 +1,7 @@
+package fluent
+
+//go:generate msgp
+type TestMessage struct {
+	Foo  string `msg:"foo" json:"foo,omitempty"`
+	Hoge string `msg:"hoge" json:"hoge,omitempty"`
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go
new file mode 100644
index 0000000..17a45e2
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go
@@ -0,0 +1,125 @@
+package fluent
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+	"github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *TestMessage) DecodeMsg(dc *msgp.Reader) (err error) {
+	var field []byte
+	_ = field
+	var zxvk uint32
+	zxvk, err = dc.ReadMapHeader()
+	if err != nil {
+		return
+	}
+	for zxvk > 0 {
+		zxvk--
+		field, err = dc.ReadMapKeyPtr()
+		if err != nil {
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "foo":
+			z.Foo, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		case "hoge":
+			z.Hoge, err = dc.ReadString()
+			if err != nil {
+				return
+			}
+		default:
+			err = dc.Skip()
+			if err != nil {
+				return
+			}
+		}
+	}
+	return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z TestMessage) EncodeMsg(en *msgp.Writer) (err error) {
+	// map header, size 2
+	// write "foo"
+	err = en.Append(0x82, 0xa3, 0x66, 0x6f, 0x6f)
+	if err != nil {
+		return err
+	}
+	err = en.WriteString(z.Foo)
+	if err != nil {
+		return
+	}
+	// write "hoge"
+	err = en.Append(0xa4, 0x68, 0x6f, 0x67, 0x65)
+	if err != nil {
+		return err
+	}
+	err = en.WriteString(z.Hoge)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z TestMessage) MarshalMsg(b []byte) (o []byte, err error) {
+	o = msgp.Require(b, z.Msgsize())
+	// map header, size 2
+	// string "foo"
+	o = append(o, 0x82, 0xa3, 0x66, 0x6f, 0x6f)
+	o = msgp.AppendString(o, z.Foo)
+	// string "hoge"
+	o = append(o, 0xa4, 0x68, 0x6f, 0x67, 0x65)
+	o = msgp.AppendString(o, z.Hoge)
+	return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *TestMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
+	var field []byte
+	_ = field
+	var zbzg uint32
+	zbzg, bts, err = msgp.ReadMapHeaderBytes(bts)
+	if err != nil {
+		return
+	}
+	for zbzg > 0 {
+		zbzg--
+		field, bts, err = msgp.ReadMapKeyZC(bts)
+		if err != nil {
+			return
+		}
+		switch msgp.UnsafeString(field) {
+		case "foo":
+			z.Foo, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				return
+			}
+		case "hoge":
+			z.Hoge, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				return
+			}
+		default:
+			bts, err = msgp.Skip(bts)
+			if err != nil {
+				return
+			}
+		}
+	}
+	o = bts
+	return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z TestMessage) Msgsize() (s int) {
+	s = 1 + 4 + msgp.StringPrefixSize + len(z.Foo) + 5 + msgp.StringPrefixSize + len(z.Hoge)
+	return
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
index 8904726..c6ec7e4 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
@@ -1,3 +1,3 @@
 package fluent
 
-const Version = "1.2.1"
+const Version = "1.3.0"
diff --git a/vendor/github.com/stevvooe/ttrpc/LICENSE b/vendor/github.com/stevvooe/ttrpc/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/stevvooe/ttrpc/README.md b/vendor/github.com/stevvooe/ttrpc/README.md
new file mode 100644
index 0000000..b246e57
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/README.md
@@ -0,0 +1,52 @@
+# ttrpc
+
+[![Build Status](https://travis-ci.org/stevvooe/ttrpc.svg?branch=master)](https://travis-ci.org/stevvooe/ttrpc)
+
+GRPC for low-memory environments.
+
+The existing grpc-go project requires a lot of memory overhead for importing
+packages and at runtime. While this is great for many services with low density
+requirements, this can be a problem when running a large number of services on
+a single machine or on a machine with a small amount of memory.
+
+Using the same GRPC definitions, this project reduces the binary size and
+protocol overhead required. We do this by eliding the `net/http`, `net/http2`
+and `grpc` package used by grpc replacing it with a lightweight framing
+protocol. The result are smaller binaries that use less resident memory with
+the same ease of use as GRPC.
+
+Please note that while this project supports generating either end of the
+protocol, the generated service definitions will be incompatible with regular
+GRPC services, as they do not speak the same protocol.
+
+# Usage
+
+Create a gogo vanity binary (see
+[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
+example with the ttrpc plugin enabled.
+
+It's recommended to use [`protobuild`](https://github.com/stevvooe/protobuild)
+to build the protobufs for this project, but this will work with protoc
+directly, if required.
+
+# Differences from GRPC
+
+- The protocol stack has been replaced with a lighter protocol that doesn't
+  require http, http2 and tls.
+- The client and server interface are identical whereas in GRPC there is a
+  client and server interface that are different.
+- The Go stdlib context package is used instead.
+- No support for streams yet.
+
+# Status
+
+Very new. YMMV.
+
+TODO:
+
+- [X] Plumb error codes and GRPC status
+- [X] Remove use of any type and dependency on typeurl package
+- [X] Ensure that protocol can support streaming in the future
+- [ ] Document protocol layout
+- [ ] Add testing under concurrent load to ensure
+- [ ] Verify connection error handling
diff --git a/vendor/github.com/stevvooe/ttrpc/channel.go b/vendor/github.com/stevvooe/ttrpc/channel.go
new file mode 100644
index 0000000..4a33827
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/channel.go
@@ -0,0 +1,135 @@
+package ttrpc
+
+import (
+	"bufio"
+	"context"
+	"encoding/binary"
+	"io"
+	"sync"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	messageHeaderLength = 10
+	messageLengthMax    = 4 << 20
+)
+
+type messageType uint8
+
+const (
+	messageTypeRequest  messageType = 0x1
+	messageTypeResponse messageType = 0x2
+)
+
+// messageHeader represents the fixed-length message header of 10 bytes sent
+// with every request.
+type messageHeader struct {
+	Length   uint32      // length excluding this header. b[:4]
+	StreamID uint32      // identifies which request stream message is a part of. b[4:8]
+	Type     messageType // message type b[8]
+	Flags    uint8       // reserved          b[9]
+}
+
+func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) {
+	_, err := io.ReadFull(r, p[:messageHeaderLength])
+	if err != nil {
+		return messageHeader{}, err
+	}
+
+	return messageHeader{
+		Length:   binary.BigEndian.Uint32(p[:4]),
+		StreamID: binary.BigEndian.Uint32(p[4:8]),
+		Type:     messageType(p[8]),
+		Flags:    p[9],
+	}, nil
+}
+
+func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error {
+	binary.BigEndian.PutUint32(p[:4], mh.Length)
+	binary.BigEndian.PutUint32(p[4:8], mh.StreamID)
+	p[8] = byte(mh.Type)
+	p[9] = mh.Flags
+
+	_, err := w.Write(p[:])
+	return err
+}
+
+var buffers sync.Pool
+
+type channel struct {
+	bw    *bufio.Writer
+	br    *bufio.Reader
+	hrbuf [messageHeaderLength]byte // avoid alloc when reading header
+	hwbuf [messageHeaderLength]byte
+}
+
+func newChannel(w io.Writer, r io.Reader) *channel {
+	return &channel{
+		bw: bufio.NewWriter(w),
+		br: bufio.NewReader(r),
+	}
+}
+
+// recv a message from the channel. The returned buffer contains the message.
+//
+// If a valid grpc status is returned, the message header
+// returned will be valid and caller should send that along to
+// the correct consumer. The bytes on the underlying channel
+// will be discarded.
+func (ch *channel) recv(ctx context.Context) (messageHeader, []byte, error) {
+	mh, err := readMessageHeader(ch.hrbuf[:], ch.br)
+	if err != nil {
+		return messageHeader{}, nil, err
+	}
+
+	if mh.Length > uint32(messageLengthMax) {
+		if _, err := ch.br.Discard(int(mh.Length)); err != nil {
+			return mh, nil, errors.Wrapf(err, "failed to discard after receiving oversized message")
+		}
+
+		return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax)
+	}
+
+	p := ch.getmbuf(int(mh.Length))
+	if _, err := io.ReadFull(ch.br, p); err != nil {
+		return messageHeader{}, nil, errors.Wrapf(err, "failed reading message")
+	}
+
+	return mh, p, nil
+}
+
+func (ch *channel) send(ctx context.Context, streamID uint32, t messageType, p []byte) error {
+	if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil {
+		return err
+	}
+
+	_, err := ch.bw.Write(p)
+	if err != nil {
+		return err
+	}
+
+	return ch.bw.Flush()
+}
+
+func (ch *channel) getmbuf(size int) []byte {
+	// we can't use the standard New method on pool because we want to allocate
+	// based on size.
+	b, ok := buffers.Get().(*[]byte)
+	if !ok || cap(*b) < size {
+		// TODO(stevvooe): It may be better to allocate these in fixed length
+		// buckets to reduce fragmentation but its not clear that would help
+		// with performance. An ilogb approach or similar would work well.
+		bb := make([]byte, size)
+		b = &bb
+	} else {
+		*b = (*b)[:size]
+	}
+	return *b
+}
+
+func (ch *channel) putmbuf(p []byte) {
+	buffers.Put(&p)
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/client.go b/vendor/github.com/stevvooe/ttrpc/client.go
new file mode 100644
index 0000000..ca76afe
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/client.go
@@ -0,0 +1,211 @@
+package ttrpc
+
+import (
+	"context"
+	"net"
+	"sync"
+
+	"github.com/containerd/containerd/log"
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/status"
+)
+
+// Client issues ttrpc requests over a single connection, multiplexing
+// concurrent calls via a background run loop started by NewClient.
+type Client struct {
+	codec   codec
+	conn    net.Conn
+	channel *channel
+	calls   chan *callRequest
+
+	closed    chan struct{} // closed by Close to request shutdown
+	closeOnce sync.Once
+	done      chan struct{} // closed when the run loop has exited
+	err       error         // terminal error; read only after done is closed
+}
+
+// NewClient returns a Client that issues requests over conn. The run
+// loop owns the connection and closes it when it exits; use Close to
+// initiate shutdown.
+func NewClient(conn net.Conn) *Client {
+	c := &Client{
+		codec:   codec{},
+		conn:    conn,
+		channel: newChannel(conn, conn),
+		calls:   make(chan *callRequest),
+		closed:  make(chan struct{}),
+		done:    make(chan struct{}),
+	}
+
+	go c.run()
+	return c
+}
+
+// callRequest carries a single in-flight request through the run loop.
+type callRequest struct {
+	ctx  context.Context
+	req  *Request
+	resp *Response  // response will be written back here
+	errs chan error // error written here on completion
+}
+
+// Call invokes the given service method with req, blocking until the
+// response arrives and is unmarshalled into resp.
+//
+// The response status is checked before the payload is decoded so that
+// a server-side error is reported as such rather than being masked by a
+// failure to unmarshal an empty or partial payload.
+func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error {
+	payload, err := c.codec.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	var (
+		creq = &Request{
+			Service: service,
+			Method:  method,
+			Payload: payload,
+		}
+
+		cresp = &Response{}
+	)
+
+	if err := c.dispatch(ctx, creq, cresp); err != nil {
+		return err
+	}
+
+	if cresp.Status == nil {
+		return errors.New("no status provided on response")
+	}
+
+	// A non-OK status is the result of the call; skip payload decoding.
+	if err := status.ErrorProto(cresp.Status); err != nil {
+		return err
+	}
+
+	return c.codec.Unmarshal(cresp.Payload, resp)
+}
+
+// dispatch hands the request to the run loop and waits for completion.
+//
+// The caller's context is attached to the callRequest (previously it was
+// dropped, leaving call.ctx nil in the run loop) and is also observed
+// while queuing and waiting so a cancelled caller does not block forever.
+// errs is buffered so a late completion after cancellation cannot block
+// the run loop.
+func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error {
+	errs := make(chan error, 1)
+	call := &callRequest{
+		ctx:  ctx,
+		req:  req,
+		resp: resp,
+		errs: errs,
+	}
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case c.calls <- call:
+	case <-c.done:
+		return c.err
+	}
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case err := <-errs:
+		return err
+	case <-c.done:
+		return c.err
+	}
+}
+
+// Close requests shutdown of the client. It is safe to call multiple
+// times and always returns nil; the run loop observes the signal and
+// fails any remaining waiters.
+func (c *Client) Close() error {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+	})
+
+	return nil
+}
+
+// message pairs a received frame with the error, if any, reported while
+// receiving it from the channel.
+type message struct {
+	messageHeader
+	p   []byte
+	err error
+}
+
+// run is the client's main loop: it serializes sends, matches incoming
+// responses to waiters by stream id, and tears everything down when the
+// receiver goroutine reports a terminal error or Close is called.
+func (c *Client) run() {
+	var (
+		streamID    uint32 = 1
+		waiters            = make(map[uint32]*callRequest)
+		calls              = c.calls
+		incoming           = make(chan *message)
+		shutdown           = make(chan struct{})
+		shutdownErr error
+	)
+
+	go func() {
+		defer close(shutdown)
+
+		// start one more goroutine to recv messages without blocking.
+		for {
+			mh, p, err := c.channel.recv(context.TODO())
+			if err != nil {
+				_, ok := status.FromError(err)
+				if !ok {
+					// treat all errors that are not an rpc status as
+					// terminal: they poison the connection, so shut down.
+					shutdownErr = err
+					return
+				}
+			}
+			select {
+			case incoming <- &message{
+				messageHeader: mh,
+				// Pass p through untouched: on a status error (e.g. an
+				// oversized message) p is nil and reslicing it to
+				// mh.Length would panic; on success p already has length
+				// mh.Length.
+				p:   p,
+				err: err,
+			}:
+			case <-c.done:
+				return
+			}
+		}
+	}()
+
+	defer c.conn.Close()
+	defer close(c.done)
+
+	for {
+		select {
+		case call := <-calls:
+			if err := c.send(call.ctx, streamID, messageTypeRequest, call.req); err != nil {
+				call.errs <- err
+				continue
+			}
+
+			waiters[streamID] = call
+			streamID += 2 // enforce odd client initiated request ids
+		case msg := <-incoming:
+			call, ok := waiters[msg.StreamID]
+			if !ok {
+				log.L.Errorf("ttrpc: received message for unknown channel %v", msg.StreamID)
+				continue
+			}
+
+			call.errs <- c.recv(call.resp, msg)
+			delete(waiters, msg.StreamID)
+		case <-shutdown:
+			shutdownErr = errors.Wrapf(shutdownErr, "ttrpc: client shutting down")
+			c.err = shutdownErr
+			// fail all remaining waiters with the terminal error.
+			for _, waiter := range waiters {
+				waiter.errs <- shutdownErr
+			}
+			c.Close()
+			return
+		case <-c.closed:
+			// broadcast the shutdown error to the remaining waiters.
+			for _, waiter := range waiters {
+				waiter.errs <- shutdownErr
+			}
+			return
+		}
+	}
+}
+
+// send marshals msg and writes it to the channel as a frame of the
+// given type on the given stream.
+func (c *Client) send(ctx context.Context, streamID uint32, mtype messageType, msg interface{}) error {
+	payload, err := c.codec.Marshal(msg)
+	if err != nil {
+		return err
+	}
+	return c.channel.send(ctx, streamID, mtype, payload)
+}
+
+// recv decodes msg into resp, returning the receive error, if any. On
+// successful frame delivery the payload buffer is returned to the
+// channel's pool after unmarshalling.
+func (c *Client) recv(resp *Response, msg *message) error {
+	if msg.err != nil {
+		return msg.err
+	}
+
+	if msg.Type != messageTypeResponse {
+		// fix spelling of the error message ("unkown" -> "unknown").
+		return errors.New("unknown message type received")
+	}
+
+	defer c.channel.putmbuf(msg.p)
+	return proto.Unmarshal(msg.p, resp)
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/codec.go b/vendor/github.com/stevvooe/ttrpc/codec.go
new file mode 100644
index 0000000..7956a72
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/codec.go
@@ -0,0 +1,26 @@
+package ttrpc
+
+import (
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+)
+
+// codec marshals and unmarshals messages for the wire. Only values
+// implementing proto.Message are supported.
+type codec struct{}
+
+// Marshal encodes msg, which must be a proto.Message.
+func (c codec) Marshal(msg interface{}) ([]byte, error) {
+	m, ok := msg.(proto.Message)
+	if !ok {
+		return nil, errors.Errorf("ttrpc: cannot marshal unknown type: %T", msg)
+	}
+	return proto.Marshal(m)
+}
+
+// Unmarshal decodes p into msg, which must be a proto.Message.
+func (c codec) Unmarshal(p []byte, msg interface{}) error {
+	m, ok := msg.(proto.Message)
+	if !ok {
+		return errors.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg)
+	}
+	return proto.Unmarshal(p, m)
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/config.go b/vendor/github.com/stevvooe/ttrpc/config.go
new file mode 100644
index 0000000..23bc603
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/config.go
@@ -0,0 +1,23 @@
+package ttrpc
+
+import "github.com/pkg/errors"
+
+// serverConfig collects options applied by NewServer.
+type serverConfig struct {
+	handshaker Handshaker
+}
+
+// ServerOpt configures a Server at construction time.
+type ServerOpt func(*serverConfig) error
+
+// WithServerHandshaker can be passed to NewServer to ensure that the
+// handshaker is called before every connection attempt.
+//
+// Only one handshaker is allowed per server.
+func WithServerHandshaker(handshaker Handshaker) ServerOpt {
+	return func(c *serverConfig) error {
+		if c.handshaker != nil {
+			return errors.New("only one handshaker allowed per server")
+		}
+		c.handshaker = handshaker
+		return nil
+	}
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/handshake.go b/vendor/github.com/stevvooe/ttrpc/handshake.go
new file mode 100644
index 0000000..a08ae8e
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/handshake.go
@@ -0,0 +1,34 @@
+package ttrpc
+
+import (
+	"context"
+	"net"
+)
+
+// Handshaker defines the interface for connection handshakes performed on the
+// server or client when first connecting.
+type Handshaker interface {
+	// Handshake should confirm or decorate a connection that may be incoming
+	// to a server or outgoing from a client.
+	//
+	// If this returns without an error, the caller should use the connection
+	// in place of the original connection.
+	//
+	// The second return value can contain credential specific data, such as
+	// unix socket credentials or TLS information.
+	//
+	// While we currently only have implementations on the server-side, this
+	// interface should be sufficient to implement similar handshakes on the
+	// client-side.
+	Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+}
+
+// handshakerFunc adapts an ordinary function to the Handshaker interface.
+type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+
+func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+	return fn(ctx, conn)
+}
+
+// noopHandshake accepts every connection unchanged, with no credential data.
+func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+	return conn, nil, nil
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/server.go b/vendor/github.com/stevvooe/ttrpc/server.go
new file mode 100644
index 0000000..edfca0c
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/server.go
@@ -0,0 +1,441 @@
+package ttrpc
+
+import (
+	"context"
+	"io"
+	"math/rand"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	// ErrServerClosed is returned by Serve when the server has been
+	// shut down via Shutdown or Close.
+	ErrServerClosed = errors.New("ttrpc: server close")
+)
+
+// Server serves ttrpc requests from one or more listeners, tracking
+// every accepted connection so it can be shut down gracefully or
+// forcefully.
+type Server struct {
+	config   *serverConfig
+	services *serviceSet
+	codec    codec
+
+	mu          sync.Mutex
+	listeners   map[net.Listener]struct{}
+	connections map[*serverConn]struct{} // all connections to current state
+	done        chan struct{}            // marks point at which we stop serving requests
+}
+
+// NewServer builds a Server configured by the given options. It returns
+// an error if any option fails to apply.
+func NewServer(opts ...ServerOpt) (*Server, error) {
+	config := &serverConfig{}
+	for _, opt := range opts {
+		if err := opt(config); err != nil {
+			return nil, err
+		}
+	}
+
+	s := &Server{
+		config:      config,
+		services:    newServiceSet(),
+		done:        make(chan struct{}),
+		listeners:   make(map[net.Listener]struct{}),
+		connections: make(map[*serverConn]struct{}),
+	}
+	return s, nil
+}
+
+// Register adds the named service's method handlers to the server.
+// Registering the same name twice panics (see serviceSet.register).
+func (s *Server) Register(name string, methods map[string]Method) {
+	s.services.register(name, methods)
+}
+
+// Serve accepts connections on l until the listener fails or the server
+// is shut down. Each accepted connection is handshaked and then served
+// on its own goroutine. Temporary accept errors are retried with
+// jittered exponential backoff, capped at one second.
+func (s *Server) Serve(l net.Listener) error {
+	s.addListener(l)
+	defer s.closeListener(l)
+
+	var (
+		ctx        = context.Background()
+		backoff    time.Duration
+		handshaker = s.config.handshaker
+	)
+
+	if handshaker == nil {
+		handshaker = handshakerFunc(noopHandshake)
+	}
+
+	for {
+		conn, err := l.Accept()
+		if err != nil {
+			// If the server is shutting down, report that rather than
+			// the accept error caused by closing the listener.
+			select {
+			case <-s.done:
+				return ErrServerClosed
+			default:
+			}
+
+			if terr, ok := err.(interface {
+				Temporary() bool
+			}); ok && terr.Temporary() {
+				// Double the backoff on consecutive temporary errors.
+				if backoff == 0 {
+					backoff = time.Millisecond
+				} else {
+					backoff *= 2
+				}
+
+				if max := time.Second; backoff > max {
+					backoff = max
+				}
+
+				// Sleep a random fraction of the backoff to avoid
+				// retry storms.
+				sleep := time.Duration(rand.Int63n(int64(backoff)))
+				log.L.WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep)
+				time.Sleep(sleep)
+				continue
+			}
+
+			return err
+		}
+
+		backoff = 0
+
+		approved, handshake, err := handshaker.Handshake(ctx, conn)
+		if err != nil {
+			log.L.WithError(err).Errorf("ttrpc: refusing connection after handshake")
+			conn.Close()
+			continue
+		}
+
+		sc := s.newConn(approved, handshake)
+		go sc.run(ctx)
+	}
+}
+
+// Shutdown gracefully stops the server: it closes all listeners, stops
+// serving new requests, then polls until every connection is idle
+// (closing idle connections as it goes) or ctx is done.
+func (s *Server) Shutdown(ctx context.Context) error {
+	s.mu.Lock()
+	lnerr := s.closeListeners()
+	select {
+	case <-s.done:
+	default:
+		// protected by mutex
+		close(s.done)
+	}
+	s.mu.Unlock()
+
+	// Poll for quiescence; active connections are given time to drain.
+	ticker := time.NewTicker(200 * time.Millisecond)
+	defer ticker.Stop()
+	for {
+		if s.closeIdleConns() {
+			return lnerr
+		}
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-ticker.C:
+		}
+	}
+}
+
+// Close stops the server without waiting for active connections: all
+// listeners are closed and every tracked connection is closed
+// immediately. The first listener close error, if any, is returned.
+func (s *Server) Close() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	select {
+	case <-s.done:
+	default:
+		// protected by mutex
+		close(s.done)
+	}
+
+	err := s.closeListeners()
+	for c := range s.connections {
+		c.close()
+		delete(s.connections, c)
+	}
+
+	return err
+}
+
+// addListener tracks l so it can be closed during shutdown.
+func (s *Server) addListener(l net.Listener) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.listeners[l] = struct{}{}
+}
+
+// closeListener closes l and stops tracking it.
+func (s *Server) closeListener(l net.Listener) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.closeListenerLocked(l)
+}
+
+// closeListenerLocked closes l and removes it from the tracked set.
+// Callers must hold s.mu.
+func (s *Server) closeListenerLocked(l net.Listener) error {
+	defer delete(s.listeners, l)
+	return l.Close()
+}
+
+// closeListeners closes every tracked listener, returning the first
+// error encountered. Callers must hold s.mu.
+func (s *Server) closeListeners() error {
+	var err error
+	for l := range s.listeners {
+		if cerr := s.closeListenerLocked(l); cerr != nil && err == nil {
+			err = cerr
+		}
+	}
+	return err
+}
+
+// addConnection tracks a newly accepted connection for shutdown handling.
+func (s *Server) addConnection(c *serverConn) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.connections[c] = struct{}{}
+}
+
+// closeIdleConns closes every tracked connection that is currently idle
+// and reports whether all connections were idle, i.e. whether the
+// server is quiescent and shutdown can complete.
+func (s *Server) closeIdleConns() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	quiescent := true
+	for c := range s.connections {
+		if st, ok := c.getState(); ok && st == connStateIdle {
+			c.close()
+			delete(s.connections, c)
+			continue
+		}
+		quiescent = false
+	}
+	return quiescent
+}
+
+// connState tracks the lifecycle of a server connection.
+type connState int
+
+const (
+	// Typing the constants explicitly as connState ensures only
+	// connState values flow into serverConn.state.
+	connStateActive connState = iota + 1 // outstanding requests
+	connStateIdle                        // no requests
+	connStateClosed                      // closed connection
+)
+
+// String returns a human-readable name for the connection state;
+// unrecognized values report "unknown".
+func (cs connState) String() string {
+	switch cs {
+	case connStateActive:
+		return "active"
+	case connStateIdle:
+		return "idle"
+	case connStateClosed:
+		return "closed"
+	}
+	return "unknown"
+}
+
+// newConn wraps an accepted (and handshaked) connection and registers
+// it with the server. The connection starts in the idle state.
+func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
+	c := &serverConn{
+		server:    s,
+		conn:      conn,
+		handshake: handshake,
+		shutdown:  make(chan struct{}),
+	}
+	c.setState(connStateIdle)
+	s.addConnection(c)
+	return c
+}
+
+// serverConn serves a single client connection.
+type serverConn struct {
+	server    *Server
+	conn      net.Conn
+	handshake interface{} // data from handshake, not used for now
+	state     atomic.Value
+
+	shutdownOnce sync.Once
+	shutdown     chan struct{} // forced shutdown, used by close
+}
+
+// getState returns the connection state; ok is false when no state has
+// been stored yet.
+func (c *serverConn) getState() (connState, bool) {
+	cs, ok := c.state.Load().(connState)
+	return cs, ok
+}
+
+// setState atomically publishes the connection state.
+func (c *serverConn) setState(newstate connState) {
+	c.state.Store(newstate)
+}
+
+// close signals the connection's run loop to shut down. It is safe to
+// call multiple times and always returns nil.
+func (c *serverConn) close() error {
+	c.shutdownOnce.Do(func() {
+		close(c.shutdown)
+	})
+
+	return nil
+}
+
+// run serves one connection: a receiver goroutine decodes frames and
+// forwards well-formed requests, while the main loop dispatches each
+// request to its handler on a fresh goroutine, writes responses back in
+// completion order and publishes the active/idle state that graceful
+// shutdown polls.
+func (c *serverConn) run(sctx context.Context) {
+	type (
+		request struct {
+			id  uint32
+			req *Request
+		}
+
+		response struct {
+			id   uint32
+			resp *Response
+		}
+	)
+
+	var (
+		ch          = newChannel(c.conn, c.conn)
+		ctx, cancel = context.WithCancel(sctx)
+		active      int
+		state       connState = connStateIdle
+		responses             = make(chan response)
+		requests              = make(chan request)
+		recvErr               = make(chan error, 1)
+		shutdown              = c.shutdown
+		done                  = make(chan struct{})
+	)
+
+	defer c.conn.Close()
+	defer cancel()
+	defer close(done)
+
+	// Receiver goroutine: reads frames off the channel and forwards
+	// valid requests to the main loop, or sends error responses directly.
+	go func(recvErr chan error) {
+		defer close(recvErr)
+		sendImmediate := func(id uint32, st *status.Status) bool {
+			select {
+			case responses <- response{
+				// even though we've had an invalid stream id, we send it
+				// back on the same stream id so the client knows which
+				// stream id was bad.
+				id: id,
+				resp: &Response{
+					Status: st.Proto(),
+				},
+			}:
+				return true
+			case <-c.shutdown:
+				return false
+			case <-done:
+				return false
+			}
+		}
+
+		for {
+			select {
+			case <-c.shutdown:
+				return
+			case <-done:
+				return
+			default: // proceed
+			}
+
+			mh, p, err := ch.recv(ctx)
+			if err != nil {
+				status, ok := status.FromError(err)
+				if !ok {
+					// non-status errors are terminal for the connection.
+					recvErr <- err
+					return
+				}
+
+				// in this case, we send an error for that particular message
+				// when the status is defined.
+				if !sendImmediate(mh.StreamID, status) {
+					return
+				}
+
+				continue
+			}
+
+			if mh.Type != messageTypeRequest {
+				// we must ignore this for future compat.
+				continue
+			}
+
+			var req Request
+			if err := c.server.codec.Unmarshal(p, &req); err != nil {
+				ch.putmbuf(p)
+				if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) {
+					return
+				}
+				continue
+			}
+			ch.putmbuf(p)
+
+			if mh.StreamID%2 != 1 {
+				// enforce odd client initiated identifiers.
+				if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) {
+					return
+				}
+				continue
+			}
+
+			// Forward the request to the main loop. We don't wait on s.done
+			// because we have already accepted the client request.
+			select {
+			case requests <- request{
+				id:  mh.StreamID,
+				req: &req,
+			}:
+			case <-done:
+				return
+			}
+		}
+	}(recvErr)
+
+	for {
+		// Recompute and publish the idle/active state; forced shutdown is
+		// only honored while idle so in-flight requests can finish.
+		newstate := state
+		switch {
+		case active > 0:
+			newstate = connStateActive
+			shutdown = nil
+		case active == 0:
+			newstate = connStateIdle
+			shutdown = c.shutdown // only enable this branch in idle mode
+		}
+
+		if newstate != state {
+			c.setState(newstate)
+			state = newstate
+		}
+
+		select {
+		case request := <-requests:
+			active++
+			go func(id uint32) {
+				p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload)
+				resp := &Response{
+					Status:  status.Proto(),
+					Payload: p,
+				}
+
+				select {
+				case responses <- response{
+					id:   id,
+					resp: resp,
+				}:
+				case <-done:
+				}
+			}(request.id)
+		case response := <-responses:
+			p, err := c.server.codec.Marshal(response.resp)
+			if err != nil {
+				log.L.WithError(err).Error("failed marshaling response")
+				return
+			}
+
+			if err := ch.send(ctx, response.id, messageTypeResponse, p); err != nil {
+				log.L.WithError(err).Error("failed sending message on channel")
+				return
+			}
+
+			active--
+		case err := <-recvErr:
+			// TODO(stevvooe): Not wildly clear what we should do in this
+			// branch. Basically, it means that we are no longer receiving
+			// requests due to a terminal error.
+			recvErr = nil // connection is now "closing"
+			if err != nil && err != io.EOF {
+				log.L.WithError(err).Error("error receiving message")
+			}
+		case <-shutdown:
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/services.go b/vendor/github.com/stevvooe/ttrpc/services.go
new file mode 100644
index 0000000..b9a749e
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/services.go
@@ -0,0 +1,134 @@
+package ttrpc
+
+import (
+	"context"
+	"io"
+	"os"
+	"path"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// Method is a server-side handler. The unmarshal callback decodes the
+// raw request payload into the handler's own request type.
+type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error)
+
+// ServiceDesc describes a service as a set of named methods.
+type ServiceDesc struct {
+	Methods map[string]Method
+
+	// TODO(stevvooe): Add stream support.
+}
+
+// serviceSet is the registry of services served by a Server.
+type serviceSet struct {
+	services map[string]ServiceDesc
+}
+
+// newServiceSet returns an empty service registry.
+func newServiceSet() *serviceSet {
+	return &serviceSet{
+		services: make(map[string]ServiceDesc),
+	}
+}
+
+// register adds a named service to the set. Registering the same name
+// twice panics, as that indicates a programmer error.
+func (s *serviceSet) register(name string, methods map[string]Method) {
+	if _, exists := s.services[name]; exists {
+		panic(errors.Errorf("duplicate service %v registered", name))
+	}
+	s.services[name] = ServiceDesc{Methods: methods}
+}
+
+// call dispatches the request and converts any resulting error into a
+// grpc status, so the caller always has a status to send back.
+func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) {
+	payload, err := s.dispatch(ctx, serviceName, methodName, p)
+	if st, ok := status.FromError(err); ok {
+		return payload, st
+	}
+	return payload, status.New(convertCode(err), err.Error())
+}
+
+// dispatch resolves the target method, invokes it with an unmarshaller
+// bound to the raw request payload and marshals the proto response.
+// Failures are reported as grpc status errors.
+func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) {
+	method, err := s.resolve(serviceName, methodName)
+	if err != nil {
+		return nil, err
+	}
+
+	// The handler calls unmarshal to decode p into its own request type;
+	// only proto messages are supported.
+	unmarshal := func(obj interface{}) error {
+		switch v := obj.(type) {
+		case proto.Message:
+			if err := proto.Unmarshal(p, v); err != nil {
+				return status.Errorf(codes.Internal, "ttrpc: error unmarshaling payload: %v", err.Error())
+			}
+		default:
+			return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v)
+		}
+		return nil
+	}
+
+	resp, err := method(ctx, unmarshal)
+	if err != nil {
+		return nil, err
+	}
+
+	switch v := resp.(type) {
+	case proto.Message:
+		r, err := proto.Marshal(v)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error())
+		}
+
+		return r, nil
+	default:
+		return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v)
+	}
+}
+
+// resolve looks up a method by service and method name, returning a
+// NotFound status error when either is missing.
+func (s *serviceSet) resolve(service, method string) (Method, error) {
+	if srv, ok := s.services[service]; ok {
+		if m, ok := srv.Methods[method]; ok {
+			return m, nil
+		}
+		return nil, status.Errorf(codes.NotFound, "method %v", method)
+	}
+	return nil, status.Errorf(codes.NotFound, "service %v", service)
+}
+
+// convertCode maps stdlib go errors into grpc space.
+//
+// This is ripped from the grpc-go code base.
+func convertCode(err error) codes.Code {
+	if err == nil {
+		return codes.OK
+	}
+	switch err {
+	case io.EOF:
+		return codes.OutOfRange
+	case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+		return codes.FailedPrecondition
+	case os.ErrInvalid:
+		return codes.InvalidArgument
+	case context.Canceled:
+		return codes.Canceled
+	case context.DeadlineExceeded:
+		return codes.DeadlineExceeded
+	}
+	// Fall back to the os error-classification predicates.
+	if os.IsExist(err) {
+		return codes.AlreadyExists
+	}
+	if os.IsNotExist(err) {
+		return codes.NotFound
+	}
+	if os.IsPermission(err) {
+		return codes.PermissionDenied
+	}
+	return codes.Unknown
+}
+
+// fullPath joins service and method into the request path. Note that
+// the extra leading "/" is preserved from the original implementation:
+// path.Join already yields a rooted path, so the result begins with "//".
+func fullPath(service, method string) string {
+	joined := path.Join("/", service, method)
+	return "/" + joined
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/types.go b/vendor/github.com/stevvooe/ttrpc/types.go
new file mode 100644
index 0000000..a522b0c
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/types.go
@@ -0,0 +1,26 @@
+package ttrpc
+
+import (
+	"fmt"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+)
+
+// Request is the wire representation of a single method invocation.
+type Request struct {
+	Service string `protobuf:"bytes,1,opt,name=service,proto3"`
+	Method  string `protobuf:"bytes,2,opt,name=method,proto3"`
+	Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"`
+}
+
+// Reset, String and ProtoMessage implement proto.Message by hand so the
+// type can be marshalled without generated code.
+func (r *Request) Reset()         { *r = Request{} }
+func (r *Request) String() string { return fmt.Sprintf("%+#v", r) }
+func (r *Request) ProtoMessage()  {}
+
+// Response is the wire representation of a method result: a grpc status
+// plus the marshalled response payload.
+type Response struct {
+	Status  *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"`
+	Payload []byte      `protobuf:"bytes,2,opt,name=payload,proto3"`
+}
+
+func (r *Response) Reset()         { *r = Response{} }
+func (r *Response) String() string { return fmt.Sprintf("%+#v", r) }
+func (r *Response) ProtoMessage()  {}
diff --git a/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go
new file mode 100644
index 0000000..812d927
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go
@@ -0,0 +1,92 @@
+package ttrpc
+
+import (
+	"context"
+	"net"
+	"os"
+	"syscall"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+// UnixCredentialsFunc validates the peer credentials of a unix socket
+// connection during the server handshake.
+type UnixCredentialsFunc func(*unix.Ucred) error
+
+// Handshake implements Handshaker. It requires conn to be a unix
+// socket, retrieves the peer's SO_PEERCRED credentials and passes them
+// to fn for validation. On success the unix connection and the
+// credentials are returned.
+func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+	uc, err := requireUnixSocket(conn)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket")
+	}
+
+	rs, err := uc.SyscallConn()
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed")
+	}
+	var (
+		ucred    *unix.Ucred
+		ucredErr error
+	)
+	if err := rs.Control(func(fd uintptr) {
+		ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED)
+	}); err != nil {
+		return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed")
+	}
+
+	if ucredErr != nil {
+		// Wrap ucredErr (the getsockopt failure) — the original wrapped
+		// err, which is always nil on this path.
+		return nil, nil, errors.Wrapf(ucredErr, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials")
+	}
+
+	if err := fn(ucred); err != nil {
+		return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed")
+	}
+
+	return uc, ucred, nil
+}
+
+// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID.
+//
+// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an
+// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001.
+// So calling this function with uid=0 allows a connection from effective UID 0 but rejects
+// a connection from effective UID 1001.
+//
+// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)."
+func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc {
+	return func(ucred *unix.Ucred) error {
+		return requireUidGid(ucred, uid, gid)
+	}
+}
+
+// UnixSocketRequireRoot requires the peer's effective UID and GID to be 0.
+func UnixSocketRequireRoot() UnixCredentialsFunc {
+	return UnixSocketRequireUidGid(0, 0)
+}
+
+// UnixSocketRequireSameUser resolves the current effective unix user and returns a
+// UnixCredentialsFunc that will validate incoming unix connections against the
+// current credentials.
+//
+// This is useful when using abstract sockets that are accessible by all users.
+func UnixSocketRequireSameUser() UnixCredentialsFunc {
+	euid, egid := os.Geteuid(), os.Getegid()
+	return UnixSocketRequireUidGid(euid, egid)
+}
+
+// requireRoot rejects credentials whose UID or GID is not 0.
+func requireRoot(ucred *unix.Ucred) error {
+	return requireUidGid(ucred, 0, 0)
+}
+
+// requireUidGid rejects credentials that do not match the given uid/gid;
+// passing -1 for either skips that check.
+func requireUidGid(ucred *unix.Ucred, uid, gid int) error {
+	if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) {
+		return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials")
+	}
+	return nil
+}
+
+// requireUnixSocket asserts that conn is a *net.UnixConn.
+func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) {
+	if uc, ok := conn.(*net.UnixConn); ok {
+		return uc, nil
+	}
+	return nil, errors.New("a unix socket connection is required")
+}
diff --git a/vendor/github.com/tinylib/msgp/README.md b/vendor/github.com/tinylib/msgp/README.md
index a7cc849..1328cca 100644
--- a/vendor/github.com/tinylib/msgp/README.md
+++ b/vendor/github.com/tinylib/msgp/README.md
@@ -1,15 +1,12 @@
 MessagePack Code Generator [![Build Status](https://travis-ci.org/tinylib/msgp.svg?branch=master)](https://travis-ci.org/tinylib/msgp)
 =======
 
-[![forthebadge](http://forthebadge.com/badges/uses-badges.svg)](http://forthebadge.com)
-[![forthebadge](http://forthebadge.com/badges/ages-12.svg)](http://forthebadge.com)
-
-This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). It is targeted at the `go generate` [tool](http://tip.golang.org/cmd/go/#hdr-Generate_Go_files_by_processing_source). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
+This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
 
 ### Why?
 
 - Use Go as your schema language
-- Speeeeeed (400MB/s on modern hardware)
+- Performance
 - [JSON interop](http://godoc.org/github.com/tinylib/msgp/msgp#CopyToJSON)
 - [User-defined extensions](http://github.com/tinylib/msgp/wiki/Using-Extensions)
 - Type safety
@@ -17,8 +14,6 @@
 
 ### Quickstart
 
-Note: you need at least go 1.3 to compile this package, and at least go 1.4 to use `go generate`.
-
 In a source file, include the following directive:
 
 ```go
@@ -45,7 +40,7 @@
 
 By default, the code generator will satisfy `msgp.Sizer`, `msgp.Encodable`, `msgp.Decodable`, 
 `msgp.Marshaler`, and `msgp.Unmarshaler`. Carefully-designed applications can use these methods to do
-marshalling/unmarshalling with zero allocations.
+marshalling/unmarshalling with zero heap allocations.
 
 While `msgp.Marshaler` and `msgp.Unmarshaler` are quite similar to the standard library's
 `json.Marshaler` and `json.Unmarshaler`, `msgp.Encodable` and `msgp.Decodable` are useful for 
@@ -62,6 +57,7 @@
  - Generation of both `[]byte`-oriented and `io.Reader/io.Writer`-oriented methods
  - Support for arbitrary type system extensions
  - [Preprocessor directives](http://github.com/tinylib/msgp/wiki/Preprocessor-Directives)
+ - File-based dependency model means fast codegen regardless of source tree size.
 
 Consider the following:
 ```go
@@ -84,21 +80,23 @@
 
 ### Status
 
-Alpha. I _will_ break stuff. There is an open milestone for Beta stability (targeted for January.) Only the `/msgp` sub-directory will have a stability guarantee.
+Mostly stable, in that no breaking changes have been made to the `/msgp` library in more than a year. Newer versions
+of the code may generate different code than older versions for performance reasons. I (@philhofer) am aware of a
+number of stability-critical commercial applications that use this code with good results. But, caveat emptor.
 
 You can read more about how `msgp` maps MessagePack types onto Go types [in the wiki](http://github.com/tinylib/msgp/wiki).
 
 Here some of the known limitations/restrictions:
 
- - Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
- - Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
- - Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
- - _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
+- Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
+- Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
+- Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
+- _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
 
 If the output compiles, then there's a pretty good chance things are fine. (Plus, we generate tests for you.) *Please, please, please* file an issue if you think the generator is writing broken code.
 
 ### Performance
 
-If you like benchmarks, see [here.](https://github.com/alecthomas/go_serialization_benchmarks)
+If you like benchmarks, see [here](http://bravenewgeek.com/so-you-wanna-go-fast/) and [here](https://github.com/alecthomas/go_serialization_benchmarks).
 
-As one might expect, the generated methods that deal with `[]byte` are faster, but the `io.Reader/Writer` methods are generally more memory-efficient for large (> 2KB) objects.
+As one might expect, the generated methods that deal with `[]byte` are faster for small objects, but the `io.Reader/Writer` methods are generally more memory-efficient (and, at some point, faster) for large (> 2KB) objects.
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
new file mode 100644
index 0000000..6c6bb37
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
@@ -0,0 +1,24 @@
+// +build linux,!appengine
+
+package msgp
+
+import (
+	"os"
+	"syscall"
+)
+
+// adviseRead hints to the kernel that mem will be read sequentially and
+// is needed soon.
+func adviseRead(mem []byte) {
+	syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
+}
+
+// adviseWrite hints to the kernel that mem will be accessed sequentially.
+func adviseWrite(mem []byte) {
+	syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
+}
+
+// fallocate preallocates sz bytes for f, falling back to Truncate on
+// filesystems that do not support fallocate(2).
+func fallocate(f *os.File, sz int64) error {
+	err := syscall.Fallocate(int(f.Fd()), 0, 0, sz)
+	if err == syscall.ENOTSUP {
+		return f.Truncate(sz)
+	}
+	return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
new file mode 100644
index 0000000..da65ea5
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
@@ -0,0 +1,17 @@
+// +build !linux appengine
+
+package msgp
+
+import (
+	"os"
+)
+
+// TODO: darwin, BSD support
+
+// adviseRead is a no-op on platforms without madvise support.
+func adviseRead(mem []byte) {}
+
+// adviseWrite is a no-op on platforms without madvise support.
+func adviseWrite(mem []byte) {}
+
+// fallocate sizes the file via Truncate where fallocate(2) is unavailable.
+func fallocate(f *os.File, sz int64) error {
+	return f.Truncate(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/appengine.go b/vendor/github.com/tinylib/msgp/msgp/appengine.go
new file mode 100644
index 0000000..bff9e76
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package msgp
+
+// let's just assume appengine
+// uses 64-bit hardware...
+const smallint = false
+
+func UnsafeString(b []byte) string {
+	return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go
index 35583ba..a0434c7 100644
--- a/vendor/github.com/tinylib/msgp/msgp/circular.go
+++ b/vendor/github.com/tinylib/msgp/msgp/circular.go
@@ -1,20 +1,21 @@
 package msgp
 
-import (
-	"testing"
-)
+type timer interface {
+	StartTimer()
+	StopTimer()
+}
 
 // EndlessReader is an io.Reader
 // that loops over the same data
 // endlessly. It is used for benchmarking.
 type EndlessReader struct {
-	tb     *testing.B
+	tb     timer
 	data   []byte
 	offset int
 }
 
 // NewEndlessReader returns a new endless reader
-func NewEndlessReader(b []byte, tb *testing.B) *EndlessReader {
+func NewEndlessReader(b []byte, tb timer) *EndlessReader {
 	return &EndlessReader{tb: tb, data: b, offset: 0}
 }
 
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
index 32a0ada..588b18f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/extension.go
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -226,7 +226,7 @@
 // peek at the extension type, assuming the next
 // kind to be read is Extension
 func (m *Reader) peekExtensionType() (int8, error) {
-	p, err := m.r.Peek(2)
+	p, err := m.R.Peek(2)
 	if err != nil {
 		return 0, err
 	}
@@ -238,7 +238,7 @@
 		return int8(p[1]), nil
 	}
 	size := spec.size
-	p, err = m.r.Peek(int(size))
+	p, err = m.R.Peek(int(size))
 	if err != nil {
 		return 0, err
 	}
@@ -273,7 +273,7 @@
 // e.Type() is not the same as the wire type.
 func (m *Reader) ReadExtension(e Extension) (err error) {
 	var p []byte
-	p, err = m.r.Peek(2)
+	p, err = m.R.Peek(2)
 	if err != nil {
 		return
 	}
@@ -286,13 +286,13 @@
 			err = errExt(int8(p[1]), e.ExtensionType())
 			return
 		}
-		p, err = m.r.Peek(3)
+		p, err = m.R.Peek(3)
 		if err != nil {
 			return
 		}
 		err = e.UnmarshalBinary(p[2:])
 		if err == nil {
-			_, err = m.r.Skip(3)
+			_, err = m.R.Skip(3)
 		}
 		return
 
@@ -301,13 +301,13 @@
 			err = errExt(int8(p[1]), e.ExtensionType())
 			return
 		}
-		p, err = m.r.Peek(4)
+		p, err = m.R.Peek(4)
 		if err != nil {
 			return
 		}
 		err = e.UnmarshalBinary(p[2:])
 		if err == nil {
-			_, err = m.r.Skip(4)
+			_, err = m.R.Skip(4)
 		}
 		return
 
@@ -316,13 +316,13 @@
 			err = errExt(int8(p[1]), e.ExtensionType())
 			return
 		}
-		p, err = m.r.Peek(6)
+		p, err = m.R.Peek(6)
 		if err != nil {
 			return
 		}
 		err = e.UnmarshalBinary(p[2:])
 		if err == nil {
-			_, err = m.r.Skip(6)
+			_, err = m.R.Skip(6)
 		}
 		return
 
@@ -331,13 +331,13 @@
 			err = errExt(int8(p[1]), e.ExtensionType())
 			return
 		}
-		p, err = m.r.Peek(10)
+		p, err = m.R.Peek(10)
 		if err != nil {
 			return
 		}
 		err = e.UnmarshalBinary(p[2:])
 		if err == nil {
-			_, err = m.r.Skip(10)
+			_, err = m.R.Skip(10)
 		}
 		return
 
@@ -346,18 +346,18 @@
 			err = errExt(int8(p[1]), e.ExtensionType())
 			return
 		}
-		p, err = m.r.Peek(18)
+		p, err = m.R.Peek(18)
 		if err != nil {
 			return
 		}
 		err = e.UnmarshalBinary(p[2:])
 		if err == nil {
-			_, err = m.r.Skip(18)
+			_, err = m.R.Skip(18)
 		}
 		return
 
 	case mext8:
-		p, err = m.r.Peek(3)
+		p, err = m.R.Peek(3)
 		if err != nil {
 			return
 		}
@@ -369,7 +369,7 @@
 		off = 3
 
 	case mext16:
-		p, err = m.r.Peek(4)
+		p, err = m.R.Peek(4)
 		if err != nil {
 			return
 		}
@@ -381,7 +381,7 @@
 		off = 4
 
 	case mext32:
-		p, err = m.r.Peek(6)
+		p, err = m.R.Peek(6)
 		if err != nil {
 			return
 		}
@@ -397,13 +397,13 @@
 		return
 	}
 
-	p, err = m.r.Peek(read + off)
+	p, err = m.R.Peek(read + off)
 	if err != nil {
 		return
 	}
 	err = e.UnmarshalBinary(p[off:])
 	if err == nil {
-		_, err = m.r.Skip(read + off)
+		_, err = m.R.Skip(read + off)
 	}
 	return
 }
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
new file mode 100644
index 0000000..8e7370e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -0,0 +1,92 @@
+// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build !appengine
+
+package msgp
+
+import (
+	"os"
+	"syscall"
+)
+
+// ReadFile reads a file into 'dst' using
+// a read-only memory mapping. Consequently,
+// the file must be mmap-able, and the
+// Unmarshaler should never write to
+// the source memory. (Methods generated
+// by the msgp tool obey that constraint, but
+// user-defined implementations may not.)
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+func ReadFile(dst Unmarshaler, file *os.File) error {
+	stat, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+	if err != nil {
+		return err
+	}
+	adviseRead(data)
+	_, err = dst.UnmarshalMsg(data)
+	uerr := syscall.Munmap(data)
+	if err == nil {
+		err = uerr
+	}
+	return err
+}
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+	Marshaler
+	Sizer
+}
+
+// WriteFile writes a file from 'src' using
+// memory mapping. It overwrites the entire
+// contents of the previous file.
+// The mapping size is calculated
+// using the `Msgsize()` method
+// of 'src', so it must produce a result
+// equal to or greater than the actual encoded
+// size of the object. Otherwise,
+// a fault (SIGBUS) will occur.
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+// NOTE: The performance of this call
+// is highly OS- and filesystem-dependent.
+// Users should take care to test that this
+// performs as expected in a production environment.
+// (Linux users should run a kernel and filesystem
+// that support fallocate(2) for the best results.)
+func WriteFile(src MarshalSizer, file *os.File) error {
+	sz := src.Msgsize()
+	err := fallocate(file, int64(sz))
+	if err != nil {
+		return err
+	}
+	data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+	if err != nil {
+		return err
+	}
+	adviseWrite(data)
+	chunk := data[:0]
+	chunk, err = src.MarshalMsg(chunk)
+	if err != nil {
+		return err
+	}
+	uerr := syscall.Munmap(data)
+	if uerr != nil {
+		return uerr
+	}
+	return file.Truncate(int64(len(chunk)))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go
new file mode 100644
index 0000000..6e654db
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go
@@ -0,0 +1,47 @@
+// +build windows appengine
+
+package msgp
+
+import (
+	"io/ioutil"
+	"os"
+)
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+	Marshaler
+	Sizer
+}
+
+func ReadFile(dst Unmarshaler, file *os.File) error {
+	if u, ok := dst.(Decodable); ok {
+		return u.DecodeMsg(NewReader(file))
+	}
+
+	data, err := ioutil.ReadAll(file)
+	if err != nil {
+		return err
+	}
+	_, err = dst.UnmarshalMsg(data)
+	return err
+}
+
+func WriteFile(src MarshalSizer, file *os.File) error {
+	if e, ok := src.(Encodable); ok {
+		w := NewWriter(file)
+		err := e.EncodeMsg(w)
+		if err == nil {
+			err = w.Flush()
+		}
+		return err
+	}
+
+	raw, err := src.MarshalMsg(nil)
+	if err != nil {
+		return err
+	}
+	_, err = file.Write(raw)
+	return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go
index 5c799ff..4325860 100644
--- a/vendor/github.com/tinylib/msgp/msgp/json.go
+++ b/vendor/github.com/tinylib/msgp/msgp/json.go
@@ -66,7 +66,7 @@
 	if jsw, ok := w.(jsWriter); ok {
 		j = jsw
 	} else {
-		bf = bufio.NewWriterSize(w, 512)
+		bf = bufio.NewWriter(w)
 		j = bf
 	}
 	var nn int
@@ -333,7 +333,7 @@
 
 func rwString(dst jsWriter, src *Reader) (n int, err error) {
 	var p []byte
-	p, err = src.r.Peek(1)
+	p, err = src.R.Peek(1)
 	if err != nil {
 		return
 	}
@@ -342,25 +342,25 @@
 
 	if isfixstr(lead) {
 		read = int(rfixstr(lead))
-		src.r.Skip(1)
+		src.R.Skip(1)
 		goto write
 	}
 
 	switch lead {
 	case mstr8:
-		p, err = src.r.Next(2)
+		p, err = src.R.Next(2)
 		if err != nil {
 			return
 		}
 		read = int(uint8(p[1]))
 	case mstr16:
-		p, err = src.r.Next(3)
+		p, err = src.R.Next(3)
 		if err != nil {
 			return
 		}
 		read = int(big.Uint16(p[1:]))
 	case mstr32:
-		p, err = src.r.Next(5)
+		p, err = src.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -370,7 +370,7 @@
 		return
 	}
 write:
-	p, err = src.r.Next(read)
+	p, err = src.R.Next(read)
 	if err != nil {
 		return
 	}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go
index 69afc8a..ad07ef9 100644
--- a/vendor/github.com/tinylib/msgp/msgp/number.go
+++ b/vendor/github.com/tinylib/msgp/msgp/number.go
@@ -1,11 +1,105 @@
 package msgp
 
 import (
+	"math"
 	"strconv"
 )
 
 // The portable parts of the Number implementation
 
+// Number can be
+// an int64, uint64, float32,
+// or float64 internally.
+// It can decode itself
+// from any of the native
+// messagepack number types.
+// The zero-value of Number
+// is Int(0). Using the equality
+// operator with Number compares
+// both the type and the value
+// of the number.
+type Number struct {
+	// internally, this
+	// is just a tagged union.
+	// the raw bits of the number
+	// are stored the same way regardless.
+	bits uint64
+	typ  Type
+}
+
+// AsInt sets the number to an int64.
+func (n *Number) AsInt(i int64) {
+
+	// we always store int(0)
+	// as {0, InvalidType} in
+	// order to preserve
+	// the behavior of the == operator
+	if i == 0 {
+		n.typ = InvalidType
+		n.bits = 0
+		return
+	}
+
+	n.typ = IntType
+	n.bits = uint64(i)
+}
+
+// AsUint sets the number to a uint64.
+func (n *Number) AsUint(u uint64) {
+	n.typ = UintType
+	n.bits = u
+}
+
+// AsFloat32 sets the value of the number
+// to a float32.
+func (n *Number) AsFloat32(f float32) {
+	n.typ = Float32Type
+	n.bits = uint64(math.Float32bits(f))
+}
+
+// AsFloat64 sets the value of the
+// number to a float64.
+func (n *Number) AsFloat64(f float64) {
+	n.typ = Float64Type
+	n.bits = math.Float64bits(f)
+}
+
+// Int casts the number as an int64, and
+// returns whether or not that was the
+// underlying type.
+func (n *Number) Int() (int64, bool) {
+	return int64(n.bits), n.typ == IntType || n.typ == InvalidType
+}
+
+// Uint casts the number as a uint64, and returns
+// whether or not that was the underlying type.
+func (n *Number) Uint() (uint64, bool) {
+	return n.bits, n.typ == UintType
+}
+
+// Float casts the number to a float64, and
+// returns whether or not that was the underlying
+// type (either a float64 or a float32).
+func (n *Number) Float() (float64, bool) {
+	switch n.typ {
+	case Float32Type:
+		return float64(math.Float32frombits(uint32(n.bits))), true
+	case Float64Type:
+		return math.Float64frombits(n.bits), true
+	default:
+		return 0.0, false
+	}
+}
+
+// Type will return one of:
+// Float64Type, Float32Type, UintType, or IntType.
+func (n *Number) Type() Type {
+	if n.typ == InvalidType {
+		return IntType
+	}
+	return n.typ
+}
+
 // DecodeMsg implements msgp.Decodable
 func (n *Number) DecodeMsg(r *Reader) error {
 	typ, err := r.NextType()
@@ -83,6 +177,38 @@
 	}
 }
 
+// MarshalMsg implements msgp.Marshaler
+func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
+	switch n.typ {
+	case IntType:
+		return AppendInt64(b, int64(n.bits)), nil
+	case UintType:
+		return AppendUint64(b, uint64(n.bits)), nil
+	case Float64Type:
+		return AppendFloat64(b, math.Float64frombits(n.bits)), nil
+	case Float32Type:
+		return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
+	default:
+		return AppendInt64(b, 0), nil
+	}
+}
+
+// EncodeMsg implements msgp.Encodable
+func (n *Number) EncodeMsg(w *Writer) error {
+	switch n.typ {
+	case IntType:
+		return w.WriteInt64(int64(n.bits))
+	case UintType:
+		return w.WriteUint64(n.bits)
+	case Float64Type:
+		return w.WriteFloat64(math.Float64frombits(n.bits))
+	case Float32Type:
+		return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
+	default:
+		return w.WriteInt64(0)
+	}
+}
+
 // Msgsize implements msgp.Sizer
 func (n *Number) Msgsize() int {
 	switch n.typ {
@@ -121,6 +247,7 @@
 	}
 }
 
+// String implements fmt.Stringer
 func (n *Number) String() string {
 	switch n.typ {
 	case InvalidType:
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_appengine.go b/vendor/github.com/tinylib/msgp/msgp/number_appengine.go
deleted file mode 100644
index c94140d..0000000
--- a/vendor/github.com/tinylib/msgp/msgp/number_appengine.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// +build appengine
-
-package msgp
-
-// let's just assume appengine
-// uses 64-bit hardware...
-const smallint = false
-
-func UnsafeString(b []byte) string {
-	return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
-	return []byte(s)
-}
-
-type Number struct {
-	ibits uint64  // zero or bits
-	fbits float64 // zero or bits
-	typ   Type    // zero or type
-}
-
-func (n *Number) AsFloat64(f float64) {
-	n.typ = Float64Type
-	n.fbits = f
-	n.ibits = 0
-}
-
-func (n *Number) AsFloat32(f float32) {
-	n.typ = Float32Type
-	n.fbits = float64(f)
-	n.ibits = 0
-}
-
-func (n *Number) AsInt(i int64) {
-	n.fbits = 0
-	if i == 0 {
-		n.typ = InvalidType
-		n.ibits = 0
-		return
-	}
-	n.ibits = uint64(i)
-	n.typ = IntType
-}
-
-func (n *Number) AsUint(u uint64) {
-	n.ibits = u
-	n.fbits = 0
-	n.typ = UintType
-}
-
-func (n *Number) Float() (float64, bool) {
-	return n.fbits, n.typ == Float64Type || n.typ == Float32Type
-}
-
-func (n *Number) Int() (int64, bool) {
-	return int64(n.ibits), n.typ == IntType
-}
-
-func (n *Number) Uint() (uint64, bool) {
-	return n.ibits, n.typ == UintType
-}
-
-func (n *Number) Type() Type {
-	if n.typ == InvalidType {
-		return IntType
-	}
-	return n.typ
-}
-
-func (n *Number) MarshalMsg(o []byte) ([]byte, error) {
-	switch n.typ {
-	case InvalidType:
-		return AppendInt64(o, 0), nil
-	case IntType:
-		return AppendInt64(o, int64(n.ibits)), nil
-	case UintType:
-		return AppendUint64(o, n.ibits), nil
-	case Float32Type:
-		return AppendFloat32(o, float32(n.fbits)), nil
-	case Float64Type:
-		return AppendFloat64(o, n.fbits), nil
-	}
-	panic("unreachable code!")
-}
-
-func (n *Number) EncodeMsg(w *Writer) error {
-	switch n.typ {
-	case InvalidType:
-		return w.WriteInt64(0)
-	case IntType:
-		return w.WriteInt64(int64(n.ibits))
-	case UintType:
-		return w.WriteUint64(n.ibits)
-	case Float32Type:
-		return w.WriteFloat32(float32(n.fbits))
-	case Float64Type:
-		return w.WriteFloat64(n.fbits)
-	}
-	panic("unreachable code!")
-}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go b/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go
deleted file mode 100644
index 8ea0462..0000000
--- a/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// +build !appengine
-
-package msgp
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-const (
-	// spec says int and uint are always
-	// the same size, but that int/uint
-	// size may not be machine word size
-	smallint = unsafe.Sizeof(int(0)) == 4
-)
-
-// UnsafeString returns the byte slice as a volatile string
-// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
-// THIS IS EVIL CODE.
-// YOU HAVE BEEN WARNED.
-func UnsafeString(b []byte) string {
-	return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)}))
-}
-
-// UnsafeBytes returns the string as a byte slice
-// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
-// THIS IS EVIL CODE.
-// YOU HAVE BEEN WARNED.
-func UnsafeBytes(s string) []byte {
-	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-		Len:  len(s),
-		Cap:  len(s),
-		Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
-	}))
-}
-
-// Number can be
-// an int64, uint64, float32,
-// or float64 internally.
-// It can decode itself
-// from any of the native
-// messagepack number types.
-// The zero-value of Number
-// is Int(0). Using the equality
-// operator with Number compares
-// both the type and the value
-// of the number.
-type Number struct {
-	// internally, this
-	// is just a tagged union.
-	// the raw bits of the number
-	// are stored the same way regardless.
-	bits uint64
-	typ  Type
-}
-
-// AsFloat64 sets the number to
-// a float64.
-func (n *Number) AsFloat64(f float64) {
-	n.typ = Float64Type
-	n.bits = *(*uint64)(unsafe.Pointer(&f))
-}
-
-// AsInt sets the number to an int64.
-func (n *Number) AsInt(i int64) {
-
-	// we always store int(0)
-	// as {0, InvalidType} in
-	// order to preserve
-	// the behavior of the == operator
-	if i == 0 {
-		n.typ = InvalidType
-		n.bits = 0
-		return
-	}
-
-	n.typ = IntType
-	n.bits = uint64(i)
-}
-
-// AsUint sets the number to a uint64.
-func (n *Number) AsUint(u uint64) {
-	n.typ = UintType
-	n.bits = u
-}
-
-// AsFloat32 sets the number to a float32.
-func (n *Number) AsFloat32(f float32) {
-	n.typ = Float32Type
-	g := float64(f)
-	n.bits = *(*uint64)(unsafe.Pointer(&g))
-}
-
-// Type will return one of:
-// Float64Type, Float32Type, UintType, or IntType.
-func (n *Number) Type() Type {
-	if n.typ == InvalidType {
-		return IntType
-	}
-	return n.typ
-}
-
-// Float casts the number of the float,
-// and returns whether or not that was
-// the underlying type. (This is legal
-// for both float32 and float64 types.)
-func (n *Number) Float() (float64, bool) {
-	return *(*float64)(unsafe.Pointer(&n.bits)), n.typ == Float64Type || n.typ == Float32Type
-}
-
-// Int casts the number as an int64, and
-// returns whether or not that was the
-// underlying type.
-func (n *Number) Int() (int64, bool) {
-	return int64(n.bits), n.typ == IntType || n.typ == InvalidType
-}
-
-// Uint casts the number as a uint64, and returns
-// whether or not that was the underlying type.
-func (n *Number) Uint() (uint64, bool) {
-	return n.bits, n.typ == UintType
-}
-
-// EncodeMsg implements msgp.Encodable
-func (n *Number) EncodeMsg(w *Writer) error {
-	switch n.typ {
-	case InvalidType:
-		return w.WriteInt(0)
-	case IntType:
-		return w.WriteInt64(int64(n.bits))
-	case UintType:
-		return w.WriteUint64(n.bits)
-	case Float64Type:
-		return w.WriteFloat64(*(*float64)(unsafe.Pointer(&n.bits)))
-	case Float32Type:
-		return w.WriteFloat32(float32(*(*float64)(unsafe.Pointer(&n.bits))))
-	default:
-		// this should never ever happen
-		panic("(*Number).typ is invalid")
-	}
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
-	switch n.typ {
-	case InvalidType:
-		return AppendInt(b, 0), nil
-	case IntType:
-		return AppendInt64(b, int64(n.bits)), nil
-	case UintType:
-		return AppendUint64(b, n.bits), nil
-	case Float64Type:
-		return AppendFloat64(b, *(*float64)(unsafe.Pointer(&n.bits))), nil
-	case Float32Type:
-		return AppendFloat32(b, float32(*(*float64)(unsafe.Pointer(&n.bits)))), nil
-	default:
-		panic("(*Number).typ is invalid")
-	}
-}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go
index c34482e..20cd1ef 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read.go
@@ -1,11 +1,12 @@
 package msgp
 
 import (
-	"github.com/philhofer/fwd"
 	"io"
 	"math"
 	"sync"
 	"time"
+
+	"github.com/philhofer/fwd"
 )
 
 // where we keep old *Readers
@@ -111,10 +112,10 @@
 // reader will be buffered.
 func NewReader(r io.Reader) *Reader {
 	p := readerPool.Get().(*Reader)
-	if p.r == nil {
-		p.r = fwd.NewReader(r)
+	if p.R == nil {
+		p.R = fwd.NewReader(r)
 	} else {
-		p.r.Reset(r)
+		p.R.Reset(r)
 	}
 	return p
 }
@@ -122,39 +123,96 @@
 // NewReaderSize returns a *Reader with a buffer of the given size.
 // (This is vastly preferable to passing the decoder a reader that is already buffered.)
 func NewReaderSize(r io.Reader, sz int) *Reader {
-	return &Reader{r: fwd.NewReaderSize(r, sz)}
+	return &Reader{R: fwd.NewReaderSize(r, sz)}
 }
 
 // Reader wraps an io.Reader and provides
 // methods to read MessagePack-encoded values
 // from it. Readers are buffered.
 type Reader struct {
-	r       *fwd.Reader
+	// R is the buffered reader
+	// that the Reader uses
+	// to decode MessagePack.
+	// The Reader itself
+	// is stateless; all the
+	// buffering is done
+	// within R.
+	R       *fwd.Reader
 	scratch []byte
 }
 
 // Read implements `io.Reader`
 func (m *Reader) Read(p []byte) (int, error) {
-	return m.r.Read(p)
+	return m.R.Read(p)
+}
+
+// CopyNext reads the next object from m without decoding it and writes it to w.
+// It avoids unnecessary copies internally.
+func (m *Reader) CopyNext(w io.Writer) (int64, error) {
+	sz, o, err := getNextSize(m.R)
+	if err != nil {
+		return 0, err
+	}
+
+	var n int64
+	// Opportunistic optimization: if we can fit the whole thing in the m.R
+	// buffer, then just get a pointer to that, and pass it to w.Write,
+	// avoiding an allocation.
+	if int(sz) <= m.R.BufferSize() {
+		var nn int
+		var buf []byte
+		buf, err = m.R.Next(int(sz))
+		if err != nil {
+			if err == io.ErrUnexpectedEOF {
+				err = ErrShortBytes
+			}
+			return 0, err
+		}
+		nn, err = w.Write(buf)
+		n += int64(nn)
+	} else {
+		// Fall back to io.CopyN.
+		// May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer)
+		n, err = io.CopyN(w, m.R, int64(sz))
+		if err == io.ErrUnexpectedEOF {
+			err = ErrShortBytes
+		}
+	}
+	if err != nil {
+		return n, err
+	} else if n < int64(sz) {
+		return n, io.ErrShortWrite
+	}
+
+	// for maps and slices, read elements
+	for x := uintptr(0); x < o; x++ {
+		var n2 int64
+		n2, err = m.CopyNext(w)
+		if err != nil {
+			return n, err
+		}
+		n += n2
+	}
+	return n, nil
 }
 
 // ReadFull implements `io.ReadFull`
 func (m *Reader) ReadFull(p []byte) (int, error) {
-	return m.r.ReadFull(p)
+	return m.R.ReadFull(p)
 }
 
 // Reset resets the underlying reader.
-func (m *Reader) Reset(r io.Reader) { m.r.Reset(r) }
+func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) }
 
 // Buffered returns the number of bytes currently in the read buffer.
-func (m *Reader) Buffered() int { return m.r.Buffered() }
+func (m *Reader) Buffered() int { return m.R.Buffered() }
 
 // BufferSize returns the capacity of the read buffer.
-func (m *Reader) BufferSize() int { return m.r.BufferSize() }
+func (m *Reader) BufferSize() int { return m.R.BufferSize() }
 
 // NextType returns the next object type to be decoded.
 func (m *Reader) NextType() (Type, error) {
-	p, err := m.r.Peek(1)
+	p, err := m.R.Peek(1)
 	if err != nil {
 		return InvalidType, err
 	}
@@ -182,12 +240,14 @@
 // IsNil returns whether or not
 // the next byte is a null messagepack byte
 func (m *Reader) IsNil() bool {
-	p, err := m.r.Peek(1)
+	p, err := m.R.Peek(1)
 	return err == nil && p[0] == mnil
 }
 
+// getNextSize returns the size of the next object on the wire.
 // returns (obj size, obj elements, error)
 // only maps and arrays have non-zero obj elements
+// for maps and arrays, obj size does not include elements
 //
 // use uintptr b/c it's guaranteed to be large enough
 // to hold whatever we can fit in memory.
@@ -243,8 +303,8 @@
 	// we can use the faster
 	// method if we have enough
 	// buffered data
-	if m.r.Buffered() >= 5 {
-		p, err = m.r.Peek(5)
+	if m.R.Buffered() >= 5 {
+		p, err = m.R.Peek(5)
 		if err != nil {
 			return err
 		}
@@ -253,7 +313,7 @@
 			return err
 		}
 	} else {
-		v, o, err = getNextSize(m.r)
+		v, o, err = getNextSize(m.R)
 		if err != nil {
 			return err
 		}
@@ -261,7 +321,7 @@
 
 	// 'v' is always non-zero
 	// if err == nil
-	_, err = m.r.Skip(int(v))
+	_, err = m.R.Skip(int(v))
 	if err != nil {
 		return err
 	}
@@ -284,26 +344,26 @@
 func (m *Reader) ReadMapHeader() (sz uint32, err error) {
 	var p []byte
 	var lead byte
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
 	lead = p[0]
 	if isfixmap(lead) {
 		sz = uint32(rfixmap(lead))
-		_, err = m.r.Skip(1)
+		_, err = m.R.Skip(1)
 		return
 	}
 	switch lead {
 	case mmap16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
 		sz = uint32(big.Uint16(p[1:]))
 		return
 	case mmap32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -338,7 +398,7 @@
 // method; writing into the returned slice may
 // corrupt future reads.
 func (m *Reader) ReadMapKeyPtr() ([]byte, error) {
-	p, err := m.r.Peek(1)
+	p, err := m.R.Peek(1)
 	if err != nil {
 		return nil, err
 	}
@@ -346,24 +406,24 @@
 	var read int
 	if isfixstr(lead) {
 		read = int(rfixstr(lead))
-		m.r.Skip(1)
+		m.R.Skip(1)
 		goto fill
 	}
 	switch lead {
 	case mstr8, mbin8:
-		p, err = m.r.Next(2)
+		p, err = m.R.Next(2)
 		if err != nil {
 			return nil, err
 		}
 		read = int(p[1])
 	case mstr16, mbin16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return nil, err
 		}
 		read = int(big.Uint16(p[1:]))
 	case mstr32, mbin32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return nil, err
 		}
@@ -375,7 +435,7 @@
 	if read == 0 {
 		return nil, ErrShortBytes
 	}
-	return m.r.Next(read)
+	return m.R.Next(read)
 }
 
 // ReadArrayHeader reads the next object as an
@@ -384,19 +444,19 @@
 func (m *Reader) ReadArrayHeader() (sz uint32, err error) {
 	var lead byte
 	var p []byte
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
 	lead = p[0]
 	if isfixarray(lead) {
 		sz = uint32(rfixarray(lead))
-		_, err = m.r.Skip(1)
+		_, err = m.R.Skip(1)
 		return
 	}
 	switch lead {
 	case marray16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
@@ -404,7 +464,7 @@
 		return
 
 	case marray32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -419,14 +479,14 @@
 
 // ReadNil reads a 'nil' MessagePack byte from the reader
 func (m *Reader) ReadNil() error {
-	p, err := m.r.Peek(1)
+	p, err := m.R.Peek(1)
 	if err != nil {
 		return err
 	}
 	if p[0] != mnil {
 		return badPrefix(NilType, p[0])
 	}
-	_, err = m.r.Skip(1)
+	_, err = m.R.Skip(1)
 	return err
 }
 
@@ -435,7 +495,7 @@
 // it will be up-cast to a float64.)
 func (m *Reader) ReadFloat64() (f float64, err error) {
 	var p []byte
-	p, err = m.r.Peek(9)
+	p, err = m.R.Peek(9)
 	if err != nil {
 		// we'll allow a coversion from float32 to float64,
 		// since we don't lose any precision
@@ -455,14 +515,14 @@
 		return
 	}
 	f = math.Float64frombits(getMuint64(p))
-	_, err = m.r.Skip(9)
+	_, err = m.R.Skip(9)
 	return
 }
 
 // ReadFloat32 reads a float32 from the reader
 func (m *Reader) ReadFloat32() (f float32, err error) {
 	var p []byte
-	p, err = m.r.Peek(5)
+	p, err = m.R.Peek(5)
 	if err != nil {
 		return
 	}
@@ -471,14 +531,14 @@
 		return
 	}
 	f = math.Float32frombits(getMuint32(p))
-	_, err = m.r.Skip(5)
+	_, err = m.R.Skip(5)
 	return
 }
 
 // ReadBool reads a bool from the reader
 func (m *Reader) ReadBool() (b bool, err error) {
 	var p []byte
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
@@ -490,7 +550,7 @@
 		err = badPrefix(BoolType, p[0])
 		return
 	}
-	_, err = m.r.Skip(1)
+	_, err = m.R.Skip(1)
 	return
 }
 
@@ -498,7 +558,7 @@
 func (m *Reader) ReadInt64() (i int64, err error) {
 	var p []byte
 	var lead byte
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
@@ -506,17 +566,17 @@
 
 	if isfixint(lead) {
 		i = int64(rfixint(lead))
-		_, err = m.r.Skip(1)
+		_, err = m.R.Skip(1)
 		return
 	} else if isnfixint(lead) {
 		i = int64(rnfixint(lead))
-		_, err = m.r.Skip(1)
+		_, err = m.R.Skip(1)
 		return
 	}
 
 	switch lead {
 	case mint8:
-		p, err = m.r.Next(2)
+		p, err = m.R.Next(2)
 		if err != nil {
 			return
 		}
@@ -524,7 +584,7 @@
 		return
 
 	case mint16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
@@ -532,7 +592,7 @@
 		return
 
 	case mint32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -540,7 +600,7 @@
 		return
 
 	case mint64:
-		p, err = m.r.Next(9)
+		p, err = m.R.Next(9)
 		if err != nil {
 			return
 		}
@@ -607,19 +667,19 @@
 func (m *Reader) ReadUint64() (u uint64, err error) {
 	var p []byte
 	var lead byte
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
 	lead = p[0]
 	if isfixint(lead) {
 		u = uint64(rfixint(lead))
-		_, err = m.r.Skip(1)
+		_, err = m.R.Skip(1)
 		return
 	}
 	switch lead {
 	case muint8:
-		p, err = m.r.Next(2)
+		p, err = m.R.Next(2)
 		if err != nil {
 			return
 		}
@@ -627,7 +687,7 @@
 		return
 
 	case muint16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
@@ -635,7 +695,7 @@
 		return
 
 	case muint32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -643,7 +703,7 @@
 		return
 
 	case muint64:
-		p, err = m.r.Next(9)
+		p, err = m.R.Next(9)
 		if err != nil {
 			return
 		}
@@ -707,6 +767,10 @@
 	return
 }
 
+// ReadByte is analogous to ReadUint8.
+//
+// NOTE: this is *not* an implementation
+// of io.ByteReader.
 func (m *Reader) ReadByte() (b byte, err error) {
 	var in uint64
 	in, err = m.ReadUint64()
@@ -724,7 +788,7 @@
 func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
 	var p []byte
 	var lead byte
-	p, err = m.r.Peek(2)
+	p, err = m.R.Peek(2)
 	if err != nil {
 		return
 	}
@@ -733,15 +797,15 @@
 	switch lead {
 	case mbin8:
 		read = int64(p[1])
-		m.r.Skip(2)
+		m.R.Skip(2)
 	case mbin16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
 		read = int64(big.Uint16(p[1:]))
 	case mbin32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -755,16 +819,55 @@
 	} else {
 		b = scratch[0:read]
 	}
-	_, err = m.r.ReadFull(b)
+	_, err = m.R.ReadFull(b)
 	return
 }
 
+// ReadBytesHeader reads the size header
+// of a MessagePack 'bin' object. The user
+// is responsible for dealing with the next
+// 'sz' bytes from the reader in an application-specific
+// way.
+func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
+	var p []byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	switch p[0] {
+	case mbin8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		sz = uint32(p[1])
+		return
+	case mbin16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint16(p[1:]))
+		return
+	case mbin32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint32(p[1:]))
+		return
+	default:
+		err = badPrefix(BinType, p[0])
+		return
+	}
+}
+
 // ReadExactBytes reads a MessagePack 'bin'-encoded
 // object off of the wire into the provided slice. An
 // ArrayError will be returned if the object is not
 // exactly the length of the input slice.
 func (m *Reader) ReadExactBytes(into []byte) error {
-	p, err := m.r.Peek(2)
+	p, err := m.R.Peek(2)
 	if err != nil {
 		return err
 	}
@@ -776,14 +879,14 @@
 		read = int64(p[1])
 		skip = 2
 	case mbin16:
-		p, err = m.r.Peek(3)
+		p, err = m.R.Peek(3)
 		if err != nil {
 			return err
 		}
 		read = int64(big.Uint16(p[1:]))
 		skip = 3
 	case mbin32:
-		p, err = m.r.Peek(5)
+		p, err = m.R.Peek(5)
 		if err != nil {
 			return err
 		}
@@ -795,8 +898,8 @@
 	if read != int64(len(into)) {
 		return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)}
 	}
-	m.r.Skip(skip)
-	_, err = m.r.ReadFull(into)
+	m.R.Skip(skip)
+	_, err = m.R.ReadFull(into)
 	return err
 }
 
@@ -806,7 +909,7 @@
 func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
 	var p []byte
 	var lead byte
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
@@ -815,25 +918,25 @@
 
 	if isfixstr(lead) {
 		read = int64(rfixstr(lead))
-		m.r.Skip(1)
+		m.R.Skip(1)
 		goto fill
 	}
 
 	switch lead {
 	case mstr8:
-		p, err = m.r.Next(2)
+		p, err = m.R.Next(2)
 		if err != nil {
 			return
 		}
 		read = int64(uint8(p[1]))
 	case mstr16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
 		read = int64(big.Uint16(p[1:]))
 	case mstr32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -848,16 +951,60 @@
 	} else {
 		b = scratch[0:read]
 	}
-	_, err = m.r.ReadFull(b)
+	_, err = m.R.ReadFull(b)
 	return
 }
 
+// ReadStringHeader reads a string header
+// off of the wire. The user is then responsible
+// for dealing with the next 'sz' bytes from
+// the reader in an application-specific manner.
+func (m *Reader) ReadStringHeader() (sz uint32, err error) {
+	var p []byte
+	p, err = m.R.Peek(1)
+	if err != nil {
+		return
+	}
+	lead := p[0]
+	if isfixstr(lead) {
+		sz = uint32(rfixstr(lead))
+		m.R.Skip(1)
+		return
+	}
+	switch lead {
+	case mstr8:
+		p, err = m.R.Next(2)
+		if err != nil {
+			return
+		}
+		sz = uint32(p[1])
+		return
+	case mstr16:
+		p, err = m.R.Next(3)
+		if err != nil {
+			return
+		}
+		sz = uint32(big.Uint16(p[1:]))
+		return
+	case mstr32:
+		p, err = m.R.Next(5)
+		if err != nil {
+			return
+		}
+		sz = big.Uint32(p[1:])
+		return
+	default:
+		err = badPrefix(StrType, lead)
+		return
+	}
+}
+
 // ReadString reads a utf-8 string from the reader
 func (m *Reader) ReadString() (s string, err error) {
 	var p []byte
 	var lead byte
 	var read int64
-	p, err = m.r.Peek(1)
+	p, err = m.R.Peek(1)
 	if err != nil {
 		return
 	}
@@ -865,25 +1012,25 @@
 
 	if isfixstr(lead) {
 		read = int64(rfixstr(lead))
-		m.r.Skip(1)
+		m.R.Skip(1)
 		goto fill
 	}
 
 	switch lead {
 	case mstr8:
-		p, err = m.r.Next(2)
+		p, err = m.R.Next(2)
 		if err != nil {
 			return
 		}
 		read = int64(uint8(p[1]))
 	case mstr16:
-		p, err = m.r.Next(3)
+		p, err = m.R.Next(3)
 		if err != nil {
 			return
 		}
 		read = int64(big.Uint16(p[1:]))
 	case mstr32:
-		p, err = m.r.Next(5)
+		p, err = m.R.Next(5)
 		if err != nil {
 			return
 		}
@@ -915,7 +1062,7 @@
 	// thus escape analysis *must* conclude that
 	// 'out' escapes.
 	out := make([]byte, read)
-	_, err = m.r.ReadFull(out)
+	_, err = m.R.ReadFull(out)
 	if err != nil {
 		return
 	}
@@ -926,7 +1073,7 @@
 // ReadComplex64 reads a complex64 from the reader
 func (m *Reader) ReadComplex64() (f complex64, err error) {
 	var p []byte
-	p, err = m.r.Peek(10)
+	p, err = m.R.Peek(10)
 	if err != nil {
 		return
 	}
@@ -940,14 +1087,14 @@
 	}
 	f = complex(math.Float32frombits(big.Uint32(p[2:])),
 		math.Float32frombits(big.Uint32(p[6:])))
-	_, err = m.r.Skip(10)
+	_, err = m.R.Skip(10)
 	return
 }
 
 // ReadComplex128 reads a complex128 from the reader
 func (m *Reader) ReadComplex128() (f complex128, err error) {
 	var p []byte
-	p, err = m.r.Peek(18)
+	p, err = m.R.Peek(18)
 	if err != nil {
 		return
 	}
@@ -961,7 +1108,7 @@
 	}
 	f = complex(math.Float64frombits(big.Uint64(p[2:])),
 		math.Float64frombits(big.Uint64(p[10:])))
-	_, err = m.r.Skip(18)
+	_, err = m.R.Skip(18)
 	return
 }
 
@@ -996,7 +1143,7 @@
 // The returned time's location will be set to time.Local.
 func (m *Reader) ReadTime() (t time.Time, err error) {
 	var p []byte
-	p, err = m.r.Peek(15)
+	p, err = m.R.Peek(15)
 	if err != nil {
 		return
 	}
@@ -1010,7 +1157,7 @@
 	}
 	sec, nsec := getUnix(p[3:])
 	t = time.Unix(sec, int64(nsec)).Local()
-	_, err = m.r.Skip(15)
+	_, err = m.R.Skip(15)
 	return
 }
 
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
index 732fa68..78e466f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
@@ -117,13 +117,13 @@
 }
 
 func appendNext(f *Reader, d *[]byte) error {
-	amt, o, err := getNextSize(f.r)
+	amt, o, err := getNextSize(f.R)
 	if err != nil {
 		return err
 	}
 	var i int
 	*d, i = ensure(*d, int(amt))
-	_, err = f.r.ReadFull((*d)[i:])
+	_, err = f.R.ReadFull((*d)[i:])
 	if err != nil {
 		return err
 	}
@@ -576,7 +576,7 @@
 	return uint(u), b, err
 }
 
-// ReadByteBytes is analagous to ReadUint8Bytes
+// ReadByteBytes is analogous to ReadUint8Bytes
 func ReadByteBytes(b []byte) (byte, []byte, error) {
 	return ReadUint8Bytes(b)
 }
@@ -784,6 +784,22 @@
 	return string(v), o, err
 }
 
+// ReadStringAsBytes reads a 'str' object
+// into a slice of bytes. 'v' is the value of
+// the 'str' object, which may reside in memory
+// pointed to by 'scratch'. 'o' is the remaining bytes
+// in 'b'.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (not 'str' type)
+// - InvalidPrefixError (unknown type marker)
+func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+	var tmp []byte
+	tmp, o, err = ReadStringZC(b)
+	v = append(scratch[:0], tmp...)
+	return
+}
+
 // ReadComplex128Bytes reads a complex128
 // extension object from 'b' and returns the
 // remaining bytes.
@@ -922,14 +938,14 @@
 
 	case ArrayType:
 		var sz uint32
-		sz, b, err = ReadArrayHeaderBytes(b)
+		sz, o, err = ReadArrayHeaderBytes(b)
 		if err != nil {
 			return
 		}
 		j := make([]interface{}, int(sz))
 		i = j
 		for d := range j {
-			j[d], b, err = ReadIntfBytes(b)
+			j[d], o, err = ReadIntfBytes(o)
 			if err != nil {
 				return
 			}
diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
new file mode 100644
index 0000000..4bcf321
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
@@ -0,0 +1,41 @@
+// +build !appengine
+
+package msgp
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// NOTE:
+// all of the definitions in this file
+// should be repeated in appengine.go,
+// but without using unsafe
+
+const (
+	// spec says int and uint are always
+	// the same size, but that int/uint
+	// size may not be machine word size
+	smallint = unsafe.Sizeof(int(0)) == 4
+)
+
+// UnsafeString returns the byte slice as a volatile string
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeString(b []byte) string {
+	sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: sh.Data, Len: sh.Len}))
+}
+
+// UnsafeBytes returns the string as a byte slice
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+		Len:  len(s),
+		Cap:  len(s),
+		Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
+	}))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go
index 216697f..da9099c 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write.go
@@ -10,13 +10,6 @@
 	"time"
 )
 
-func abs(i int64) int64 {
-	if i < 0 {
-		return -i
-	}
-	return i
-}
-
 // Sizer is an interface implemented
 // by types that can estimate their
 // size when MessagePack encoded.
@@ -59,15 +52,26 @@
 // it will cause undefined behavior.
 func freeW(w *Writer) { pushWriter(w) }
 
-// Require ensures that cap(old)-len(old) >= extra
+// Require ensures that cap(old)-len(old) >= extra.
 func Require(old []byte, extra int) []byte {
-	if cap(old)-len(old) >= extra {
+	l := len(old)
+	c := cap(old)
+	r := l + extra
+	if c >= r {
 		return old
-	}
-	if len(old) == 0 {
+	} else if l == 0 {
 		return make([]byte, 0, extra)
 	}
-	n := make([]byte, len(old), cap(old)-len(old)+extra)
+	// the new size is the greater
+	// of double the old capacity
+	// and the sum of the old length
+	// and the number of new bytes
+	// necessary.
+	c <<= 1
+	if c < r {
+		c = r
+	}
+	n := make([]byte, l, c)
 	copy(n, old)
 	return n
 }
@@ -184,6 +188,17 @@
 	return wl, nil
 }
 
+func (mw *Writer) Append(b ...byte) error {
+	if mw.avail() < len(b) {
+		err := mw.flush()
+		if err != nil {
+			return err
+		}
+	}
+	mw.wloc += copy(mw.buf[mw.wloc:], b)
+	return nil
+}
+
 // push one byte onto the buffer
 //
 // NOTE: this is a hot code path
@@ -289,9 +304,9 @@
 // size to the writer
 func (mw *Writer) WriteMapHeader(sz uint32) error {
 	switch {
-	case sz < 16:
+	case sz <= 15:
 		return mw.push(wfixmap(uint8(sz)))
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		return mw.prefix16(mmap16, uint16(sz))
 	default:
 		return mw.prefix32(mmap32, sz)
@@ -302,9 +317,9 @@
 // given size to the writer
 func (mw *Writer) WriteArrayHeader(sz uint32) error {
 	switch {
-	case sz < 16:
+	case sz <= 15:
 		return mw.push(wfixarray(uint8(sz)))
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		return mw.prefix16(marray16, uint16(sz))
 	default:
 		return mw.prefix32(marray32, sz)
@@ -328,17 +343,26 @@
 
 // WriteInt64 writes an int64 to the writer
 func (mw *Writer) WriteInt64(i int64) error {
-	a := abs(i)
+	if i >= 0 {
+		switch {
+		case i <= math.MaxInt8:
+			return mw.push(wfixint(uint8(i)))
+		case i <= math.MaxInt16:
+			return mw.prefix16(mint16, uint16(i))
+		case i <= math.MaxInt32:
+			return mw.prefix32(mint32, uint32(i))
+		default:
+			return mw.prefix64(mint64, uint64(i))
+		}
+	}
 	switch {
-	case i < 0 && i > -32:
+	case i >= -32:
 		return mw.push(wnfixint(int8(i)))
-	case i >= 0 && i < 128:
-		return mw.push(wfixint(uint8(i)))
-	case a < math.MaxInt8:
+	case i >= math.MinInt8:
 		return mw.prefix8(mint8, uint8(i))
-	case a < math.MaxInt16:
+	case i >= math.MinInt16:
 		return mw.prefix16(mint16, uint16(i))
-	case a < math.MaxInt32:
+	case i >= math.MinInt32:
 		return mw.prefix32(mint32, uint32(i))
 	default:
 		return mw.prefix64(mint64, uint64(i))
@@ -360,20 +384,20 @@
 // WriteUint64 writes a uint64 to the writer
 func (mw *Writer) WriteUint64(u uint64) error {
 	switch {
-	case u < (1 << 7):
+	case u <= (1<<7)-1:
 		return mw.push(wfixint(uint8(u)))
-	case u < math.MaxUint8:
+	case u <= math.MaxUint8:
 		return mw.prefix8(muint8, uint8(u))
-	case u < math.MaxUint16:
+	case u <= math.MaxUint16:
 		return mw.prefix16(muint16, uint16(u))
-	case u < math.MaxUint32:
+	case u <= math.MaxUint32:
 		return mw.prefix32(muint32, uint32(u))
 	default:
 		return mw.prefix64(muint64, u)
 	}
 }
 
-// WriteByte is analagous to WriteUint8
+// WriteByte is analogous to WriteUint8
 func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
 
 // WriteUint8 writes a uint8 to the writer
@@ -393,9 +417,9 @@
 	sz := uint32(len(b))
 	var err error
 	switch {
-	case sz < math.MaxUint8:
+	case sz <= math.MaxUint8:
 		err = mw.prefix8(mbin8, uint8(sz))
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		err = mw.prefix16(mbin16, uint16(sz))
 	default:
 		err = mw.prefix32(mbin32, sz)
@@ -407,6 +431,20 @@
 	return err
 }
 
+// WriteBytesHeader writes just the size header
+// of a MessagePack 'bin' object. The user is responsible
+// for then writing 'sz' more bytes into the stream.
+func (mw *Writer) WriteBytesHeader(sz uint32) error {
+	switch {
+	case sz <= math.MaxUint8:
+		return mw.prefix8(mbin8, uint8(sz))
+	case sz <= math.MaxUint16:
+		return mw.prefix16(mbin16, uint16(sz))
+	default:
+		return mw.prefix32(mbin32, sz)
+	}
+}
+
 // WriteBool writes a bool to the writer
 func (mw *Writer) WriteBool(b bool) error {
 	if b {
@@ -421,11 +459,11 @@
 	sz := uint32(len(s))
 	var err error
 	switch {
-	case sz < 32:
+	case sz <= 31:
 		err = mw.push(wfixstr(uint8(sz)))
-	case sz < math.MaxUint8:
+	case sz <= math.MaxUint8:
 		err = mw.prefix8(mstr8, uint8(sz))
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		err = mw.prefix16(mstr16, uint16(sz))
 	default:
 		err = mw.prefix32(mstr32, sz)
@@ -436,6 +474,45 @@
 	return mw.writeString(s)
 }
 
+// WriteStringHeader writes just the string size
+// header of a MessagePack 'str' object. The user
+// is responsible for writing 'sz' more valid UTF-8
+// bytes to the stream.
+func (mw *Writer) WriteStringHeader(sz uint32) error {
+	switch {
+	case sz <= 31:
+		return mw.push(wfixstr(uint8(sz)))
+	case sz <= math.MaxUint8:
+		return mw.prefix8(mstr8, uint8(sz))
+	case sz <= math.MaxUint16:
+		return mw.prefix16(mstr16, uint16(sz))
+	default:
+		return mw.prefix32(mstr32, sz)
+	}
+}
+
+// WriteStringFromBytes writes a 'str' object
+// from a []byte.
+func (mw *Writer) WriteStringFromBytes(str []byte) error {
+	sz := uint32(len(str))
+	var err error
+	switch {
+	case sz <= 31:
+		err = mw.push(wfixstr(uint8(sz)))
+	case sz <= math.MaxUint8:
+		err = mw.prefix8(mstr8, uint8(sz))
+	case sz <= math.MaxUint16:
+		err = mw.prefix16(mstr16, uint16(sz))
+	default:
+		err = mw.prefix32(mstr32, sz)
+	}
+	if err != nil {
+		return err
+	}
+	_, err = mw.Write(str)
+	return err
+}
+
 // WriteComplex64 writes a complex64 to the writer
 func (mw *Writer) WriteComplex64(f complex64) error {
 	o, err := mw.require(10)
@@ -509,7 +586,7 @@
 // elapsed since "zero" Unix time, followed by 4 bytes
 // for a big-endian 32-bit signed integer denoting
 // the nanosecond offset of the time. This encoding
-// is intended to ease portability accross languages.
+// is intended to ease portability across languages.
 // (Note that this is *not* the standard time.Time
 // binary encoding, because its implementation relies
 // heavily on the internal representation used by the
@@ -612,7 +689,7 @@
 }
 
 func (mw *Writer) writeMap(v reflect.Value) (err error) {
-	if v.Elem().Kind() != reflect.String {
+	if v.Type().Key().Kind() != reflect.String {
 		return errors.New("msgp: map keys must be strings")
 	}
 	ks := v.MapKeys()
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
index 658102e..eaa03c4 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -22,10 +22,10 @@
 // given size to the slice
 func AppendMapHeader(b []byte, sz uint32) []byte {
 	switch {
-	case sz < 16:
+	case sz <= 15:
 		return append(b, wfixmap(uint8(sz)))
 
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		o, n := ensure(b, 3)
 		prefixu16(o[n:], mmap16, uint16(sz))
 		return o
@@ -41,10 +41,10 @@
 // the given size to the slice
 func AppendArrayHeader(b []byte, sz uint32) []byte {
 	switch {
-	case sz < 16:
+	case sz <= 15:
 		return append(b, wfixarray(uint8(sz)))
 
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		o, n := ensure(b, 3)
 		prefixu16(o[n:], marray16, uint16(sz))
 		return o
@@ -75,29 +75,39 @@
 
 // AppendInt64 appends an int64 to the slice
 func AppendInt64(b []byte, i int64) []byte {
-	a := abs(i)
+	if i >= 0 {
+		switch {
+		case i <= math.MaxInt8:
+			return append(b, wfixint(uint8(i)))
+		case i <= math.MaxInt16:
+			o, n := ensure(b, 3)
+			putMint16(o[n:], int16(i))
+			return o
+		case i <= math.MaxInt32:
+			o, n := ensure(b, 5)
+			putMint32(o[n:], int32(i))
+			return o
+		default:
+			o, n := ensure(b, 9)
+			putMint64(o[n:], i)
+			return o
+		}
+	}
 	switch {
-	case i < 0 && i > -32:
+	case i >= -32:
 		return append(b, wnfixint(int8(i)))
-
-	case i >= 0 && i < 128:
-		return append(b, wfixint(uint8(i)))
-
-	case a < math.MaxInt8:
+	case i >= math.MinInt8:
 		o, n := ensure(b, 2)
 		putMint8(o[n:], int8(i))
 		return o
-
-	case a < math.MaxInt16:
+	case i >= math.MinInt16:
 		o, n := ensure(b, 3)
 		putMint16(o[n:], int16(i))
 		return o
-
-	case a < math.MaxInt32:
+	case i >= math.MinInt32:
 		o, n := ensure(b, 5)
 		putMint32(o[n:], int32(i))
 		return o
-
 	default:
 		o, n := ensure(b, 9)
 		putMint64(o[n:], i)
@@ -120,20 +130,20 @@
 // AppendUint64 appends a uint64 to the slice
 func AppendUint64(b []byte, u uint64) []byte {
 	switch {
-	case u < (1 << 7):
+	case u <= (1<<7)-1:
 		return append(b, wfixint(uint8(u)))
 
-	case u < math.MaxUint8:
+	case u <= math.MaxUint8:
 		o, n := ensure(b, 2)
 		putMuint8(o[n:], uint8(u))
 		return o
 
-	case u < math.MaxUint16:
+	case u <= math.MaxUint16:
 		o, n := ensure(b, 3)
 		putMuint16(o[n:], uint16(u))
 		return o
 
-	case u < math.MaxUint32:
+	case u <= math.MaxUint32:
 		o, n := ensure(b, 5)
 		putMuint32(o[n:], uint32(u))
 		return o
@@ -152,7 +162,7 @@
 // AppendUint8 appends a uint8 to the slice
 func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
 
-// AppendByte is analagous to AppendUint8
+// AppendByte is analogous to AppendUint8
 func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
 
 // AppendUint16 appends a uint16 to the slice
@@ -167,11 +177,11 @@
 	var o []byte
 	var n int
 	switch {
-	case sz < math.MaxUint8:
+	case sz <= math.MaxUint8:
 		o, n = ensure(b, 2+sz)
 		prefixu8(o[n:], mbin8, uint8(sz))
 		n += 2
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		o, n = ensure(b, 3+sz)
 		prefixu16(o[n:], mbin16, uint16(sz))
 		n += 3
@@ -197,15 +207,15 @@
 	var n int
 	var o []byte
 	switch {
-	case sz < 32:
+	case sz <= 31:
 		o, n = ensure(b, 1+sz)
 		o[n] = wfixstr(uint8(sz))
 		n++
-	case sz < math.MaxUint8:
+	case sz <= math.MaxUint8:
 		o, n = ensure(b, 2+sz)
 		prefixu8(o[n:], mstr8, uint8(sz))
 		n += 2
-	case sz < math.MaxUint16:
+	case sz <= math.MaxUint16:
 		o, n = ensure(b, 3+sz)
 		prefixu16(o[n:], mstr16, uint16(sz))
 		n += 3
@@ -217,6 +227,33 @@
 	return o[:n+copy(o[n:], s)]
 }
 
+// AppendStringFromBytes appends a []byte
+// as a MessagePack 'str' to the slice 'b'.
+func AppendStringFromBytes(b []byte, str []byte) []byte {
+	sz := len(str)
+	var n int
+	var o []byte
+	switch {
+	case sz <= 31:
+		o, n = ensure(b, 1+sz)
+		o[n] = wfixstr(uint8(sz))
+		n++
+	case sz <= math.MaxUint8:
+		o, n = ensure(b, 2+sz)
+		prefixu8(o[n:], mstr8, uint8(sz))
+		n += 2
+	case sz <= math.MaxUint16:
+		o, n = ensure(b, 3+sz)
+		prefixu16(o[n:], mstr16, uint16(sz))
+		n += 3
+	default:
+		o, n = ensure(b, 5+sz)
+		prefixu32(o[n:], mstr32, uint32(sz))
+		n += 5
+	}
+	return o[:n+copy(o[n:], str)]
+}
+
 // AppendComplex64 appends a complex64 to the slice as a MessagePack extension
 func AppendComplex64(b []byte, c complex64) []byte {
 	o, n := ensure(b, Complex64Size)
@@ -362,7 +399,12 @@
 			}
 		}
 		return b, nil
-
+	case reflect.Ptr:
+		if v.IsNil() {
+			return AppendNil(b), err
+		}
+		b, err = AppendIntf(b, v.Elem().Interface())
+		return b, err
 	default:
 		return b, &ErrUnsupportedType{T: v.Type()}
 	}
diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md
index 2367fae..0b61be2 100644
--- a/vendor/github.com/vishvananda/netlink/README.md
+++ b/vendor/github.com/vishvananda/netlink/README.md
@@ -38,15 +38,18 @@
 package main
 
 import (
-    "net"
+    "fmt"
     "github.com/vishvananda/netlink"
 )
 
 func main() {
     la := netlink.NewLinkAttrs()
     la.Name = "foo"
-    mybridge := &netlink.Bridge{la}}
-    _ := netlink.LinkAdd(mybridge)
+    mybridge := &netlink.Bridge{LinkAttrs: la}
+    err := netlink.LinkAdd(mybridge)
+    if err != nil  {
+        fmt.Printf("could not add %s: %v\n", la.Name, err)
+    }
     eth1, _ := netlink.LinkByName("eth1")
     netlink.LinkSetMaster(eth1, mybridge)
 }
@@ -63,7 +66,6 @@
 package main
 
 import (
-    "net"
     "github.com/vishvananda/netlink"
 )
 
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index f33242a..8808b42 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -2,7 +2,6 @@
 
 import (
 	"fmt"
-	"log"
 	"net"
 	"strings"
 	"syscall"
@@ -65,7 +64,7 @@
 	msg := nl.NewIfAddrmsg(family)
 	msg.Index = uint32(base.Index)
 	msg.Scope = uint8(addr.Scope)
-	prefixlen, _ := addr.Mask.Size()
+	prefixlen, masklen := addr.Mask.Size()
 	msg.Prefixlen = uint8(prefixlen)
 	req.AddData(msg)
 
@@ -103,9 +102,14 @@
 		}
 	}
 
-	if addr.Broadcast != nil {
-		req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast))
+	if addr.Broadcast == nil {
+		calcBroadcast := make(net.IP, masklen/8)
+		for i := range localAddrData {
+			calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i]
+		}
+		addr.Broadcast = calcBroadcast
 	}
+	req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast))
 
 	if addr.Label != "" {
 		labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label))
@@ -232,16 +236,34 @@
 // AddrSubscribe takes a chan down which notifications will be sent
 // when addresses change.  Close the 'done' chan to stop subscription.
 func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
-	return addrSubscribe(netns.None(), netns.None(), ch, done)
+	return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil)
 }
 
 // AddrSubscribeAt works like AddrSubscribe plus it allows the caller
 // to choose the network namespace in which to subscribe (ns).
 func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
-	return addrSubscribe(ns, netns.None(), ch, done)
+	return addrSubscribeAt(ns, netns.None(), ch, done, nil)
 }
 
-func addrSubscribe(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
+// AddrSubscribeOptions contains a set of options to use with
+// AddrSubscribeWithOptions.
+type AddrSubscribeOptions struct {
+	Namespace     *netns.NsHandle
+	ErrorCallback func(error)
+}
+
+// AddrSubscribeWithOptions work like AddrSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error {
+	if options.Namespace == nil {
+		none := netns.None()
+		options.Namespace = &none
+	}
+	return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error {
 	s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
 	if err != nil {
 		return err
@@ -257,20 +279,26 @@
 		for {
 			msgs, err := s.Receive()
 			if err != nil {
-				log.Printf("netlink.AddrSubscribe: Receive() error: %v", err)
+				if cberr != nil {
+					cberr(err)
+				}
 				return
 			}
 			for _, m := range msgs {
 				msgType := m.Header.Type
 				if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR {
-					log.Printf("netlink.AddrSubscribe: bad message type: %d", msgType)
-					continue
+					if cberr != nil {
+						cberr(fmt.Errorf("bad message type: %d", msgType))
+					}
+					return
 				}
 
 				addr, _, ifindex, err := parseAddr(m.Data)
 				if err != nil {
-					log.Printf("netlink.AddrSubscribe: could not parse address: %v", err)
-					continue
+					if cberr != nil {
+						cberr(fmt.Errorf("could not parse address: %v", err))
+					}
+					return
 				}
 
 				ch <- AddrUpdate{LinkAddress: *addr.IPNet,
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
new file mode 100644
index 0000000..a65d6a1
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -0,0 +1,115 @@
+package netlink
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+	return pkgHandle.BridgeVlanList()
+}
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+	req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+	msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+	req.AddData(msg)
+	req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN))))
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+	if err != nil {
+		return nil, err
+	}
+	ret := make(map[int32][]*nl.BridgeVlanInfo)
+	for _, m := range msgs {
+		msg := nl.DeserializeIfInfomsg(m)
+
+		attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+		for _, attr := range attrs {
+			switch attr.Attr.Type {
+			case nl.IFLA_AF_SPEC:
+				//nested attr
+				nestAttrs, err := nl.ParseRouteAttr(attr.Value)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse nested attr %v", err)
+				}
+				for _, nestAttr := range nestAttrs {
+					switch nestAttr.Attr.Type {
+					case nl.IFLA_BRIDGE_VLAN_INFO:
+						vlanInfo := nl.DeserializeBridgeVlanInfo(nestAttr.Value)
+						ret[msg.Index] = append(ret[msg.Index], vlanInfo)
+					}
+				}
+			}
+		}
+	}
+	return ret, nil
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+	return pkgHandle.BridgeVlanAdd(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+	return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel adds a new vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+	return pkgHandle.BridgeVlanDel(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel adds a new vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+	return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master)
+}
+
+func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error {
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	br := nl.NewRtAttr(nl.IFLA_AF_SPEC, nil)
+	var flags uint16
+	if self {
+		flags |= nl.BRIDGE_FLAGS_SELF
+	}
+	if master {
+		flags |= nl.BRIDGE_FLAGS_MASTER
+	}
+	if flags > 0 {
+		nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags))
+	}
+	vlanInfo := &nl.BridgeVlanInfo{Vid: vid}
+	if pvid {
+		vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID
+	}
+	if untagged {
+		vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED
+	}
+	nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+	req.AddData(br)
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
index 20df903..ecf0445 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -22,7 +22,11 @@
 	// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2
 	ConntrackExpectTable = 2
 )
-
+const (
+	// For Parsing Mark
+	TCP_PROTO = 6
+	UDP_PROTO = 17
+)
 const (
 	// backward compatibility with golang 1.6 which does not have io.SeekCurrent
 	seekCurrent = 1
@@ -56,7 +60,7 @@
 
 // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
 // conntrack -D [table] parameters         Delete conntrack or expectation
-func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
 	return pkgHandle.ConntrackDeleteFilter(table, family, filter)
 }
 
@@ -88,7 +92,7 @@
 
 // ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
 // conntrack -D [table] parameters         Delete conntrack or expectation
-func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
 	res, err := h.dumpConntrackTable(table, family)
 	if err != nil {
 		return 0, err
@@ -142,15 +146,16 @@
 	FamilyType uint8
 	Forward    ipTuple
 	Reverse    ipTuple
+	Mark       uint32
 }
 
 func (s *ConntrackFlow) String() string {
 	// conntrack cmd output:
-	// udp      17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001
-	return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d",
+	// udp      17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 mark=0
+	return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d mark=%d",
 		nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
 		s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort,
-		s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort)
+		s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Mark)
 }
 
 // This method parse the ip tuple structure
@@ -160,7 +165,7 @@
 // <len, NLA_F_NESTED|nl.CTA_TUPLE_PROTO, 1 byte for the protocol, 3 bytes of padding>
 // <len, CTA_PROTO_SRC_PORT, 2 bytes for the source port, 2 bytes of padding>
 // <len, CTA_PROTO_DST_PORT, 2 bytes for the source port, 2 bytes of padding>
-func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) {
+func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 {
 	for i := 0; i < 2; i++ {
 		_, t, _, v := parseNfAttrTLV(reader)
 		switch t {
@@ -189,6 +194,7 @@
 		// Skip some padding 2 byte
 		reader.Seek(2, seekCurrent)
 	}
+	return tpl.Protocol
 }
 
 func parseNfAttrTLV(r *bytes.Reader) (isNested bool, attrType, len uint16, value []byte) {
@@ -216,6 +222,7 @@
 
 func parseRawData(data []byte) *ConntrackFlow {
 	s := &ConntrackFlow{}
+	var proto uint8
 	// First there is the Nfgenmsg header
 	// consume only the family field
 	reader := bytes.NewReader(data)
@@ -234,7 +241,7 @@
 		nested, t, l := parseNfAttrTL(reader)
 		if nested && t == nl.CTA_TUPLE_ORIG {
 			if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
-				parseIpTuple(reader, &s.Forward)
+				proto = parseIpTuple(reader, &s.Forward)
 			}
 		} else if nested && t == nl.CTA_TUPLE_REPLY {
 			if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
@@ -248,7 +255,19 @@
 			}
 		}
 	}
-
+	if proto == TCP_PROTO {
+		reader.Seek(64, seekCurrent)
+		_, t, _, v := parseNfAttrTLV(reader)
+		if t == nl.CTA_MARK {
+			s.Mark = uint32(v[3])
+		}
+	} else if proto == UDP_PROTO {
+		reader.Seek(16, seekCurrent)
+		_, t, _, v := parseNfAttrTLV(reader)
+		if t == nl.CTA_MARK {
+			s.Mark = uint32(v[3])
+		}
+	}
 	return s
 }
 
@@ -290,6 +309,12 @@
 	ConntrackNatAnyIP         // -any-nat ip    Source or destination NAT ip
 )
 
+type CustomConntrackFilter interface {
+	// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches
+	// the filter or false otherwise
+	MatchConntrackFlow(flow *ConntrackFlow) bool
+}
+
 type ConntrackFilter struct {
 	ipFilter map[ConntrackFilterType]net.IP
 }
@@ -342,3 +367,5 @@
 
 	return match
 }
+
+var _ CustomConntrackFilter = (*ConntrackFilter)(nil)
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index 938b28b..1120c79 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -2,8 +2,6 @@
 
 import (
 	"fmt"
-
-	"github.com/vishvananda/netlink/nl"
 )
 
 type Filter interface {
@@ -184,14 +182,6 @@
 	}
 }
 
-// Constants used in TcU32Sel.Flags.
-const (
-	TC_U32_TERMINAL  = nl.TC_U32_TERMINAL
-	TC_U32_OFFSET    = nl.TC_U32_OFFSET
-	TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
-	TC_U32_EAT       = nl.TC_U32_EAT
-)
-
 // Sel of the U32 filters that contains multiple TcU32Key. This is the copy
 // and the frontend representation of nl.TcU32Sel. It is serialized into canonical
 // nl.TcU32Sel with the appropriate endianness.
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index dc0f90a..5025bd5 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -11,6 +11,14 @@
 	"github.com/vishvananda/netlink/nl"
 )
 
+// Constants used in TcU32Sel.Flags.
+const (
+	TC_U32_TERMINAL  = nl.TC_U32_TERMINAL
+	TC_U32_OFFSET    = nl.TC_U32_OFFSET
+	TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
+	TC_U32_EAT       = nl.TC_U32_EAT
+)
+
 // Fw filter filters on firewall marks
 // NOTE: this is in filter_linux because it refers to nl.TcPolice which
 //       is defined in nl/tc_linux.go
@@ -128,9 +136,11 @@
 	req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type())))
 
 	options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
-	if u32, ok := filter.(*U32); ok {
+
+	switch filter := filter.(type) {
+	case *U32:
 		// Convert TcU32Sel into nl.TcU32Sel as it is without copy.
-		sel := (*nl.TcU32Sel)(unsafe.Pointer(u32.Sel))
+		sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel))
 		if sel == nil {
 			// match all
 			sel = &nl.TcU32Sel{
@@ -158,56 +168,56 @@
 		}
 		sel.Nkeys = uint8(len(sel.Keys))
 		nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
-		if u32.ClassId != 0 {
-			nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(u32.ClassId))
+		if filter.ClassId != 0 {
+			nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId))
 		}
 		actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
 		// backwards compatibility
-		if u32.RedirIndex != 0 {
-			u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...)
+		if filter.RedirIndex != 0 {
+			filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...)
 		}
-		if err := EncodeActions(actionsAttr, u32.Actions); err != nil {
+		if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
 			return err
 		}
-	} else if fw, ok := filter.(*Fw); ok {
-		if fw.Mask != 0 {
+	case *Fw:
+		if filter.Mask != 0 {
 			b := make([]byte, 4)
-			native.PutUint32(b, fw.Mask)
+			native.PutUint32(b, filter.Mask)
 			nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b)
 		}
-		if fw.InDev != "" {
-			nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(fw.InDev))
+		if filter.InDev != "" {
+			nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev))
 		}
-		if (fw.Police != nl.TcPolice{}) {
+		if (filter.Police != nl.TcPolice{}) {
 
 			police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil)
-			nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, fw.Police.Serialize())
-			if (fw.Police.Rate != nl.TcRateSpec{}) {
-				payload := SerializeRtab(fw.Rtab)
+			nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, filter.Police.Serialize())
+			if (filter.Police.Rate != nl.TcRateSpec{}) {
+				payload := SerializeRtab(filter.Rtab)
 				nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload)
 			}
-			if (fw.Police.PeakRate != nl.TcRateSpec{}) {
-				payload := SerializeRtab(fw.Ptab)
+			if (filter.Police.PeakRate != nl.TcRateSpec{}) {
+				payload := SerializeRtab(filter.Ptab)
 				nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload)
 			}
 		}
-		if fw.ClassId != 0 {
+		if filter.ClassId != 0 {
 			b := make([]byte, 4)
-			native.PutUint32(b, fw.ClassId)
+			native.PutUint32(b, filter.ClassId)
 			nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b)
 		}
-	} else if bpf, ok := filter.(*BpfFilter); ok {
+	case *BpfFilter:
 		var bpfFlags uint32
-		if bpf.ClassId != 0 {
-			nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(bpf.ClassId))
+		if filter.ClassId != 0 {
+			nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId))
 		}
-		if bpf.Fd >= 0 {
-			nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(bpf.Fd))))
+		if filter.Fd >= 0 {
+			nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd))))
 		}
-		if bpf.Name != "" {
-			nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(bpf.Name))
+		if filter.Name != "" {
+			nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name))
 		}
-		if bpf.DirectAction {
+		if filter.DirectAction {
 			bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT
 		}
 		nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go
index a04ceae..d37b087 100644
--- a/vendor/github.com/vishvananda/netlink/handle_linux.go
+++ b/vendor/github.com/vishvananda/netlink/handle_linux.go
@@ -45,12 +45,27 @@
 	}
 	tv := syscall.NsecToTimeval(to.Nanoseconds())
 	for _, sh := range h.sockets {
-		fd := sh.Socket.GetFd()
-		err := syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, &tv)
-		if err != nil {
+		if err := sh.Socket.SetSendTimeout(&tv); err != nil {
 			return err
 		}
-		err = syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, &tv)
+		if err := sh.Socket.SetReceiveTimeout(&tv); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SetSocketReceiveBufferSize sets the receive buffer size for each
+// socket in the netlink handle. The maximum value is capped by
+// /proc/sys/net/core/rmem_max.
+func (h *Handle) SetSocketReceiveBufferSize(size int, force bool) error {
+	opt := syscall.SO_RCVBUF
+	if force {
+		opt = syscall.SO_RCVBUFFORCE
+	}
+	for _, sh := range h.sockets {
+		fd := sh.Socket.GetFd()
+		err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, opt, size)
 		if err != nil {
 			return err
 		}
@@ -58,6 +73,24 @@
 	return nil
 }
 
+// GetSocketReceiveBufferSize gets the receive buffer size for each
+// socket in the netlink handle. The retrieved value should be
+// double the one set with SetSocketReceiveBufferSize.
+func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) {
+	results := make([]int, len(h.sockets))
+	i := 0
+	for _, sh := range h.sockets {
+		fd := sh.Socket.GetFd()
+		size, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF)
+		if err != nil {
+			return nil, err
+		}
+		results[i] = size
+		i++
+	}
+	return results, nil
+}
+
 // NewHandle returns a netlink handle on the network namespace
 // specified by ns. If ns=netns.None(), current network namespace
 // will be assumed
diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
index 32cf022..7da21a6 100644
--- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
@@ -145,6 +145,10 @@
 	return ErrNotImplemented
 }
 
+func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
+	return ErrNotImplemented
+}
+
 func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
 	return ErrNotImplemented
 }
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
index 547e92e..5aa3a17 100644
--- a/vendor/github.com/vishvananda/netlink/link.go
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -37,6 +37,7 @@
 	EncapType    string
 	Protinfo     *Protinfo
 	OperState    LinkOperState
+	NetNsID      int
 }
 
 // LinkOperState represents the values of the IFLA_OPERSTATE link
@@ -171,6 +172,7 @@
 	Fd       int
 	Attached bool
 	Flags    uint32
+	ProgId   uint32
 }
 
 // Device links cannot be created via netlink. These links
@@ -339,6 +341,7 @@
 	UDPCSum      bool
 	NoAge        bool
 	GBP          bool
+	FlowBased    bool
 	Age          int
 	Limit        int
 	Port         int
@@ -684,6 +687,7 @@
 	EncapType  uint16
 	EncapFlags uint16
 	Link       uint32
+	FlowBased  bool
 }
 
 func (gretap *Gretap) Attrs() *LinkAttrs {
@@ -729,6 +733,28 @@
 	return "vti"
 }
 
+type Gretun struct {
+	LinkAttrs
+	Link     uint32
+	IFlags   uint16
+	OFlags   uint16
+	IKey     uint32
+	OKey     uint32
+	Local    net.IP
+	Remote   net.IP
+	Ttl      uint8
+	Tos      uint8
+	PMtuDisc uint8
+}
+
+func (gretun *Gretun) Attrs() *LinkAttrs {
+	return &gretun.LinkAttrs
+}
+
+func (gretun *Gretun) Type() string {
+	return "gre"
+}
+
 type Vrf struct {
 	LinkAttrs
 	Table uint32
diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go
index 1c1bc52..e94fd97 100644
--- a/vendor/github.com/vishvananda/netlink/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/link_linux.go
@@ -379,6 +379,74 @@
 	return err
 }
 
+// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf spoofchk $check`
+func LinkSetVfSpoofchk(link Link, vf int, check bool) error {
+	return pkgHandle.LinkSetVfSpoofchk(link, vf, check)
+}
+
+// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf spoofchk $check`
+func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error {
+	var setting uint32
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+	info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+	if check {
+		setting = 1
+	}
+	vfmsg := nl.VfSpoofchk{
+		Vf:      uint32(vf),
+		Setting: setting,
+	}
+	nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize())
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// LinkSetVfTrust enables/disables trust state on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf trust $state`
+func LinkSetVfTrust(link Link, vf int, state bool) error {
+	return pkgHandle.LinkSetVfTrust(link, vf, state)
+}
+
+// LinkSetVfTrust enables/disables trust state on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf trust $state`
+func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error {
+	var setting uint32
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+	info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+	if state {
+		setting = 1
+	}
+	vfmsg := nl.VfTrust{
+		Vf:      uint32(vf),
+		Setting: setting,
+	}
+	nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize())
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
 // LinkSetMaster sets the master of the link device.
 // Equivalent to: `ip link set $link master $master`
 func LinkSetMaster(link Link, master *Bridge) error {
@@ -500,6 +568,12 @@
 // LinkSetXdpFd adds a bpf function to the driver. The fd must be a bpf
 // program loaded with bpf(type=BPF_PROG_TYPE_XDP)
 func LinkSetXdpFd(link Link, fd int) error {
+	return LinkSetXdpFdWithFlags(link, fd, 0)
+}
+
+// LinkSetXdpFdWithFlags adds a bpf function to the driver with the given
+// options. The fd must be a bpf program loaded with bpf(type=BPF_PROG_TYPE_XDP)
+func LinkSetXdpFdWithFlags(link Link, fd, flags int) error {
 	base := link.Attrs()
 	ensureIndex(base)
 	req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
@@ -508,7 +582,7 @@
 	msg.Index = int32(base.Index)
 	req.AddData(msg)
 
-	addXdpAttrs(&LinkXdp{Fd: fd}, req)
+	addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req)
 
 	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
 	return err
@@ -528,7 +602,13 @@
 
 func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
 	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+	if vxlan.FlowBased {
+		vxlan.VxlanId = 0
+	}
+
 	nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+
 	if vxlan.VtepDevIndex != 0 {
 		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
 	}
@@ -569,6 +649,9 @@
 	if vxlan.GBP {
 		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, []byte{})
 	}
+	if vxlan.FlowBased {
+		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased))
+	}
 	if vxlan.NoAge {
 		nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
 	} else if vxlan.Age > 0 {
@@ -818,16 +901,17 @@
 	linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
 	nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
 
-	if vlan, ok := link.(*Vlan); ok {
+	switch link := link.(type) {
+	case *Vlan:
 		b := make([]byte, 2)
-		native.PutUint16(b, uint16(vlan.VlanId))
+		native.PutUint16(b, uint16(link.VlanId))
 		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
 		nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
-	} else if veth, ok := link.(*Veth); ok {
+	case *Veth:
 		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
 		peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
 		nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
-		nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
+		nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName))
 		if base.TxQLen >= 0 {
 			nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
 		}
@@ -835,35 +919,37 @@
 			nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
 		}
 
-	} else if vxlan, ok := link.(*Vxlan); ok {
-		addVxlanAttrs(vxlan, linkInfo)
-	} else if bond, ok := link.(*Bond); ok {
-		addBondAttrs(bond, linkInfo)
-	} else if ipv, ok := link.(*IPVlan); ok {
+	case *Vxlan:
+		addVxlanAttrs(link, linkInfo)
+	case *Bond:
+		addBondAttrs(link, linkInfo)
+	case *IPVlan:
 		data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
-		nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
-	} else if macv, ok := link.(*Macvlan); ok {
-		if macv.Mode != MACVLAN_MODE_DEFAULT {
+		nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode)))
+	case *Macvlan:
+		if link.Mode != MACVLAN_MODE_DEFAULT {
 			data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
-			nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+			nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
 		}
-	} else if macv, ok := link.(*Macvtap); ok {
-		if macv.Mode != MACVLAN_MODE_DEFAULT {
+	case *Macvtap:
+		if link.Mode != MACVLAN_MODE_DEFAULT {
 			data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
-			nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+			nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
 		}
-	} else if gretap, ok := link.(*Gretap); ok {
-		addGretapAttrs(gretap, linkInfo)
-	} else if iptun, ok := link.(*Iptun); ok {
-		addIptunAttrs(iptun, linkInfo)
-	} else if vti, ok := link.(*Vti); ok {
-		addVtiAttrs(vti, linkInfo)
-	} else if vrf, ok := link.(*Vrf); ok {
-		addVrfAttrs(vrf, linkInfo)
-	} else if bridge, ok := link.(*Bridge); ok {
-		addBridgeAttrs(bridge, linkInfo)
-	} else if gtp, ok := link.(*GTP); ok {
-		addGTPAttrs(gtp, linkInfo)
+	case *Gretap:
+		addGretapAttrs(link, linkInfo)
+	case *Iptun:
+		addIptunAttrs(link, linkInfo)
+	case *Gretun:
+		addGretunAttrs(link, linkInfo)
+	case *Vti:
+		addVtiAttrs(link, linkInfo)
+	case *Vrf:
+		addVrfAttrs(link, linkInfo)
+	case *Bridge:
+		addBridgeAttrs(link, linkInfo)
+	case *GTP:
+		addGTPAttrs(link, linkInfo)
 	}
 
 	req.AddData(linkInfo)
@@ -1093,6 +1179,8 @@
 						link = &Gretap{}
 					case "ipip":
 						link = &Iptun{}
+					case "gre":
+						link = &Gretun{}
 					case "vti":
 						link = &Vti{}
 					case "vrf":
@@ -1124,6 +1212,8 @@
 						parseGretapData(link, data)
 					case "ipip":
 						parseIptunData(link, data)
+					case "gre":
+						parseGretunData(link, data)
 					case "vti":
 						parseVtiData(link, data)
 					case "vrf":
@@ -1178,6 +1268,8 @@
 			}
 		case syscall.IFLA_OPERSTATE:
 			base.OperState = LinkOperState(uint8(attr.Value[0]))
+		case nl.IFLA_LINK_NETNSID:
+			base.NetNsID = int(native.Uint32(attr.Value[0:4]))
 		}
 	}
 
@@ -1239,16 +1331,34 @@
 // LinkSubscribe takes a chan down which notifications will be sent
 // when links change.  Close the 'done' chan to stop subscription.
 func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error {
-	return linkSubscribe(netns.None(), netns.None(), ch, done)
+	return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil)
 }
 
 // LinkSubscribeAt works like LinkSubscribe plus it allows the caller
 // to choose the network namespace in which to subscribe (ns).
 func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
-	return linkSubscribe(ns, netns.None(), ch, done)
+	return linkSubscribeAt(ns, netns.None(), ch, done, nil)
 }
 
-func linkSubscribe(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
+// LinkSubscribeOptions contains a set of options to use with
+// LinkSubscribeWithOptions.
+type LinkSubscribeOptions struct {
+	Namespace     *netns.NsHandle
+	ErrorCallback func(error)
+}
+
+// LinkSubscribeWithOptions work like LinkSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error {
+	if options.Namespace == nil {
+		none := netns.None()
+		options.Namespace = &none
+	}
+	return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error {
 	s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK)
 	if err != nil {
 		return err
@@ -1264,12 +1374,18 @@
 		for {
 			msgs, err := s.Receive()
 			if err != nil {
+				if cberr != nil {
+					cberr(err)
+				}
 				return
 			}
 			for _, m := range msgs {
 				ifmsg := nl.DeserializeIfInfomsg(m.Data)
 				link, err := LinkDeserialize(&m.Header, m.Data)
 				if err != nil {
+					if cberr != nil {
+						cberr(err)
+					}
 					return
 				}
 				ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link}
@@ -1363,6 +1479,33 @@
 	return nil
 }
 
+// LinkSetTxQLen sets the transaction queue length for the link.
+// Equivalent to: `ip link set $link txqlen $qlen`
+func LinkSetTxQLen(link Link, qlen int) error {
+	return pkgHandle.LinkSetTxQLen(link, qlen)
+}
+
+// LinkSetTxQLen sets the transaction queue length for the link.
+// Equivalent to: `ip link set $link txqlen $qlen`
+func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(qlen))
+
+	data := nl.NewRtAttr(syscall.IFLA_TXQLEN, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
 func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
 	vlan := link.(*Vlan)
 	for _, datum := range data {
@@ -1407,6 +1550,8 @@
 			vxlan.UDPCSum = int8(datum.Value[0]) != 0
 		case nl.IFLA_VXLAN_GBP:
 			vxlan.GBP = true
+		case nl.IFLA_VXLAN_FLOWBASED:
+			vxlan.FlowBased = int8(datum.Value[0]) != 0
 		case nl.IFLA_VXLAN_AGEING:
 			vxlan.Age = int(native.Uint32(datum.Value[0:4]))
 			vxlan.NoAge = vxlan.Age == 0
@@ -1547,6 +1692,12 @@
 func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) {
 	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
 
+	if gretap.FlowBased {
+		// In flow based mode, no other attributes need to be configured
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased))
+		return
+	}
+
 	ip := gretap.Local.To4()
 	if ip != nil {
 		nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
@@ -1613,6 +1764,69 @@
 			gre.EncapType = native.Uint16(datum.Value[0:2])
 		case nl.IFLA_GRE_ENCAP_FLAGS:
 			gre.EncapFlags = native.Uint16(datum.Value[0:2])
+		case nl.IFLA_GRE_COLLECT_METADATA:
+			gre.FlowBased = int8(datum.Value[0]) != 0
+		}
+	}
+}
+
+func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) {
+	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+	ip := gre.Local.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
+	}
+	ip = gre.Remote.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+	}
+
+	if gre.IKey != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gre.IKey))
+		gre.IFlags |= uint16(nl.GRE_KEY)
+	}
+
+	if gre.OKey != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gre.OKey))
+		gre.OFlags |= uint16(nl.GRE_KEY)
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gre.IFlags))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gre.OFlags))
+
+	if gre.Link != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link))
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos))
+}
+
+func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) {
+	gre := link.(*Gretun)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_GRE_OKEY:
+			gre.IKey = ntohl(datum.Value[0:4])
+		case nl.IFLA_GRE_IKEY:
+			gre.OKey = ntohl(datum.Value[0:4])
+		case nl.IFLA_GRE_LOCAL:
+			gre.Local = net.IP(datum.Value[0:4])
+		case nl.IFLA_GRE_REMOTE:
+			gre.Remote = net.IP(datum.Value[0:4])
+		case nl.IFLA_GRE_IFLAGS:
+			gre.IFlags = ntohs(datum.Value[0:2])
+		case nl.IFLA_GRE_OFLAGS:
+			gre.OFlags = ntohs(datum.Value[0:2])
+
+		case nl.IFLA_GRE_TTL:
+			gre.Ttl = uint8(datum.Value[0])
+		case nl.IFLA_GRE_TOS:
+			gre.Tos = uint8(datum.Value[0])
+		case nl.IFLA_GRE_PMTUDISC:
+			gre.PMtuDisc = uint8(datum.Value[0])
 		}
 	}
 }
@@ -1630,8 +1844,10 @@
 	b := make([]byte, 4)
 	native.PutUint32(b, uint32(xdp.Fd))
 	nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b)
-	native.PutUint32(b, xdp.Flags)
-	nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+	if xdp.Flags != 0 {
+		native.PutUint32(b, xdp.Flags)
+		nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+	}
 	req.AddData(attrs)
 }
 
@@ -1649,6 +1865,8 @@
 			xdp.Attached = attr.Value[0] != 0
 		case nl.IFLA_XDP_FLAGS:
 			xdp.Flags = native.Uint32(attr.Value[0:4])
+		case nl.IFLA_XDP_PROG_ID:
+			xdp.ProgId = native.Uint32(attr.Value[0:4])
 		}
 	}
 	return xdp, nil
diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go
index 0e5eb90..6a6f71c 100644
--- a/vendor/github.com/vishvananda/netlink/neigh.go
+++ b/vendor/github.com/vishvananda/netlink/neigh.go
@@ -14,6 +14,7 @@
 	Flags        int
 	IP           net.IP
 	HardwareAddr net.HardwareAddr
+	LLIPAddr     net.IP // Used in the case of NHRP
 }
 
 // String returns $ip/$hwaddr $label
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
index f069db2..5edc8b4 100644
--- a/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -128,6 +128,7 @@
 
 func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
 	var family int
+
 	if neigh.Family > 0 {
 		family = neigh.Family
 	} else {
@@ -151,7 +152,10 @@
 	dstData := nl.NewRtAttr(NDA_DST, ipData)
 	req.AddData(dstData)
 
-	if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil {
+	if neigh.LLIPAddr != nil {
+		llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4())
+		req.AddData(llIPData)
+	} else if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil {
 		hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
 		req.AddData(hwData)
 	}
@@ -237,12 +241,33 @@
 		return nil, err
 	}
 
+	// This should be cached for performance
+	// once per table dump
+	link, err := LinkByIndex(neigh.LinkIndex)
+	if err != nil {
+		return nil, err
+	}
+	encapType := link.Attrs().EncapType
+
 	for _, attr := range attrs {
 		switch attr.Attr.Type {
 		case NDA_DST:
 			neigh.IP = net.IP(attr.Value)
 		case NDA_LLADDR:
-			neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+			// BUG: Is this a bug in the netlink library?
+			// #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
+			// #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
+			attrLen := attr.Attr.Len - syscall.SizeofRtAttr
+			if attrLen == 4 && (encapType == "ipip" ||
+				encapType == "sit" ||
+				encapType == "gre") {
+				neigh.LLIPAddr = net.IP(attr.Value)
+			} else if attrLen == 16 &&
+				encapType == "tunnel6" {
+				neigh.IP = net.IP(attr.Value)
+			} else {
+				neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+			}
 		}
 	}
 
diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
index 2d57c16..86111b9 100644
--- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -108,6 +108,10 @@
 	return ErrNotImplemented
 }
 
+func LinkSetTxQLen(link Link, qlen int) error {
+	return ErrNotImplemented
+}
+
 func LinkAdd(link Link) error {
 	return ErrNotImplemented
 }
diff --git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
new file mode 100644
index 0000000..6c0d333
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
@@ -0,0 +1,74 @@
+package nl
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+const (
+	SizeofBridgeVlanInfo = 0x04
+)
+
+/* Bridge Flags */
+const (
+	BRIDGE_FLAGS_MASTER = iota /* Bridge command to/from master */
+	BRIDGE_FLAGS_SELF          /* Bridge command to/from lowerdev */
+)
+
+/* Bridge management nested attributes
+ * [IFLA_AF_SPEC] = {
+ *     [IFLA_BRIDGE_FLAGS]
+ *     [IFLA_BRIDGE_MODE]
+ *     [IFLA_BRIDGE_VLAN_INFO]
+ * }
+ */
+const (
+	IFLA_BRIDGE_FLAGS = iota
+	IFLA_BRIDGE_MODE
+	IFLA_BRIDGE_VLAN_INFO
+)
+
+const (
+	BRIDGE_VLAN_INFO_MASTER = 1 << iota
+	BRIDGE_VLAN_INFO_PVID
+	BRIDGE_VLAN_INFO_UNTAGGED
+	BRIDGE_VLAN_INFO_RANGE_BEGIN
+	BRIDGE_VLAN_INFO_RANGE_END
+)
+
+// struct bridge_vlan_info {
+//   __u16 flags;
+//   __u16 vid;
+// };
+
+type BridgeVlanInfo struct {
+	Flags uint16
+	Vid   uint16
+}
+
+func (b *BridgeVlanInfo) Serialize() []byte {
+	return (*(*[SizeofBridgeVlanInfo]byte)(unsafe.Pointer(b)))[:]
+}
+
+func DeserializeBridgeVlanInfo(b []byte) *BridgeVlanInfo {
+	return (*BridgeVlanInfo)(unsafe.Pointer(&b[0:SizeofBridgeVlanInfo][0]))
+}
+
+func (b *BridgeVlanInfo) PortVID() bool {
+	return b.Flags&BRIDGE_VLAN_INFO_PVID > 0
+}
+
+func (b *BridgeVlanInfo) EngressUntag() bool {
+	return b.Flags&BRIDGE_VLAN_INFO_UNTAGGED > 0
+}
+
+func (b *BridgeVlanInfo) String() string {
+	return fmt.Sprintf("%+v", *b)
+}
+
+/* New extended info filters for IFLA_EXT_MASK */
+const (
+	RTEXT_FILTER_VF = 1 << iota
+	RTEXT_FILTER_BRVLAN
+	RTEXT_FILTER_BRVLAN_COMPRESSED
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
index 6692b53..380cc59 100644
--- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
@@ -79,8 +79,8 @@
 	CTA_TUPLE_ORIG  = 1
 	CTA_TUPLE_REPLY = 2
 	CTA_STATUS      = 3
-	CTA_TIMEOUT     = 8
-	CTA_MARK        = 9
+	CTA_TIMEOUT     = 7
+	CTA_MARK        = 8
 	CTA_PROTOINFO   = 4
 )
 
diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
index f7b9575..9ae65a1 100644
--- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
@@ -231,7 +231,8 @@
 	 * on/off switch
 	 */
 	IFLA_VF_STATS /* network device statistics */
-	IFLA_VF_MAX   = IFLA_VF_STATS
+	IFLA_VF_TRUST /* Trust state of VF */
+	IFLA_VF_MAX   = IFLA_VF_TRUST
 )
 
 const (
@@ -259,6 +260,7 @@
 	SizeofVfSpoofchk   = 0x08
 	SizeofVfLinkState  = 0x08
 	SizeofVfRssQueryEn = 0x08
+	SizeofVfTrust      = 0x08
 )
 
 // struct ifla_vf_mac {
@@ -419,12 +421,42 @@
 	return (*(*[SizeofVfRssQueryEn]byte)(unsafe.Pointer(msg)))[:]
 }
 
+// struct ifla_vf_trust {
+//   __u32 vf;
+//   __u32 setting;
+// };
+
+type VfTrust struct {
+	Vf      uint32
+	Setting uint32
+}
+
+func (msg *VfTrust) Len() int {
+	return SizeofVfTrust
+}
+
+func DeserializeVfTrust(b []byte) *VfTrust {
+	return (*VfTrust)(unsafe.Pointer(&b[0:SizeofVfTrust][0]))
+}
+
+func (msg *VfTrust) Serialize() []byte {
+	return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:]
+}
+
+const (
+	XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota
+	XDP_FLAGS_SKB_MODE
+	XDP_FLAGS_DRV_MODE
+	XDP_FLAGS_MASK = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE
+)
+
 const (
 	IFLA_XDP_UNSPEC   = iota
 	IFLA_XDP_FD       /* fd of xdp program to attach, or -1 to remove */
 	IFLA_XDP_ATTACHED /* read-only bool indicating if prog is attached */
 	IFLA_XDP_FLAGS    /* xdp prog related flags */
-	IFLA_XDP_MAX      = IFLA_XDP_FLAGS
+	IFLA_XDP_PROG_ID  /* xdp prog id */
+	IFLA_XDP_MAX      = IFLA_XDP_PROG_ID
 )
 
 const (
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index 1329acd..72f7f6a 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -621,6 +621,20 @@
 	return syscall.ParseNetlinkMessage(rb)
 }
 
+// SetSendTimeout allows to set a send timeout on the socket
+func (s *NetlinkSocket) SetSendTimeout(timeout *syscall.Timeval) error {
+	// Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine
+	// remains stuck on a send on a closed fd
+	return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, timeout)
+}
+
+// SetReceiveTimeout allows to set a receive timeout on the socket
+func (s *NetlinkSocket) SetReceiveTimeout(timeout *syscall.Timeval) error {
+	// Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine
+	// remains stuck on a recvmsg on a closed fd
+	return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, timeout)
+}
+
 func (s *NetlinkSocket) GetPid() (uint32, error) {
 	fd := int(atomic.LoadInt32(&s.fd))
 	lsa, err := syscall.Getsockname(fd)
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
index 2c0dedd..1123396 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -160,71 +160,73 @@
 	req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type())))
 
 	options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
-	if prio, ok := qdisc.(*Prio); ok {
+
+	switch qdisc := qdisc.(type) {
+	case *Prio:
 		tcmap := nl.TcPrioMap{
-			Bands:   int32(prio.Bands),
-			Priomap: prio.PriorityMap,
+			Bands:   int32(qdisc.Bands),
+			Priomap: qdisc.PriorityMap,
 		}
 		options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize())
-	} else if tbf, ok := qdisc.(*Tbf); ok {
+	case *Tbf:
 		opt := nl.TcTbfQopt{}
-		opt.Rate.Rate = uint32(tbf.Rate)
-		opt.Peakrate.Rate = uint32(tbf.Peakrate)
-		opt.Limit = tbf.Limit
-		opt.Buffer = tbf.Buffer
+		opt.Rate.Rate = uint32(qdisc.Rate)
+		opt.Peakrate.Rate = uint32(qdisc.Peakrate)
+		opt.Limit = qdisc.Limit
+		opt.Buffer = qdisc.Buffer
 		nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize())
-		if tbf.Rate >= uint64(1<<32) {
-			nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(tbf.Rate))
+		if qdisc.Rate >= uint64(1<<32) {
+			nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate))
 		}
-		if tbf.Peakrate >= uint64(1<<32) {
-			nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(tbf.Peakrate))
+		if qdisc.Peakrate >= uint64(1<<32) {
+			nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate))
 		}
-		if tbf.Peakrate > 0 {
-			nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(tbf.Minburst))
+		if qdisc.Peakrate > 0 {
+			nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst))
 		}
-	} else if htb, ok := qdisc.(*Htb); ok {
+	case *Htb:
 		opt := nl.TcHtbGlob{}
-		opt.Version = htb.Version
-		opt.Rate2Quantum = htb.Rate2Quantum
-		opt.Defcls = htb.Defcls
+		opt.Version = qdisc.Version
+		opt.Rate2Quantum = qdisc.Rate2Quantum
+		opt.Defcls = qdisc.Defcls
 		// TODO: Handle Debug properly. For now default to 0
-		opt.Debug = htb.Debug
-		opt.DirectPkts = htb.DirectPkts
+		opt.Debug = qdisc.Debug
+		opt.DirectPkts = qdisc.DirectPkts
 		nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize())
 		// nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize())
-	} else if netem, ok := qdisc.(*Netem); ok {
+	case *Netem:
 		opt := nl.TcNetemQopt{}
-		opt.Latency = netem.Latency
-		opt.Limit = netem.Limit
-		opt.Loss = netem.Loss
-		opt.Gap = netem.Gap
-		opt.Duplicate = netem.Duplicate
-		opt.Jitter = netem.Jitter
+		opt.Latency = qdisc.Latency
+		opt.Limit = qdisc.Limit
+		opt.Loss = qdisc.Loss
+		opt.Gap = qdisc.Gap
+		opt.Duplicate = qdisc.Duplicate
+		opt.Jitter = qdisc.Jitter
 		options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize())
 		// Correlation
 		corr := nl.TcNetemCorr{}
-		corr.DelayCorr = netem.DelayCorr
-		corr.LossCorr = netem.LossCorr
-		corr.DupCorr = netem.DuplicateCorr
+		corr.DelayCorr = qdisc.DelayCorr
+		corr.LossCorr = qdisc.LossCorr
+		corr.DupCorr = qdisc.DuplicateCorr
 
 		if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 {
 			nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize())
 		}
 		// Corruption
 		corruption := nl.TcNetemCorrupt{}
-		corruption.Probability = netem.CorruptProb
-		corruption.Correlation = netem.CorruptCorr
+		corruption.Probability = qdisc.CorruptProb
+		corruption.Correlation = qdisc.CorruptCorr
 		if corruption.Probability > 0 {
 			nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize())
 		}
 		// Reorder
 		reorder := nl.TcNetemReorder{}
-		reorder.Probability = netem.ReorderProb
-		reorder.Correlation = netem.ReorderCorr
+		reorder.Probability = qdisc.ReorderProb
+		reorder.Correlation = qdisc.ReorderCorr
 		if reorder.Probability > 0 {
 			nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize())
 		}
-	} else if _, ok := qdisc.(*Ingress); ok {
+	case *Ingress:
 		// ingress filters must use the proper handle
 		if qdisc.Attrs().Parent != HANDLE_INGRESS {
 			return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS")
diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go
index 03ac4b2..68c6a22 100644
--- a/vendor/github.com/vishvananda/netlink/route.go
+++ b/vendor/github.com/vishvananda/netlink/route.go
@@ -16,6 +16,7 @@
 	Decode([]byte) error
 	Encode() ([]byte, error)
 	String() string
+	Equal(Destination) bool
 }
 
 type Encap interface {
@@ -23,6 +24,7 @@
 	Decode([]byte) error
 	Encode() ([]byte, error)
 	String() string
+	Equal(Encap) bool
 }
 
 // Route represents a netlink route.
@@ -72,6 +74,25 @@
 	return fmt.Sprintf("{%s}", strings.Join(elems, " "))
 }
 
+func (r Route) Equal(x Route) bool {
+	return r.LinkIndex == x.LinkIndex &&
+		r.ILinkIndex == x.ILinkIndex &&
+		r.Scope == x.Scope &&
+		ipNetEqual(r.Dst, x.Dst) &&
+		r.Src.Equal(x.Src) &&
+		r.Gw.Equal(x.Gw) &&
+		nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) &&
+		r.Protocol == x.Protocol &&
+		r.Priority == x.Priority &&
+		r.Table == x.Table &&
+		r.Type == x.Type &&
+		r.Tos == x.Tos &&
+		r.Flags == x.Flags &&
+		(r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) &&
+		(r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) &&
+		(r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap)))
+}
+
 func (r *Route) SetFlag(flag NextHopFlag) {
 	r.Flags |= int(flag)
 }
@@ -110,7 +131,46 @@
 		elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap))
 	}
 	elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1))
-	elems = append(elems, fmt.Sprintf("Gw: %d", n.Gw))
+	elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw))
 	elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags()))
 	return fmt.Sprintf("{%s}", strings.Join(elems, " "))
 }
+
+func (n NexthopInfo) Equal(x NexthopInfo) bool {
+	return n.LinkIndex == x.LinkIndex &&
+		n.Hops == x.Hops &&
+		n.Gw.Equal(x.Gw) &&
+		n.Flags == x.Flags &&
+		(n.NewDst == x.NewDst || (n.NewDst != nil && n.NewDst.Equal(x.NewDst))) &&
+		(n.Encap == x.Encap || (n.Encap != nil && n.Encap.Equal(x.Encap)))
+}
+
+type nexthopInfoSlice []*NexthopInfo
+
+func (n nexthopInfoSlice) Equal(x []*NexthopInfo) bool {
+	if len(n) != len(x) {
+		return false
+	}
+	for i := range n {
+		if (n[i] == nil) != (x[i] == nil) {
+			return false
+		}
+		if n[i] != nil && !n[i].Equal(*x[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// ipNetEqual returns true iff both IPNet are equal
+func ipNetEqual(ipn1 *net.IPNet, ipn2 *net.IPNet) bool {
+	if ipn1 == ipn2 {
+		return true
+	}
+	if ipn1 == nil || ipn2 == nil {
+		return false
+	}
+	m1, _ := ipn1.Mask.Size()
+	m2, _ := ipn2.Mask.Size()
+	return m1 == m2 && ipn1.IP.Equal(ipn2.IP)
+}
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
index cd739e7..9234c69 100644
--- a/vendor/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -86,6 +86,34 @@
 	return strings.Join(s, "/")
 }
 
+func (d *MPLSDestination) Equal(x Destination) bool {
+	o, ok := x.(*MPLSDestination)
+	if !ok {
+		return false
+	}
+	if d == nil && o == nil {
+		return true
+	}
+	if d == nil || o == nil {
+		return false
+	}
+	if d.Labels == nil && o.Labels == nil {
+		return true
+	}
+	if d.Labels == nil || o.Labels == nil {
+		return false
+	}
+	if len(d.Labels) != len(o.Labels) {
+		return false
+	}
+	for i := range d.Labels {
+		if d.Labels[i] != o.Labels[i] {
+			return false
+		}
+	}
+	return true
+}
+
 type MPLSEncap struct {
 	Labels []int
 }
@@ -129,6 +157,34 @@
 	return strings.Join(s, "/")
 }
 
+func (e *MPLSEncap) Equal(x Encap) bool {
+	o, ok := x.(*MPLSEncap)
+	if !ok {
+		return false
+	}
+	if e == nil && o == nil {
+		return true
+	}
+	if e == nil || o == nil {
+		return false
+	}
+	if e.Labels == nil && o.Labels == nil {
+		return true
+	}
+	if e.Labels == nil || o.Labels == nil {
+		return false
+	}
+	if len(e.Labels) != len(o.Labels) {
+		return false
+	}
+	for i := range e.Labels {
+		if e.Labels[i] != o.Labels[i] {
+			return false
+		}
+	}
+	return true
+}
+
 // RouteAdd will add a route to the system.
 // Equivalent to: `ip route add $route`
 func RouteAdd(route *Route) error {
@@ -421,19 +477,8 @@
 				continue
 			case filterMask&RT_FILTER_DST != 0:
 				if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) {
-					if filter.Dst == nil {
-						if route.Dst != nil {
-							continue
-						}
-					} else {
-						if route.Dst == nil {
-							continue
-						}
-						aMaskLen, aMaskBits := route.Dst.Mask.Size()
-						bMaskLen, bMaskBits := filter.Dst.Mask.Size()
-						if !(route.Dst.IP.Equal(filter.Dst.IP) && aMaskLen == bMaskLen && aMaskBits == bMaskBits) {
-							continue
-						}
+					if !ipNetEqual(route.Dst, filter.Dst) {
+						continue
 					}
 				}
 			}
@@ -633,16 +678,34 @@
 // RouteSubscribe takes a chan down which notifications will be sent
 // when routes are added or deleted. Close the 'done' chan to stop subscription.
 func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error {
-	return routeSubscribeAt(netns.None(), netns.None(), ch, done)
+	return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil)
 }
 
 // RouteSubscribeAt works like RouteSubscribe plus it allows the caller
 // to choose the network namespace in which to subscribe (ns).
 func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
-	return routeSubscribeAt(ns, netns.None(), ch, done)
+	return routeSubscribeAt(ns, netns.None(), ch, done, nil)
 }
 
-func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
+// RouteSubscribeOptions contains a set of options to use with
+// RouteSubscribeWithOptions.
+type RouteSubscribeOptions struct {
+	Namespace     *netns.NsHandle
+	ErrorCallback func(error)
+}
+
+// RouteSubscribeWithOptions work like RouteSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error {
+	if options.Namespace == nil {
+		none := netns.None()
+		options.Namespace = &none
+	}
+	return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error {
 	s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE)
 	if err != nil {
 		return err
@@ -658,11 +721,17 @@
 		for {
 			msgs, err := s.Receive()
 			if err != nil {
+				if cberr != nil {
+					cberr(err)
+				}
 				return
 			}
 			for _, m := range msgs {
 				route, err := deserializeRoute(m.Data)
 				if err != nil {
+					if cberr != nil {
+						cberr(err)
+					}
 					return
 				}
 				ch <- RouteUpdate{Type: m.Header.Type, Route: route}
diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go
index f0243de..e4d9168 100644
--- a/vendor/github.com/vishvananda/netlink/rule.go
+++ b/vendor/github.com/vishvananda/netlink/rule.go
@@ -8,6 +8,7 @@
 // Rule represents a netlink rule.
 type Rule struct {
 	Priority          int
+	Family            int
 	Table             int
 	Mark              int
 	Mask              int
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
index f9cdc85..cbd91a5 100644
--- a/vendor/github.com/vishvananda/netlink/rule_linux.go
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -37,6 +37,9 @@
 func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
 	msg := nl.NewRtMsg()
 	msg.Family = syscall.AF_INET
+	if rule.Family != 0 {
+		msg.Family = uint8(rule.Family)
+	}
 	var dstFamily uint8
 
 	var rtAttrs []*nl.RtAttr
diff --git a/volume/local/local.go b/volume/local/local.go
index b37c45e..c11a18c 100644
--- a/volume/local/local.go
+++ b/volume/local/local.go
@@ -139,16 +139,6 @@
 	return volume.DefaultDriverName
 }
 
-type alreadyExistsError struct {
-	path string
-}
-
-func (e alreadyExistsError) Error() string {
-	return "local volume already exists under " + e.path
-}
-
-func (e alreadyExistsError) Conflict() {}
-
 type systemError struct {
 	err error
 }
@@ -181,9 +171,6 @@
 
 	path := r.DataPath(name)
 	if err := idtools.MkdirAllAndChown(path, 0755, r.rootIDs); err != nil {
-		if os.IsExist(err) {
-			return nil, alreadyExistsError{filepath.Dir(path)}
-		}
 		return nil, errors.Wrapf(systemError{err}, "error while creating volume path '%s'", path)
 	}
 
diff --git a/volume/volume.go b/volume/volume.go
index b8ec1e5..207dc56 100644
--- a/volume/volume.go
+++ b/volume/volume.go
@@ -192,7 +192,6 @@
 		return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
 	}
 
-	// system.MkdirAll() produces an error if m.Source exists and is a file (not a directory),
 	if m.Type == mounttypes.TypeBind {
 		// Before creating the source directory on the host, invoke checkFun if it's not nil. One of
 		// the use case is to forbid creating the daemon socket as a directory if the daemon is in