Merge pull request #23594 from tiborvass/changelog-fix

v1.12.0: some fixes to CHANGELOG
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1e7a093..3f9d0e3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -243,7 +243,7 @@
 
 ### Misc
 
-+ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/c))
++ When saving linked images together with `docker save` a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385))
 + Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325))
 + Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270))
 * The `dockremap` is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266))
diff --git a/Dockerfile b/Dockerfile
index ab58e37..8c80b9a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -233,10 +233,10 @@
 	&& rm -rf "$GOPATH"
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64
index 2333d47..564d898 100644
--- a/Dockerfile.aarch64
+++ b/Dockerfile.aarch64
@@ -180,10 +180,10 @@
 	&& rm -rf "$GOPATH"
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
index 08b4d31..291915c 100644
--- a/Dockerfile.armhf
+++ b/Dockerfile.armhf
@@ -189,10 +189,10 @@
 	&& rm -rf "$GOPATH"
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
diff --git a/Dockerfile.gccgo b/Dockerfile.gccgo
index 03e8a2a..dc7780e 100644
--- a/Dockerfile.gccgo
+++ b/Dockerfile.gccgo
@@ -74,10 +74,10 @@
 ENV DOCKER_BUILDTAGS apparmor seccomp selinux
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-    && git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+    && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
index 4ceed03..357f927 100644
--- a/Dockerfile.ppc64le
+++ b/Dockerfile.ppc64le
@@ -204,10 +204,10 @@
 	&& rm -rf "$GOPATH"
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="apparmor seccomp selinux" \
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
index ebb72c9..eab490c 100644
--- a/Dockerfile.s390x
+++ b/Dockerfile.s390x
@@ -161,7 +161,7 @@
 
 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor selinux
+ENV DOCKER_BUILDTAGS apparmor selinux seccomp
 
 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
@@ -197,10 +197,10 @@
 	&& rm -rf "$GOPATH"
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
diff --git a/Dockerfile.simple b/Dockerfile.simple
index 00d53bd..9e11bb1 100644
--- a/Dockerfile.simple
+++ b/Dockerfile.simple
@@ -57,10 +57,10 @@
 ENV CGO_LDFLAGS -L/lib
 
 # Install runc
-ENV RUNC_COMMIT 5ce88a95f6cf218ba7f3309562f95464a968e890
+ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
-	&& git clone https://github.com/crosbymichael/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
+	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
 	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
 	&& git checkout -q "$RUNC_COMMIT" \
 	&& make static BUILDTAGS="seccomp apparmor selinux" \
diff --git a/api/client/bundlefile/bundlefile.go b/api/client/bundlefile/bundlefile.go
index 0e31772..75c2d07 100644
--- a/api/client/bundlefile/bundlefile.go
+++ b/api/client/bundlefile/bundlefile.go
@@ -4,8 +4,8 @@
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
-	"os"
 )
 
 // Bundlefile stores the contents of a bundlefile
@@ -34,19 +34,28 @@
 }
 
 // LoadFile loads a bundlefile from a path to the file
-func LoadFile(path string) (*Bundlefile, error) {
-	reader, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-
+func LoadFile(reader io.Reader) (*Bundlefile, error) {
 	bundlefile := &Bundlefile{}
 
-	if err := json.NewDecoder(reader).Decode(bundlefile); err != nil {
+	decoder := json.NewDecoder(reader)
+	if err := decoder.Decode(bundlefile); err != nil {
+		switch jsonErr := err.(type) {
+		case *json.SyntaxError:
+			return nil, fmt.Errorf(
+				"JSON syntax error at byte %v: %s",
+				jsonErr.Offset,
+				jsonErr.Error())
+		case *json.UnmarshalTypeError:
+			return nil, fmt.Errorf(
+				"Unexpected type at byte %v. Expected %s but received %s.",
+				jsonErr.Offset,
+				jsonErr.Type,
+				jsonErr.Value)
+		}
 		return nil, err
 	}
 
-	return bundlefile, err
+	return bundlefile, nil
 }
 
 // Print writes the contents of the bundlefile to the output writer
diff --git a/api/client/bundlefile/bundlefile_test.go b/api/client/bundlefile/bundlefile_test.go
new file mode 100644
index 0000000..1ff8235
--- /dev/null
+++ b/api/client/bundlefile/bundlefile_test.go
@@ -0,0 +1,79 @@
+// +build experimental
+
+package bundlefile
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/pkg/testutil/assert"
+)
+
+func TestLoadFileV01Success(t *testing.T) {
+	reader := strings.NewReader(`{
+		"Version": "0.1",
+		"Services": {
+			"redis": {
+				"Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce",
+				"Networks": ["default"]
+			},
+			"web": {
+				"Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d",
+				"Networks": ["default"],
+				"User": "web"
+			}
+		}
+	}`)
+
+	bundle, err := LoadFile(reader)
+	assert.NilError(t, err)
+	assert.Equal(t, bundle.Version, "0.1")
+	assert.Equal(t, len(bundle.Services), 2)
+}
+
+func TestLoadFileSyntaxError(t *testing.T) {
+	reader := strings.NewReader(`{
+		"Version": "0.1",
+		"Services": unquoted string
+	}`)
+
+	_, err := LoadFile(reader)
+	assert.Error(t, err, "syntax error at byte 37: invalid character 'u'")
+}
+
+func TestLoadFileTypeError(t *testing.T) {
+	reader := strings.NewReader(`{
+		"Version": "0.1",
+		"Services": {
+			"web": {
+				"Image": "redis",
+				"Networks": "none"
+			}
+		}
+	}`)
+
+	_, err := LoadFile(reader)
+	assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string")
+}
+
+func TestPrint(t *testing.T) {
+	var buffer bytes.Buffer
+	bundle := &Bundlefile{
+		Version: "0.1",
+		Services: map[string]Service{
+			"web": {
+				Image:   "image",
+				Command: []string{"echo", "something"},
+			},
+		},
+	}
+	assert.NilError(t, Print(&buffer, bundle))
+	output := buffer.String()
+	assert.Contains(t, output, "\"Image\": \"image\"")
+	assert.Contains(t, output,
+		`"Command": [
+                "echo",
+                "something"
+            ]`)
+}
diff --git a/api/client/container/restart.go b/api/client/container/restart.go
index 98618b5..04544e4 100644
--- a/api/client/container/restart.go
+++ b/api/client/container/restart.go
@@ -41,7 +41,8 @@
 	ctx := context.Background()
 	var errs []string
 	for _, name := range opts.containers {
-		if err := dockerCli.Client().ContainerRestart(ctx, name, time.Duration(opts.nSeconds)*time.Second); err != nil {
+		timeout := time.Duration(opts.nSeconds) * time.Second
+		if err := dockerCli.Client().ContainerRestart(ctx, name, &timeout); err != nil {
 			errs = append(errs, err.Error())
 		} else {
 			fmt.Fprintf(dockerCli.Out(), "%s\n", name)
diff --git a/api/client/container/stop.go b/api/client/container/stop.go
index 40b02b7..b05b0e3 100644
--- a/api/client/container/stop.go
+++ b/api/client/container/stop.go
@@ -43,7 +43,8 @@
 
 	var errs []string
 	for _, container := range opts.containers {
-		if err := dockerCli.Client().ContainerStop(ctx, container, time.Duration(opts.time)*time.Second); err != nil {
+		timeout := time.Duration(opts.time) * time.Second
+		if err := dockerCli.Client().ContainerStop(ctx, container, &timeout); err != nil {
 			errs = append(errs, err.Error())
 		} else {
 			fmt.Fprintf(dockerCli.Out(), "%s\n", container)
diff --git a/api/client/formatter/formatter.go b/api/client/formatter/formatter.go
index a52ec8e..1e250a2 100644
--- a/api/client/formatter/formatter.go
+++ b/api/client/formatter/formatter.go
@@ -155,6 +155,10 @@
 	ctx.postformat(tmpl, &containerContext{})
 }
 
+func isDangling(image types.Image) bool {
+	return len(image.RepoTags) == 1 && image.RepoTags[0] == "<none>:<none>" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "<none>@<none>"
+}
+
 func (ctx ImageContext) Write() {
 	switch ctx.Format {
 	case tableFormatKey:
@@ -200,42 +204,98 @@
 	}
 
 	for _, image := range ctx.Images {
+		images := []*imageContext{}
+		if isDangling(image) {
+			images = append(images, &imageContext{
+				trunc:  ctx.Trunc,
+				i:      image,
+				repo:   "<none>",
+				tag:    "<none>",
+				digest: "<none>",
+			})
+		} else {
+			repoTags := map[string][]string{}
+			repoDigests := map[string][]string{}
 
-		repoTags := image.RepoTags
-		repoDigests := image.RepoDigests
-
-		if len(repoTags) == 1 && repoTags[0] == "<none>:<none>" && len(repoDigests) == 1 && repoDigests[0] == "<none>@<none>" {
-			// dangling image - clear out either repoTags or repoDigests so we only show it once below
-			repoDigests = []string{}
-		}
-		// combine the tags and digests lists
-		tagsAndDigests := append(repoTags, repoDigests...)
-		for _, repoAndRef := range tagsAndDigests {
-			repo := "<none>"
-			tag := "<none>"
-			digest := "<none>"
-
-			if !strings.HasPrefix(repoAndRef, "<none>") {
-				ref, err := reference.ParseNamed(repoAndRef)
+			for _, refString := range image.RepoTags {
+				ref, err := reference.ParseNamed(refString)
 				if err != nil {
 					continue
 				}
-				repo = ref.Name()
-
-				switch x := ref.(type) {
-				case reference.Canonical:
-					digest = x.Digest().String()
-				case reference.NamedTagged:
-					tag = x.Tag()
+				if nt, ok := ref.(reference.NamedTagged); ok {
+					repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag())
 				}
 			}
-			imageCtx := &imageContext{
-				trunc:  ctx.Trunc,
-				i:      image,
-				repo:   repo,
-				tag:    tag,
-				digest: digest,
+			for _, refString := range image.RepoDigests {
+				ref, err := reference.ParseNamed(refString)
+				if err != nil {
+					continue
+				}
+				if c, ok := ref.(reference.Canonical); ok {
+					repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String())
+				}
 			}
+
+			for repo, tags := range repoTags {
+				digests := repoDigests[repo]
+
+				// Do not display digests as their own row
+				delete(repoDigests, repo)
+
+				if !ctx.Digest {
+					// Ignore digest references, just show tag once
+					digests = nil
+				}
+
+				for _, tag := range tags {
+					if len(digests) == 0 {
+						images = append(images, &imageContext{
+							trunc:  ctx.Trunc,
+							i:      image,
+							repo:   repo,
+							tag:    tag,
+							digest: "<none>",
+						})
+						continue
+					}
+					// Display the digests for each tag
+					for _, dgst := range digests {
+						images = append(images, &imageContext{
+							trunc:  ctx.Trunc,
+							i:      image,
+							repo:   repo,
+							tag:    tag,
+							digest: dgst,
+						})
+					}
+
+				}
+			}
+
+			// Show rows for remaining digest only references
+			for repo, digests := range repoDigests {
+				// If digests are displayed, show row per digest
+				if ctx.Digest {
+					for _, dgst := range digests {
+						images = append(images, &imageContext{
+							trunc:  ctx.Trunc,
+							i:      image,
+							repo:   repo,
+							tag:    "<none>",
+							digest: dgst,
+						})
+					}
+				} else {
+					images = append(images, &imageContext{
+						trunc: ctx.Trunc,
+						i:     image,
+						repo:  repo,
+						tag:   "<none>",
+					})
+				}
+			}
+		}
+		for _, imageCtx := range images {
 			err = ctx.contextFormat(tmpl, imageCtx)
 			if err != nil {
 				return
diff --git a/api/client/formatter/formatter_test.go b/api/client/formatter/formatter_test.go
index 7dd5a68..07cde63 100644
--- a/api/client/formatter/formatter_test.go
+++ b/api/client/formatter/formatter_test.go
@@ -301,7 +301,6 @@
 			},
 			`REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
 image               tag1                imageID1            24 hours ago        0 B
-image               <none>              imageID1            24 hours ago        0 B
 image               tag2                imageID2            24 hours ago        0 B
 <none>              <none>              imageID3            24 hours ago        0 B
 `,
@@ -312,7 +311,7 @@
 					Format: "table {{.Repository}}",
 				},
 			},
-			"REPOSITORY\nimage\nimage\nimage\n<none>\n",
+			"REPOSITORY\nimage\nimage\n<none>\n",
 		},
 		{
 			ImageContext{
@@ -322,7 +321,6 @@
 				Digest: true,
 			},
 			`REPOSITORY          DIGEST
-image               <none>
 image               sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
 image               <none>
 <none>              <none>
@@ -335,7 +333,7 @@
 					Quiet:  true,
 				},
 			},
-			"REPOSITORY\nimage\nimage\nimage\n<none>\n",
+			"REPOSITORY\nimage\nimage\n<none>\n",
 		},
 		{
 			ImageContext{
@@ -344,7 +342,7 @@
 					Quiet:  true,
 				},
 			},
-			"imageID1\nimageID1\nimageID2\nimageID3\n",
+			"imageID1\nimageID2\nimageID3\n",
 		},
 		{
 			ImageContext{
@@ -355,8 +353,7 @@
 				Digest: true,
 			},
 			`REPOSITORY          TAG                 DIGEST                                                                    IMAGE ID            CREATED             SIZE
-image               tag1                <none>                                                                    imageID1            24 hours ago        0 B
-image               <none>              sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   imageID1            24 hours ago        0 B
+image               tag1                sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf   imageID1            24 hours ago        0 B
 image               tag2                <none>                                                                    imageID2            24 hours ago        0 B
 <none>              <none>              <none>                                                                    imageID3            24 hours ago        0 B
 `,
@@ -369,7 +366,7 @@
 				},
 				Digest: true,
 			},
-			"imageID1\nimageID1\nimageID2\nimageID3\n",
+			"imageID1\nimageID2\nimageID3\n",
 		},
 		// Raw Format
 		{
@@ -385,12 +382,6 @@
 virtual_size: 0 B
 
 repository: image
-tag: <none>
-image_id: imageID1
-created_at: %s
-virtual_size: 0 B
-
-repository: image
 tag: tag2
 image_id: imageID2
 created_at: %s
@@ -402,7 +393,7 @@
 created_at: %s
 virtual_size: 0 B
 
-`, expectedTime, expectedTime, expectedTime, expectedTime),
+`, expectedTime, expectedTime, expectedTime),
 		},
 		{
 			ImageContext{
@@ -413,13 +404,6 @@
 			},
 			fmt.Sprintf(`repository: image
 tag: tag1
-digest: <none>
-image_id: imageID1
-created_at: %s
-virtual_size: 0 B
-
-repository: image
-tag: <none>
 digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf
 image_id: imageID1
 created_at: %s
@@ -439,7 +423,7 @@
 created_at: %s
 virtual_size: 0 B
 
-`, expectedTime, expectedTime, expectedTime, expectedTime),
+`, expectedTime, expectedTime, expectedTime),
 		},
 		{
 			ImageContext{
@@ -449,7 +433,6 @@
 				},
 			},
 			`image_id: imageID1
-image_id: imageID1
 image_id: imageID2
 image_id: imageID3
 `,
@@ -461,7 +444,7 @@
 					Format: "{{.Repository}}",
 				},
 			},
-			"image\nimage\nimage\n<none>\n",
+			"image\nimage\n<none>\n",
 		},
 		{
 			ImageContext{
@@ -470,7 +453,7 @@
 				},
 				Digest: true,
 			},
-			"image\nimage\nimage\n<none>\n",
+			"image\nimage\n<none>\n",
 		},
 	}
 
diff --git a/api/client/idresolver/idresolver.go b/api/client/idresolver/idresolver.go
index 05c4c9c..b958bd0 100644
--- a/api/client/idresolver/idresolver.go
+++ b/api/client/idresolver/idresolver.go
@@ -40,7 +40,7 @@
 		}
 		return id, nil
 	case swarm.Service:
-		service, err := r.client.ServiceInspect(ctx, id)
+		service, _, err := r.client.ServiceInspectWithRaw(ctx, id)
 		if err != nil {
 			return id, nil
 		}
diff --git a/api/client/info.go b/api/client/info.go
index df1415c..cf4d4ff 100644
--- a/api/client/info.go
+++ b/api/client/info.go
@@ -94,6 +94,10 @@
 		fmt.Fprintf(cli.out, "Default Runtime: %s\n", info.DefaultRuntime)
 	}
 
+	fmt.Fprintf(cli.out, "Security Options:")
+	ioutils.FprintfIfNotEmpty(cli.out, " %s", strings.Join(info.SecurityOptions, " "))
+	fmt.Fprintf(cli.out, "\n")
+
 	ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion)
 	ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem)
 	ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType)
diff --git a/api/client/node/accept.go b/api/client/node/accept.go
index ae672ff..df31aa2 100644
--- a/api/client/node/accept.go
+++ b/api/client/node/accept.go
@@ -33,7 +33,7 @@
 		}); err != nil {
 			return err
 		}
-		fmt.Println(id, "attempting to accept a node in the swarm.")
+		fmt.Fprintf(dockerCli.Out(), "Node %s accepted in the swarm.\n", id)
 	}
 
 	return nil
diff --git a/api/client/node/demote.go b/api/client/node/demote.go
index 25f2073..e9d70e9 100644
--- a/api/client/node/demote.go
+++ b/api/client/node/demote.go
@@ -33,7 +33,7 @@
 		}); err != nil {
 			return err
 		}
-		fmt.Println(id, "attempting to demote a manager in the swarm.")
+		fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", id)
 	}
 
 	return nil
diff --git a/api/client/node/list.go b/api/client/node/list.go
index c21cb94..374ecac 100644
--- a/api/client/node/list.go
+++ b/api/client/node/list.go
@@ -16,7 +16,7 @@
 )
 
 const (
-	listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
+	listItemFmt = "%s\t%s\t%s\t%s\t%s\t%s\n"
 )
 
 type listOptions struct {
@@ -74,7 +74,7 @@
 	// Ignore flushing errors
 	defer writer.Flush()
 
-	fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS", "LEADER")
+	fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "MEMBERSHIP", "STATUS", "AVAILABILITY", "MANAGER STATUS")
 	for _, node := range nodes {
 		name := node.Spec.Name
 		availability := string(node.Spec.Availability)
@@ -84,14 +84,13 @@
 			name = node.Description.Hostname
 		}
 
-		leader := ""
-		if node.ManagerStatus != nil && node.ManagerStatus.Leader {
-			leader = "Yes"
-		}
-
 		reachability := ""
 		if node.ManagerStatus != nil {
-			reachability = string(node.ManagerStatus.Reachability)
+			if node.ManagerStatus.Leader {
+				reachability = "Leader"
+			} else {
+				reachability = string(node.ManagerStatus.Reachability)
+			}
 		}
 
 		ID := node.ID
@@ -107,8 +106,7 @@
 			client.PrettyPrint(membership),
 			client.PrettyPrint(string(node.Status.State)),
 			client.PrettyPrint(availability),
-			client.PrettyPrint(reachability),
-			leader)
+			client.PrettyPrint(reachability))
 	}
 }
 
diff --git a/api/client/node/promote.go b/api/client/node/promote.go
index 858b36a..87fe42e 100644
--- a/api/client/node/promote.go
+++ b/api/client/node/promote.go
@@ -33,7 +33,7 @@
 		}); err != nil {
 			return err
 		}
-		fmt.Println(id, "attempting to promote a node to a manager in the swarm.")
+		fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", id)
 	}
 
 	return nil
diff --git a/api/client/node/update.go b/api/client/node/update.go
index 64cde8e..d18a4ab 100644
--- a/api/client/node/update.go
+++ b/api/client/node/update.go
@@ -21,7 +21,11 @@
 		Short: "Update a node",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return runUpdate(dockerCli, args[0], mergeNodeUpdate(flags))
+			if err := runUpdate(dockerCli, args[0], mergeNodeUpdate(flags)); err != nil {
+				return err
+			}
+			fmt.Fprintln(dockerCli.Out(), args[0])
+			return nil
 		},
 	}
 
@@ -47,7 +51,6 @@
 		return err
 	}
 
-	fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID)
 	return nil
 }
 
diff --git a/api/client/plugin/disable.go b/api/client/plugin/disable.go
index 6d0ff69..058c688 100644
--- a/api/client/plugin/disable.go
+++ b/api/client/plugin/disable.go
@@ -3,21 +3,39 @@
 package plugin
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
+	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
 
 func newDisableCommand(dockerCli *client.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "disable",
+		Use:   "disable PLUGIN",
 		Short: "Disable a plugin",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return dockerCli.Client().PluginDisable(context.Background(), args[0])
+			return runDisable(dockerCli, args[0])
 		},
 	}
 
 	return cmd
 }
+
+func runDisable(dockerCli *client.DockerCli, name string) error {
+	named, err := reference.ParseNamed(name) // FIXME: validate
+	if err != nil {
+		return err
+	}
+	if reference.IsNameOnly(named) {
+		named = reference.WithDefaultTag(named)
+	}
+	ref, ok := named.(reference.NamedTagged)
+	if !ok {
+		return fmt.Errorf("invalid name: %s", named.String())
+	}
+	return dockerCli.Client().PluginDisable(context.Background(), ref.String())
+}
diff --git a/api/client/plugin/enable.go b/api/client/plugin/enable.go
index 2a05b4a..cc2488b 100644
--- a/api/client/plugin/enable.go
+++ b/api/client/plugin/enable.go
@@ -3,21 +3,39 @@
 package plugin
 
 import (
+	"fmt"
+
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
+	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
 
 func newEnableCommand(dockerCli *client.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "enable",
+		Use:   "enable PLUGIN",
 		Short: "Enable a plugin",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return dockerCli.Client().PluginEnable(context.Background(), args[0])
+			return runEnable(dockerCli, args[0])
 		},
 	}
 
 	return cmd
 }
+
+func runEnable(dockerCli *client.DockerCli, name string) error {
+	named, err := reference.ParseNamed(name) // FIXME: validate
+	if err != nil {
+		return err
+	}
+	if reference.IsNameOnly(named) {
+		named = reference.WithDefaultTag(named)
+	}
+	ref, ok := named.(reference.NamedTagged)
+	if !ok {
+		return fmt.Errorf("invalid name: %s", named.String())
+	}
+	return dockerCli.Client().PluginEnable(context.Background(), ref.String())
+}
diff --git a/api/client/plugin/inspect.go b/api/client/plugin/inspect.go
index f68ad40..8f7e98d 100644
--- a/api/client/plugin/inspect.go
+++ b/api/client/plugin/inspect.go
@@ -4,16 +4,18 @@
 
 import (
 	"encoding/json"
+	"fmt"
 
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
+	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
 
 func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "inspect",
+		Use:   "inspect PLUGIN",
 		Short: "Inspect a plugin",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -25,7 +27,18 @@
 }
 
 func runInspect(dockerCli *client.DockerCli, name string) error {
-	p, err := dockerCli.Client().PluginInspect(context.Background(), name)
+	named, err := reference.ParseNamed(name) // FIXME: validate
+	if err != nil {
+		return err
+	}
+	if reference.IsNameOnly(named) {
+		named = reference.WithDefaultTag(named)
+	}
+	ref, ok := named.(reference.NamedTagged)
+	if !ok {
+		return fmt.Errorf("invalid name: %s", named.String())
+	}
+	p, err := dockerCli.Client().PluginInspect(context.Background(), ref.String())
 	if err != nil {
 		return err
 	}
diff --git a/api/client/plugin/install.go b/api/client/plugin/install.go
index dd38bd2..ea73498 100644
--- a/api/client/plugin/install.go
+++ b/api/client/plugin/install.go
@@ -3,35 +3,52 @@
 package plugin
 
 import (
+	"bufio"
 	"fmt"
+	"strings"
 
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
+	"github.com/docker/engine-api/types"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
 
+type pluginOptions struct {
+	name       string
+	grantPerms bool
+	disable    bool
+}
+
 func newInstallCommand(dockerCli *client.DockerCli) *cobra.Command {
+	var options pluginOptions
 	cmd := &cobra.Command{
-		Use:   "install",
+		Use:   "install PLUGIN",
 		Short: "Install a plugin",
 		Args:  cli.RequiresMinArgs(1), // TODO: allow for set args
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return runInstall(dockerCli, args[0], args[1:])
+			options.name = args[0]
+			return runInstall(dockerCli, options)
 		},
 	}
 
+	flags := cmd.Flags()
+	flags.BoolVar(&options.grantPerms, "grant-all-permissions", false, "grant all permissions necessary to run the plugin")
+	flags.BoolVar(&options.disable, "disable", false, "do not enable the plugin on install")
+
 	return cmd
 }
 
-func runInstall(dockerCli *client.DockerCli, name string, args []string) error {
-	named, err := reference.ParseNamed(name) // FIXME: validate
+func runInstall(dockerCli *client.DockerCli, opts pluginOptions) error {
+	named, err := reference.ParseNamed(opts.name) // FIXME: validate
 	if err != nil {
 		return err
 	}
-	named = reference.WithDefaultTag(named)
+	if reference.IsNameOnly(named) {
+		named = reference.WithDefaultTag(named)
+	}
 	ref, ok := named.(reference.NamedTagged)
 	if !ok {
 		return fmt.Errorf("invalid name: %s", named.String())
@@ -46,6 +63,34 @@
 	if err != nil {
 		return err
 	}
-	// TODO: pass acceptAllPermissions and noEnable flag
-	return dockerCli.Client().PluginInstall(ctx, ref.String(), encodedAuth, false, false, dockerCli.In(), dockerCli.Out())
+
+	registryAuthFunc := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "plugin install")
+
+	options := types.PluginInstallOptions{
+		RegistryAuth:          encodedAuth,
+		Disabled:              opts.disable,
+		AcceptAllPermissions:  opts.grantPerms,
+		AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.name),
+		// TODO: Rename PrivilegeFunc, it has nothing to do with privileges
+		PrivilegeFunc: registryAuthFunc,
+	}
+
+	return dockerCli.Client().PluginInstall(ctx, ref.String(), options)
+}
+
+func acceptPrivileges(dockerCli *client.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) {
+	return func(privileges types.PluginPrivileges) (bool, error) {
+		fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name)
+		for _, privilege := range privileges {
+			fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value)
+		}
+
+		fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? [y/N] ")
+		reader := bufio.NewReader(dockerCli.In())
+		line, _, err := reader.ReadLine()
+		if err != nil {
+			return false, err
+		}
+		return strings.ToLower(string(line)) == "y", nil
+	}
 }
diff --git a/api/client/plugin/push.go b/api/client/plugin/push.go
index c650eb6..d26fab8 100644
--- a/api/client/plugin/push.go
+++ b/api/client/plugin/push.go
@@ -16,7 +16,7 @@
 
 func newPushCommand(dockerCli *client.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "push",
+		Use:   "push PLUGIN",
 		Short: "Push a plugin",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -31,7 +31,9 @@
 	if err != nil {
 		return err
 	}
-	named = reference.WithDefaultTag(named)
+	if reference.IsNameOnly(named) {
+		named = reference.WithDefaultTag(named)
+	}
 	ref, ok := named.(reference.NamedTagged)
 	if !ok {
 		return fmt.Errorf("invalid name: %s", named.String())
diff --git a/api/client/plugin/remove.go b/api/client/plugin/remove.go
index 9acd79e..a8e52a4 100644
--- a/api/client/plugin/remove.go
+++ b/api/client/plugin/remove.go
@@ -7,13 +7,14 @@
 
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
+	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
 )
 
 func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:     "rm",
+		Use:     "rm PLUGIN",
 		Short:   "Remove a plugin",
 		Aliases: []string{"remove"},
 		Args:    cli.RequiresMinArgs(1),
@@ -28,8 +29,19 @@
 func runRemove(dockerCli *client.DockerCli, names []string) error {
 	var errs cli.Errors
 	for _, name := range names {
+		named, err := reference.ParseNamed(name) // FIXME: validate
+		if err != nil {
+			return err
+		}
+		if reference.IsNameOnly(named) {
+			named = reference.WithDefaultTag(named)
+		}
+		ref, ok := named.(reference.NamedTagged)
+		if !ok {
+			return fmt.Errorf("invalid name: %s", named.String())
+		}
 		// TODO: pass names to api instead of making multiple api calls
-		if err := dockerCli.Client().PluginRemove(context.Background(), name); err != nil {
+		if err := dockerCli.Client().PluginRemove(context.Background(), ref.String()); err != nil {
 			errs = append(errs, err)
 			continue
 		}
diff --git a/api/client/plugin/set.go b/api/client/plugin/set.go
index 5bb7bfe..188bd63 100644
--- a/api/client/plugin/set.go
+++ b/api/client/plugin/set.go
@@ -3,16 +3,19 @@
 package plugin
 
 import (
+	"fmt"
+
 	"golang.org/x/net/context"
 
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
+	"github.com/docker/docker/reference"
 	"github.com/spf13/cobra"
 )
 
 func newSetCommand(dockerCli *client.DockerCli) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "set",
+		Use:   "set PLUGIN key1=value1 [key2=value2...]",
 		Short: "Change settings for a plugin",
 		Args:  cli.RequiresMinArgs(2),
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -24,5 +27,16 @@
 }
 
 func runSet(dockerCli *client.DockerCli, name string, args []string) error {
-	return dockerCli.Client().PluginSet(context.Background(), name, args)
+	named, err := reference.ParseNamed(name) // FIXME: validate
+	if err != nil {
+		return err
+	}
+	if reference.IsNameOnly(named) {
+		named = reference.WithDefaultTag(named)
+	}
+	ref, ok := named.(reference.NamedTagged)
+	if !ok {
+		return fmt.Errorf("invalid name: %s", named.String())
+	}
+	return dockerCli.Client().PluginSet(context.Background(), ref.String(), args)
 }
diff --git a/api/client/service/inspect.go b/api/client/service/inspect.go
index a75e4e7..36dfa89 100644
--- a/api/client/service/inspect.go
+++ b/api/client/service/inspect.go
@@ -13,6 +13,7 @@
 	"github.com/docker/docker/pkg/ioutils"
 	apiclient "github.com/docker/engine-api/client"
 	"github.com/docker/engine-api/types/swarm"
+	"github.com/docker/go-units"
 	"github.com/spf13/cobra"
 )
 
@@ -50,7 +51,7 @@
 	ctx := context.Background()
 
 	getRef := func(ref string) (interface{}, []byte, error) {
-		service, err := client.ServiceInspect(ctx, ref)
+		service, _, err := client.ServiceInspectWithRaw(ctx, ref)
 		if err == nil || !apiclient.IsErrServiceNotFound(err) {
 			return service, nil, err
 		}
@@ -93,22 +94,61 @@
 	}
 
 	if service.Spec.Mode.Global != nil {
-		fmt.Fprintln(out, "Mode:\t\tGLOBAL")
+		fmt.Fprintln(out, "Mode:\t\tGlobal")
 	} else {
-		fmt.Fprintln(out, "Mode:\t\tREPLICATED")
+		fmt.Fprintln(out, "Mode:\t\tReplicated")
 		if service.Spec.Mode.Replicated.Replicas != nil {
-			fmt.Fprintf(out, " Replicas:\t\t%d\n", *service.Spec.Mode.Replicated.Replicas)
+			fmt.Fprintf(out, " Replicas:\t%d\n", *service.Spec.Mode.Replicated.Replicas)
 		}
 	}
 	fmt.Fprintln(out, "Placement:")
-	fmt.Fprintln(out, " Strategy:\tSPREAD")
-	fmt.Fprintf(out, "UpateConfig:\n")
+	fmt.Fprintln(out, " Strategy:\tSpread")
+	if service.Spec.TaskTemplate.Placement != nil && len(service.Spec.TaskTemplate.Placement.Constraints) > 0 {
+		ioutils.FprintfIfNotEmpty(out, " Constraints\t: %s\n", strings.Join(service.Spec.TaskTemplate.Placement.Constraints, ", "))
+	}
+	fmt.Fprintf(out, "UpdateConfig:\n")
 	fmt.Fprintf(out, " Parallelism:\t%d\n", service.Spec.UpdateConfig.Parallelism)
 	if service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 {
 		fmt.Fprintf(out, " Delay:\t\t%s\n", service.Spec.UpdateConfig.Delay)
 	}
 	fmt.Fprintf(out, "ContainerSpec:\n")
 	printContainerSpec(out, service.Spec.TaskTemplate.ContainerSpec)
+
+	if service.Spec.TaskTemplate.Resources != nil {
+		fmt.Fprintln(out, "Resources:")
+		printResources := func(out io.Writer, r *swarm.Resources) {
+			if r.NanoCPUs != 0 {
+				fmt.Fprintf(out, " CPU:\t\t%g\n", float64(r.NanoCPUs)/1e9)
+			}
+			if r.MemoryBytes != 0 {
+				fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(r.MemoryBytes)))
+			}
+		}
+		if service.Spec.TaskTemplate.Resources.Reservations != nil {
+			fmt.Fprintln(out, "Reservations:")
+			printResources(out, service.Spec.TaskTemplate.Resources.Reservations)
+		}
+		if service.Spec.TaskTemplate.Resources.Limits != nil {
+			fmt.Fprintln(out, "Limits:")
+			printResources(out, service.Spec.TaskTemplate.Resources.Limits)
+		}
+	}
+	if len(service.Spec.Networks) > 0 {
+		fmt.Fprintf(out, "Networks:")
+		for _, n := range service.Spec.Networks {
+			fmt.Fprintf(out, " %s", n.Target)
+		}
+	}
+
+	if len(service.Endpoint.Ports) > 0 {
+		fmt.Fprintln(out, "Ports:")
+		for _, port := range service.Endpoint.Ports {
+			fmt.Fprintf(out, " Name = %s\n", port.Name)
+			fmt.Fprintf(out, " Protocol = %s\n", port.Protocol)
+			fmt.Fprintf(out, " TargetPort = %d\n", port.TargetPort)
+			fmt.Fprintf(out, " PublishedPort = %d\n", port.PublishedPort)
+		}
+	}
 }
 
 func printContainerSpec(out io.Writer, containerSpec swarm.ContainerSpec) {
@@ -117,11 +157,20 @@
 		fmt.Fprintf(out, " Command:\t%s\n", strings.Join(containerSpec.Command, " "))
 	}
 	if len(containerSpec.Args) > 0 {
-		fmt.Fprintf(out, " Args:\t%s\n", strings.Join(containerSpec.Args, " "))
+		fmt.Fprintf(out, " Args:\t\t%s\n", strings.Join(containerSpec.Args, " "))
 	}
 	if len(containerSpec.Env) > 0 {
 		fmt.Fprintf(out, " Env:\t\t%s\n", strings.Join(containerSpec.Env, " "))
 	}
 	ioutils.FprintfIfNotEmpty(out, " Dir\t\t%s\n", containerSpec.Dir)
 	ioutils.FprintfIfNotEmpty(out, " User\t\t%s\n", containerSpec.User)
+	if len(containerSpec.Mounts) > 0 {
+		fmt.Fprintln(out, " Mounts:")
+		for _, v := range containerSpec.Mounts {
+			fmt.Fprintf(out, "  Target = %s\n", v.Target)
+			fmt.Fprintf(out, "  Source = %s\n", v.Source)
+			fmt.Fprintf(out, "  Writable = %v\n", v.Writable)
+			fmt.Fprintf(out, "  Type = %v\n", v.Type)
+		}
+	}
 }
diff --git a/api/client/service/list.go b/api/client/service/list.go
index c1246c8..c9a2087 100644
--- a/api/client/service/list.go
+++ b/api/client/service/list.go
@@ -6,15 +6,15 @@
 	"strings"
 	"text/tabwriter"
 
-	"golang.org/x/net/context"
-
 	"github.com/docker/docker/api/client"
 	"github.com/docker/docker/cli"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/engine-api/types"
+	"github.com/docker/engine-api/types/filters"
 	"github.com/docker/engine-api/types/swarm"
 	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
 )
 
 const (
@@ -47,11 +47,10 @@
 }
 
 func runList(dockerCli *client.DockerCli, opts listOptions) error {
+	ctx := context.Background()
 	client := dockerCli.Client()
 
-	services, err := client.ServiceList(
-		context.Background(),
-		types.ServiceListOptions{Filter: opts.filter.Value()})
+	services, err := client.ServiceList(ctx, types.ServiceListOptions{Filter: opts.filter.Value()})
 	if err != nil {
 		return err
 	}
@@ -60,31 +59,48 @@
 	if opts.quiet {
 		printQuiet(out, services)
 	} else {
-		printTable(out, services)
+		taskFilter := filters.NewArgs()
+		for _, service := range services {
+			taskFilter.Add("service", service.ID)
+		}
+
+		tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: taskFilter})
+		if err != nil {
+			return err
+		}
+
+		running := map[string]int{}
+		for _, task := range tasks {
+			if task.Status.State == "running" {
+				running[task.ServiceID]++
+			}
+		}
+
+		printTable(out, services, running)
 	}
 	return nil
 }
 
-func printTable(out io.Writer, services []swarm.Service) {
+func printTable(out io.Writer, services []swarm.Service, running map[string]int) {
 	writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0)
 
 	// Ignore flushing errors
 	defer writer.Flush()
 
-	fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "SCALE", "IMAGE", "COMMAND")
+	fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "REPLICAS", "IMAGE", "COMMAND")
 	for _, service := range services {
-		scale := ""
+		replicas := ""
 		if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil {
-			scale = fmt.Sprintf("%d", *service.Spec.Mode.Replicated.Replicas)
+			replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas)
 		} else if service.Spec.Mode.Global != nil {
-			scale = "global"
+			replicas = "global"
 		}
 		fmt.Fprintf(
 			writer,
 			listItemFmt,
 			stringid.TruncateID(service.ID),
 			service.Spec.Name,
-			scale,
+			replicas,
 			service.Spec.TaskTemplate.ContainerSpec.Image,
 			strings.Join(service.Spec.TaskTemplate.ContainerSpec.Args, " "))
 	}
diff --git a/api/client/service/opts.go b/api/client/service/opts.go
index 58c5f94..54b6582 100644
--- a/api/client/service/opts.go
+++ b/api/client/service/opts.go
@@ -28,7 +28,7 @@
 type memBytes int64
 
 func (m *memBytes) String() string {
-	return strconv.FormatInt(m.Value(), 10)
+	return units.BytesSize(float64(m.Value()))
 }
 
 func (m *memBytes) Set(value string) error {
@@ -48,7 +48,7 @@
 type nanoCPUs int64
 
 func (c *nanoCPUs) String() string {
-	return strconv.FormatInt(c.Value(), 10)
+	return big.NewRat(c.Value(), 1e9).FloatString(3)
 }
 
 func (c *nanoCPUs) Set(value string) error {
@@ -177,7 +177,7 @@
 		}
 
 		if len(parts) != 2 {
-			return fmt.Errorf("invald field '%s' must be a key=value pair", field)
+			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
 		}
 
 		key, value := parts[0], parts[1]
@@ -191,14 +191,14 @@
 		case "writable":
 			mount.Writable, err = strconv.ParseBool(value)
 			if err != nil {
-				return fmt.Errorf("invald value for writable: %s", err.Error())
+				return fmt.Errorf("invalid value for writable: %s", value)
 			}
 		case "bind-propagation":
 			mount.BindOptions.Propagation = swarm.MountPropagation(strings.ToUpper(value))
 		case "volume-populate":
 			volumeOptions().Populate, err = strconv.ParseBool(value)
 			if err != nil {
-				return fmt.Errorf("invald value for populate: %s", err.Error())
+				return fmt.Errorf("invalid value for populate: %s", value)
 			}
 		case "volume-label":
 			setValueOnMap(volumeOptions().Labels, value)
@@ -235,7 +235,8 @@
 func (m *MountOpt) String() string {
 	mounts := []string{}
 	for _, mount := range m.values {
-		mounts = append(mounts, fmt.Sprintf("%v", mount))
+		repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
+		mounts = append(mounts, repr)
 	}
 	return strings.Join(mounts, ", ")
 }
@@ -456,7 +457,7 @@
 
 	flags.StringSliceVar(&opts.constraints, flagConstraint, []string{}, "Placement constraints")
 
-	flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously")
+	flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 0, "Maximum number of tasks updated simultaneously")
 	flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates")
 
 	flags.StringSliceVar(&opts.networks, flagNetwork, []string{}, "Network attachments")
diff --git a/api/client/service/opts_test.go b/api/client/service/opts_test.go
new file mode 100644
index 0000000..808ce23
--- /dev/null
+++ b/api/client/service/opts_test.go
@@ -0,0 +1,115 @@
+package service
+
+import (
+	"testing"
+	"time"
+
+	"github.com/docker/docker/pkg/testutil/assert"
+	"github.com/docker/engine-api/types/swarm"
+)
+
+func TestMemBytesString(t *testing.T) {
+	var mem memBytes = 1048576
+	assert.Equal(t, mem.String(), "1 MiB")
+}
+
+func TestMemBytesSetAndValue(t *testing.T) {
+	var mem memBytes
+	assert.NilError(t, mem.Set("5kb"))
+	assert.Equal(t, mem.Value(), int64(5120))
+}
+
+func TestNanoCPUsString(t *testing.T) {
+	var cpus nanoCPUs = 6100000000
+	assert.Equal(t, cpus.String(), "6.100")
+}
+
+func TestNanoCPUsSetAndValue(t *testing.T) {
+	var cpus nanoCPUs
+	assert.NilError(t, cpus.Set("0.35"))
+	assert.Equal(t, cpus.Value(), int64(350000000))
+}
+
+func TestDurationOptString(t *testing.T) {
+	dur := time.Duration(300 * 10e8)
+	duration := DurationOpt{value: &dur}
+	assert.Equal(t, duration.String(), "5m0s")
+}
+
+func TestDurationOptSetAndValue(t *testing.T) {
+	var duration DurationOpt
+	assert.NilError(t, duration.Set("300s"))
+	assert.Equal(t, *duration.Value(), time.Duration(300*10e8))
+}
+
+func TestUint64OptString(t *testing.T) {
+	value := uint64(2345678)
+	opt := Uint64Opt{value: &value}
+	assert.Equal(t, opt.String(), "2345678")
+
+	opt = Uint64Opt{}
+	assert.Equal(t, opt.String(), "none")
+}
+
+func TestUint64OptSetAndValue(t *testing.T) {
+	var opt Uint64Opt
+	assert.NilError(t, opt.Set("14445"))
+	assert.Equal(t, *opt.Value(), uint64(14445))
+}
+
+func TestMountOptString(t *testing.T) {
+	mount := MountOpt{
+		values: []swarm.Mount{
+			{
+				Type:   swarm.MountType("BIND"),
+				Source: "/home/path",
+				Target: "/target",
+			},
+			{
+				Type:   swarm.MountType("VOLUME"),
+				Source: "foo",
+				Target: "/target/foo",
+			},
+		},
+	}
+	expected := "BIND /home/path /target, VOLUME foo /target/foo"
+	assert.Equal(t, mount.String(), expected)
+}
+
+func TestMountOptSetNoError(t *testing.T) {
+	var mount MountOpt
+	assert.NilError(t, mount.Set("type=bind,target=/target,source=/foo"))
+
+	mounts := mount.Value()
+	assert.Equal(t, len(mounts), 1)
+	assert.Equal(t, mounts[0], swarm.Mount{
+		Type:   swarm.MountType("BIND"),
+		Source: "/foo",
+		Target: "/target",
+	})
+}
+
+func TestMountOptSetErrorNoType(t *testing.T) {
+	var mount MountOpt
+	assert.Error(t, mount.Set("target=/target,source=/foo"), "type is required")
+}
+
+func TestMountOptSetErrorNoTarget(t *testing.T) {
+	var mount MountOpt
+	assert.Error(t, mount.Set("type=VOLUME,source=/foo"), "target is required")
+}
+
+func TestMountOptSetErrorInvalidKey(t *testing.T) {
+	var mount MountOpt
+	assert.Error(t, mount.Set("type=VOLUME,bogus=foo"), "unexpected key 'bogus'")
+}
+
+func TestMountOptSetErrorInvalidField(t *testing.T) {
+	var mount MountOpt
+	assert.Error(t, mount.Set("type=VOLUME,bogus"), "invalid field 'bogus'")
+}
+
+func TestMountOptSetErrorInvalidWritable(t *testing.T) {
+	var mount MountOpt
+	assert.Error(t, mount.Set("type=VOLUME,writable=yes"), "invalid value for writable: yes")
+}
diff --git a/api/client/service/scale.go b/api/client/service/scale.go
index ae528b5..e313948 100644
--- a/api/client/service/scale.go
+++ b/api/client/service/scale.go
@@ -14,7 +14,7 @@
 
 func newScaleCommand(dockerCli *client.DockerCli) *cobra.Command {
 	return &cobra.Command{
-		Use:   "scale SERVICE=SCALE [SERVICE=SCALE...]",
+		Use:   "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]",
 		Short: "Scale one or multiple services",
 		Args:  scaleArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -61,7 +61,8 @@
 	client := dockerCli.Client()
 	ctx := context.Background()
 
-	service, err := client.ServiceInspect(ctx, serviceID)
+	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID)
+
 	if err != nil {
 		return err
 	}
diff --git a/api/client/service/tasks.go b/api/client/service/tasks.go
index 6169d8b..7520b1f 100644
--- a/api/client/service/tasks.go
+++ b/api/client/service/tasks.go
@@ -44,7 +44,7 @@
 	client := dockerCli.Client()
 	ctx := context.Background()
 
-	service, err := client.ServiceInspect(ctx, opts.serviceID)
+	service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID)
 	if err != nil {
 		return err
 	}
diff --git a/api/client/service/update.go b/api/client/service/update.go
index 20bc386..c8e3a6d 100644
--- a/api/client/service/update.go
+++ b/api/client/service/update.go
@@ -41,7 +41,7 @@
 	client := dockerCli.Client()
 	ctx := context.Background()
 
-	service, err := client.ServiceInspect(ctx, serviceID)
+	service, _, err := client.ServiceInspectWithRaw(ctx, serviceID)
 	if err != nil {
 		return err
 	}
diff --git a/api/client/stack/opts.go b/api/client/stack/opts.go
index b4e12dc..4383943 100644
--- a/api/client/stack/opts.go
+++ b/api/client/stack/opts.go
@@ -31,7 +31,12 @@
 	}
 
 	fmt.Fprintf(stderr, "Loading bundle from %s\n", path)
-	bundle, err := bundlefile.LoadFile(path)
+	reader, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+
+	bundle, err := bundlefile.LoadFile(reader)
 	if err != nil {
 		return nil, fmt.Errorf("Error reading %s: %v\n", path, err)
 	}
diff --git a/api/client/swarm/init.go b/api/client/swarm/init.go
index 0c66246..1f403ae 100644
--- a/api/client/swarm/init.go
+++ b/api/client/swarm/init.go
@@ -9,6 +9,7 @@
 	"github.com/docker/docker/cli"
 	"github.com/docker/engine-api/types/swarm"
 	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
 )
 
 type initOptions struct {
@@ -19,6 +20,7 @@
 }
 
 func newInitCommand(dockerCli *client.DockerCli) *cobra.Command {
+	var flags *pflag.FlagSet
 	opts := initOptions{
 		listenAddr: NewNodeAddrOption(),
 		autoAccept: NewAutoAcceptOption(),
@@ -26,14 +28,14 @@
 
 	cmd := &cobra.Command{
 		Use:   "init",
-		Short: "Initialize a Swarm.",
+		Short: "Initialize a Swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
-			return runInit(dockerCli, opts)
+			return runInit(dockerCli, flags, opts)
 		},
 	}
 
-	flags := cmd.Flags()
+	flags = cmd.Flags()
 	flags.Var(&opts.listenAddr, "listen-addr", "Listen address")
 	flags.Var(&opts.autoAccept, "auto-accept", "Auto acceptance policy (worker, manager, or none)")
 	flags.StringVar(&opts.secret, "secret", "", "Set secret value needed to accept nodes into cluster")
@@ -41,7 +43,7 @@
 	return cmd
 }
 
-func runInit(dockerCli *client.DockerCli, opts initOptions) error {
+func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions) error {
 	client := dockerCli.Client()
 	ctx := context.Background()
 
@@ -50,8 +52,11 @@
 		ForceNewCluster: opts.forceNewCluster,
 	}
 
-	req.Spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(opts.secret)
-
+	if flags.Changed("secret") {
+		req.Spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(&opts.secret)
+	} else {
+		req.Spec.AcceptancePolicy.Policies = opts.autoAccept.Policies(nil)
+	}
 	nodeID, err := client.SwarmInit(ctx, req)
 	if err != nil {
 		return err
diff --git a/api/client/swarm/join.go b/api/client/swarm/join.go
index 346445f..1bb9ae0 100644
--- a/api/client/swarm/join.go
+++ b/api/client/swarm/join.go
@@ -25,7 +25,7 @@
 
 	cmd := &cobra.Command{
 		Use:   "join [OPTIONS] HOST:PORT",
-		Short: "Join a Swarm as a node and/or manager.",
+		Short: "Join a Swarm as a node and/or manager",
 		Args:  cli.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
 			opts.remote = args[0]
diff --git a/api/client/swarm/leave.go b/api/client/swarm/leave.go
index e3f8fbf..50f05c7 100644
--- a/api/client/swarm/leave.go
+++ b/api/client/swarm/leave.go
@@ -19,7 +19,7 @@
 
 	cmd := &cobra.Command{
 		Use:   "leave",
-		Short: "Leave a Swarm.",
+		Short: "Leave a Swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runLeave(dockerCli, opts)
diff --git a/api/client/swarm/opts.go b/api/client/swarm/opts.go
index fa543b6..1512910 100644
--- a/api/client/swarm/opts.go
+++ b/api/client/swarm/opts.go
@@ -35,12 +35,12 @@
 // Set the value for this flag
 func (a *NodeAddrOption) Set(value string) error {
 	if !strings.Contains(value, ":") {
-		return fmt.Errorf("Invalud url, a host and port are required")
+		return fmt.Errorf("Invalid url, a host and port are required")
 	}
 
 	parts := strings.Split(value, ":")
 	if len(parts) != 2 {
-		return fmt.Errorf("Invalud url, too many colons")
+		return fmt.Errorf("Invalid url, too many colons")
 	}
 
 	a.addr = value
@@ -102,7 +102,7 @@
 }
 
 // Policies returns a representation of this option for the api
-func (o *AutoAcceptOption) Policies(secret string) []swarm.Policy {
+func (o *AutoAcceptOption) Policies(secret *string) []swarm.Policy {
 	policies := []swarm.Policy{}
 	for _, p := range defaultPolicies {
 		if len(o.values) != 0 {
diff --git a/api/client/swarm/update.go b/api/client/swarm/update.go
index 0a851a6..a77112d 100644
--- a/api/client/swarm/update.go
+++ b/api/client/swarm/update.go
@@ -18,6 +18,7 @@
 	secret              string
 	taskHistoryLimit    int64
 	dispatcherHeartbeat time.Duration
+	nodeCertExpiry      time.Duration
 }
 
 func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command {
@@ -26,7 +27,7 @@
 
 	cmd := &cobra.Command{
 		Use:   "update",
-		Short: "update the Swarm.",
+		Short: "Update the Swarm",
 		Args:  cli.NoArgs,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runUpdate(dockerCli, flags, opts)
@@ -38,6 +39,7 @@
 	flags.StringVar(&opts.secret, "secret", "", "Set secret value needed to accept nodes into cluster")
 	flags.Int64Var(&opts.taskHistoryLimit, "task-history-limit", 10, "Task history retention limit")
 	flags.DurationVar(&opts.dispatcherHeartbeat, "dispatcher-heartbeat", time.Duration(5*time.Second), "Dispatcher heartbeat period")
+	flags.DurationVar(&opts.nodeCertExpiry, "cert-expiry", time.Duration(90*24*time.Hour), "Validity period for node certificates")
 	return cmd
 }
 
@@ -54,6 +56,7 @@
 	if err != nil {
 		return err
 	}
+
 	err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec)
 	if err != nil {
 		return err
@@ -68,18 +71,17 @@
 
 	if flags.Changed("auto-accept") {
 		value := flags.Lookup("auto-accept").Value.(*AutoAcceptOption)
-		if len(spec.AcceptancePolicy.Policies) > 0 {
-			spec.AcceptancePolicy.Policies = value.Policies(spec.AcceptancePolicy.Policies[0].Secret)
-		} else {
-			spec.AcceptancePolicy.Policies = value.Policies("")
-		}
+		spec.AcceptancePolicy.Policies = value.Policies(nil)
 	}
 
+	var psecret *string
 	if flags.Changed("secret") {
 		secret, _ := flags.GetString("secret")
-		for _, policy := range spec.AcceptancePolicy.Policies {
-			policy.Secret = secret
-		}
+		psecret = &secret
+	}
+
+	for i := range spec.AcceptancePolicy.Policies {
+		spec.AcceptancePolicy.Policies[i].Secret = psecret
 	}
 
 	if flags.Changed("task-history-limit") {
@@ -92,5 +94,11 @@
 		}
 	}
 
+	if flags.Changed("cert-expiry") {
+		if v, err := flags.GetDuration("cert-expiry"); err == nil {
+			spec.CAConfig.NodeCertExpiry = v
+		}
+	}
+
 	return nil
 }
diff --git a/cli/error.go b/cli/error.go
index 902d1b6..e421c7f 100644
--- a/cli/error.go
+++ b/cli/error.go
@@ -1,21 +1,20 @@
 package cli
 
-import "bytes"
+import "strings"
 
 // Errors is a list of errors.
 // Useful in a loop if you don't want to return the error right away and you want to display after the loop,
 // all the errors that happened during the loop.
 type Errors []error
 
-func (errs Errors) Error() string {
-	if len(errs) < 1 {
+func (errList Errors) Error() string {
+	if len(errList) < 1 {
 		return ""
 	}
-	var buf bytes.Buffer
-	buf.WriteString(errs[0].Error())
-	for _, err := range errs[1:] {
-		buf.WriteString(", ")
-		buf.WriteString(err.Error())
+
+	out := make([]string, len(errList))
+	for i := range errList {
+		out[i] = errList[i].Error()
 	}
-	return buf.String()
+	return strings.Join(out, ", ")
 }
diff --git a/cmd/dockerd/daemon_plugin_support.go b/cmd/dockerd/daemon_plugin_support.go
index 56a4f85..cbbfe67 100644
--- a/cmd/dockerd/daemon_plugin_support.go
+++ b/cmd/dockerd/daemon_plugin_support.go
@@ -10,5 +10,5 @@
 )
 
 func pluginInit(config *daemon.Config, remote libcontainerd.Remote, rs registry.Service) error {
-	return plugin.Init(config.Root, config.ExecRoot, remote, rs)
+	return plugin.Init(config.Root, config.ExecRoot, remote, rs, config.LiveRestore)
 }
diff --git a/container/container.go b/container/container.go
index ac1ea19..d342269 100644
--- a/container/container.go
+++ b/container/container.go
@@ -823,7 +823,7 @@
 			})
 		}
 
-		createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs))
+		createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
 	}
 
 	if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
diff --git a/container/container_windows.go b/container/container_windows.go
index c860df5..38560bb 100644
--- a/container/container_windows.go
+++ b/container/container_windows.go
@@ -54,7 +54,8 @@
 
 // TmpfsMounts returns the list of tmpfs mounts
 func (container *Container) TmpfsMounts() []Mount {
-	return nil
+	var mounts []Mount
+	return mounts
 }
 
 // UpdateContainer updates configuration of a container
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index abde5ce..1d9ee41 100644
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -1639,9 +1639,15 @@
 }
 
 _docker_swarm_update() {
+	case "$prev" in
+		--auto-accept|--cert-expiry|--dispatcher-heartbeat|--secret|--task-history-limit)
+			return
+			;;
+	esac
+
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--auto-accept --dispatcher-heartbeat --help --secret --task-history-limit" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--auto-accept --cert-expiry --dispatcher-heartbeat --help --secret --task-history-limit" -- "$cur" ) )
 			;;
 	esac
 }
diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker
index 69e7dbd..81c628e 100644
--- a/contrib/completion/zsh/_docker
+++ b/contrib/completion/zsh/_docker
@@ -1,4 +1,4 @@
-#compdef docker
+#compdef docker dockerd
 #
 # zsh completion for docker (http://docker.com)
 #
@@ -1410,6 +1410,13 @@
     return ret
 }
 
+_dockerd() {
+    integer ret=1
+    words[1]='daemon'
+    __docker_subcommand && ret=0
+    return ret
+}
+
 _docker "$@"
 
 # Local Variables:
diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service
index 9b0ce63..92d7246 100644
--- a/contrib/init/systemd/docker.service
+++ b/contrib/init/systemd/docker.service
@@ -20,6 +20,8 @@
 TimeoutStartSec=0
 # set delegate yes so that systemd does not reset the cgroups of docker containers
 Delegate=yes
+# kill only the docker process, not all processes in the cgroup
+KillMode=process
 
 [Install]
 WantedBy=multi-user.target
diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go
index 6f3143b..8c70526 100644
--- a/daemon/cluster/cluster.go
+++ b/daemon/cluster/cluster.go
@@ -28,25 +28,28 @@
 
 const swarmDirName = "swarm"
 const controlSocket = "control.sock"
-const swarmConnectTimeout = 10 * time.Second
+const swarmConnectTimeout = 20 * time.Second
 const stateFile = "docker-state.json"
 
 const (
 	initialReconnectDelay = 100 * time.Millisecond
-	maxReconnectDelay     = 10 * time.Second
+	maxReconnectDelay     = 30 * time.Second
 )
 
 // ErrNoManager is returned then a manager-only function is called on non-manager
-var ErrNoManager = fmt.Errorf("this node is not participating as a Swarm manager")
+var ErrNoManager = fmt.Errorf("This node is not participating as a Swarm manager")
 
 // ErrNoSwarm is returned on leaving a cluster that was never initialized
-var ErrNoSwarm = fmt.Errorf("this node is not part of Swarm")
+var ErrNoSwarm = fmt.Errorf("This node is not part of Swarm")
 
 // ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated
-var ErrSwarmExists = fmt.Errorf("this node is already part of a Swarm")
+var ErrSwarmExists = fmt.Errorf("This node is already part of a Swarm cluster. Use \"docker swarm leave\" to leave this cluster and join another one.")
+
+// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet.
+var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.")
 
 // ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached.
-var ErrSwarmJoinTimeoutReached = fmt.Errorf("timeout reached before node was joined")
+var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. Attempt to join the cluster will continue in the background. Use \"docker info\" command to see the current Swarm status of your node.")
 
 type state struct {
 	ListenAddr string
@@ -111,7 +114,7 @@
 	select {
 	case <-time.After(swarmConnectTimeout):
 		logrus.Errorf("swarm component could not be started before timeout was reached")
-	case <-n.Ready(context.Background()):
+	case <-n.Ready():
 	case <-ctx.Done():
 	}
 	if ctx.Err() != nil {
@@ -213,7 +216,7 @@
 
 	go func() {
 		select {
-		case <-node.Ready(context.Background()):
+		case <-node.Ready():
 			c.Lock()
 			c.reconnectDelay = initialReconnectDelay
 			c.Unlock()
@@ -249,13 +252,14 @@
 // Init initializes new cluster from user provided request.
 func (c *Cluster) Init(req types.InitRequest) (string, error) {
 	c.Lock()
-	if c.node != nil {
+	if node := c.node; node != nil {
 		c.Unlock()
 		if !req.ForceNewCluster {
-			return "", ErrSwarmExists
+			return "", errSwarmExists(node)
 		}
 		ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
 		defer cancel()
+		c.cancelReconnect()
 		if err := c.node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
 			return "", err
 		}
@@ -273,7 +277,7 @@
 	c.Unlock()
 
 	select {
-	case <-n.Ready(context.Background()):
+	case <-n.Ready():
 		if err := initAcceptancePolicy(n, req.Spec.AcceptancePolicy); err != nil {
 			return "", err
 		}
@@ -297,9 +301,9 @@
 // Join makes current Cluster part of an existing swarm cluster.
 func (c *Cluster) Join(req types.JoinRequest) error {
 	c.Lock()
-	if c.node != nil {
+	if node := c.node; node != nil {
 		c.Unlock()
-		return ErrSwarmExists
+		return errSwarmExists(node)
 	}
 	// todo: check current state existing
 	if len(req.RemoteAddrs) == 0 {
@@ -312,23 +316,29 @@
 	}
 	c.Unlock()
 
-	select {
-	case <-time.After(swarmConnectTimeout):
-		go c.reconnectOnFailure(ctx)
-		if nodeid := n.NodeID(); nodeid != "" {
-			return fmt.Errorf("Timeout reached before node was joined. Your cluster settings may be preventing this node from automatically joining. To accept this node into cluster run `docker node accept %v` in an existing cluster manager", nodeid)
+	certificateRequested := n.CertificateRequested()
+	for {
+		select {
+		case <-certificateRequested:
+			if n.NodeMembership() == swarmapi.NodeMembershipPending {
+				return fmt.Errorf("Your node is in the process of joining the cluster but needs to be accepted by existing cluster member.\nTo accept this node into cluster run \"docker node accept %v\" in an existing cluster manager. Use \"docker info\" command to see the current Swarm status of your node.", n.NodeID())
+			}
+			certificateRequested = nil
+		case <-time.After(swarmConnectTimeout):
+			// attempt to connect will continue in background, also reconnecting
+			go c.reconnectOnFailure(ctx)
+			return ErrSwarmJoinTimeoutReached
+		case <-n.Ready():
+			go c.reconnectOnFailure(ctx)
+			return nil
+		case <-ctx.Done():
+			c.RLock()
+			defer c.RUnlock()
+			if c.err != nil {
+				return c.err
+			}
+			return ctx.Err()
 		}
-		return ErrSwarmJoinTimeoutReached
-	case <-n.Ready(context.Background()):
-		go c.reconnectOnFailure(ctx)
-		return nil
-	case <-ctx.Done():
-		c.RLock()
-		defer c.RUnlock()
-		if c.err != nil {
-			return c.err
-		}
-		return ctx.Err()
 	}
 }
 
@@ -379,10 +389,11 @@
 	if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") {
 		return err
 	}
-	nodeID := node.NodeID()
-	for _, id := range c.config.Backend.ListContainersForNode(nodeID) {
-		if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
-			logrus.Errorf("error removing %v: %v", id, err)
+	if nodeID := node.NodeID(); nodeID != "" {
+		for _, id := range c.config.Backend.ListContainersForNode(nodeID) {
+			if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil {
+				logrus.Errorf("error removing %v: %v", id, err)
+			}
 		}
 	}
 	c.Lock()
@@ -444,12 +455,12 @@
 		return ErrNoManager
 	}
 
-	swarmSpec, err := convert.SwarmSpecToGRPC(spec)
+	swarm, err := getSwarm(c.getRequestContext(), c.client)
 	if err != nil {
 		return err
 	}
 
-	swarm, err := getSwarm(c.getRequestContext(), c.client)
+	swarmSpec, err := convert.SwarmSpecToGRPCandMerge(spec, &swarm.Spec)
 	if err != nil {
 		return err
 	}
@@ -918,7 +929,7 @@
 		if err != nil {
 			return err
 		}
-		s.Networks[i] = types.NetworkAttachmentConfig{Target: apiNetwork.ID}
+		s.Networks[i].Target = apiNetwork.ID
 	}
 	return nil
 }
@@ -1004,6 +1015,13 @@
 	return
 }
 
+func errSwarmExists(node *swarmagent.Node) error {
+	if node.NodeMembership() != swarmapi.NodeMembershipAccepted {
+		return ErrPendingSwarmExists
+	}
+	return ErrSwarmExists
+}
+
 func initAcceptancePolicy(node *swarmagent.Node, acceptancePolicy types.AcceptancePolicy) error {
 	ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
 	for conn := range node.ListenControlSocket(ctx) {
@@ -1030,7 +1048,7 @@
 			}
 			spec := &cluster.Spec
 
-			if err := convert.SwarmSpecUpdateAcceptancePolicy(spec, acceptancePolicy); err != nil {
+			if err := convert.SwarmSpecUpdateAcceptancePolicy(spec, acceptancePolicy, nil); err != nil {
 				return fmt.Errorf("error updating cluster settings: %v", err)
 			}
 			_, err := client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{
diff --git a/daemon/cluster/convert/network.go b/daemon/cluster/convert/network.go
index 53b9524..6bb9a81 100644
--- a/daemon/cluster/convert/network.go
+++ b/daemon/cluster/convert/network.go
@@ -148,17 +148,22 @@
 		}
 	}
 
-	return basictypes.NetworkResource{
+	nr := basictypes.NetworkResource{
 		ID:         n.ID,
 		Name:       n.Spec.Annotations.Name,
 		Scope:      "swarm",
-		Driver:     n.DriverState.Name,
 		EnableIPv6: spec.Ipv6Enabled,
 		IPAM:       ipam,
 		Internal:   spec.Internal,
-		Options:    n.DriverState.Options,
 		Labels:     n.Spec.Annotations.Labels,
 	}
+
+	if n.DriverState != nil {
+		nr.Driver = n.DriverState.Name
+		nr.Options = n.DriverState.Options
+	}
+
+	return nr
 }
 
 // BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec.
diff --git a/daemon/cluster/convert/swarm.go b/daemon/cluster/convert/swarm.go
index cb9d7d0..56cd0ed 100644
--- a/daemon/cluster/convert/swarm.go
+++ b/daemon/cluster/convert/swarm.go
@@ -49,7 +49,8 @@
 			Autoaccept: policy.Autoaccept,
 		}
 		if policy.Secret != nil {
-			p.Secret = string(policy.Secret.Data)
+			secret := string(policy.Secret.Data)
+			p.Secret = &secret
 		}
 		swarm.Spec.AcceptancePolicy.Policies = append(swarm.Spec.AcceptancePolicy.Policies, p)
 	}
@@ -57,8 +58,8 @@
 	return swarm
 }
 
-// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec.
-func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) {
+// SwarmSpecToGRPCandMerge converts a Spec to a grpc ClusterSpec and merge AcceptancePolicy from an existing grpc ClusterSpec if provided.
+func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) {
 	spec := swarmapi.ClusterSpec{
 		Annotations: swarmapi.Annotations{
 			Name:   s.Name,
@@ -82,15 +83,18 @@
 		},
 	}
 
-	if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy); err != nil {
+	if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
 		return swarmapi.ClusterSpec{}, err
 	}
+
 	return spec, nil
 }
 
 // SwarmSpecUpdateAcceptancePolicy updates a grpc ClusterSpec using AcceptancePolicy.
-func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy) error {
+func SwarmSpecUpdateAcceptancePolicy(spec *swarmapi.ClusterSpec, acceptancePolicy types.AcceptancePolicy, oldSpec *swarmapi.ClusterSpec) error {
 	spec.AcceptancePolicy.Policies = nil
+	hashs := make(map[string][]byte)
+
 	for _, p := range acceptancePolicy.Policies {
 		role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(p.Role))]
 		if !ok {
@@ -102,11 +106,24 @@
 			Autoaccept: p.Autoaccept,
 		}
 
-		if p.Secret != "" {
-			hashPwd, _ := bcrypt.GenerateFromPassword([]byte(p.Secret), 0)
+		if p.Secret != nil {
+			if *p.Secret == "" { // if provided secret is empty, it means erase previous secret.
+				policy.Secret = nil
+			} else { // if provided secret is not empty, we generate a new one.
+				hashPwd, ok := hashs[*p.Secret]
+				if !ok {
+					hashPwd, _ = bcrypt.GenerateFromPassword([]byte(*p.Secret), 0)
+					hashs[*p.Secret] = hashPwd
+				}
+				policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
+					Data: hashPwd,
+					Alg:  "bcrypt",
+				}
+			}
+		} else if oldSecret := getOldSecret(oldSpec, policy.Role); oldSecret != nil { // else use the old one.
 			policy.Secret = &swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret{
-				Data: hashPwd,
-				Alg:  "bcrypt",
+				Data: oldSecret.Data,
+				Alg:  oldSecret.Alg,
 			}
 		}
 
@@ -114,3 +131,15 @@
 	}
 	return nil
 }
+
+func getOldSecret(oldSpec *swarmapi.ClusterSpec, role swarmapi.NodeRole) *swarmapi.AcceptancePolicy_RoleAdmissionPolicy_HashedSecret {
+	if oldSpec == nil {
+		return nil
+	}
+	for _, p := range oldSpec.AcceptancePolicy.Policies {
+		if p.Role == role {
+			return p.Secret
+		}
+	}
+	return nil
+}
diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go
index f24d91b..c46bfab 100644
--- a/daemon/cluster/executor/container/adapter.go
+++ b/daemon/cluster/executor/container/adapter.go
@@ -39,7 +39,7 @@
 
 func (c *containerAdapter) pullImage(ctx context.Context) error {
 	// if the image needs to be pulled, the auth config will be retrieved and updated
-	encodedAuthConfig := c.container.task.ServiceAnnotations.Labels[fmt.Sprintf("%v.registryauth", systemLabelPrefix)]
+	encodedAuthConfig := c.container.spec().RegistryAuth
 
 	authConfig := &types.AuthConfig{}
 	if encodedAuthConfig != "" {
@@ -126,7 +126,6 @@
 
 	if nc != nil {
 		for n, ep := range nc.EndpointsConfig {
-			logrus.Errorf("CONNECT %s : %v", n, ep.IPAMConfig.IPv4Address)
 			if err := backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil {
 				return err
 			}
diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go
index 1326bf1..fcd0cba 100644
--- a/daemon/cluster/executor/container/container.go
+++ b/daemon/cluster/executor/container/container.go
@@ -348,6 +348,7 @@
 	log.Printf("Creating service config in agent for t = %+v", c.task)
 	svcCfg := &clustertypes.ServiceConfig{
 		Name:             c.task.ServiceAnnotations.Name,
+		Aliases:          make(map[string][]string),
 		ID:               c.task.ServiceID,
 		VirtualAddresses: make(map[string]*clustertypes.VirtualAddress),
 	}
@@ -357,6 +358,9 @@
 			// We support only IPv4 virtual IP for now.
 			IPv4: c.virtualIP(na.Network.ID),
 		}
+		if len(na.Aliases) > 0 {
+			svcCfg.Aliases[na.Network.ID] = na.Aliases
+		}
 	}
 
 	if c.task.Endpoint != nil {
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
index 7cb1113..efdd12a 100644
--- a/daemon/cluster/executor/container/controller.go
+++ b/daemon/cluster/executor/container/controller.go
@@ -2,13 +2,13 @@
 
 import (
 	"fmt"
-	"strings"
 
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
 	"github.com/docker/engine-api/types"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
@@ -84,31 +84,32 @@
 		return err
 	}
 
-	for {
-		if err := r.checkClosed(); err != nil {
-			return err
-		}
-		if err := r.adapter.create(ctx, r.backend); err != nil {
-			if isContainerCreateNameConflict(err) {
-				if _, err := r.adapter.inspect(ctx); err != nil {
-					return err
-				}
+	if err := r.adapter.pullImage(ctx); err != nil {
+		// NOTE(stevvooe): We always try to pull the image to make sure we have
+		// the most up to date version. This will return an error, but we only
+		// log it. If the image truly doesn't exist, the create below will
+		// error out.
+		//
+		// This gives us some nice behavior where we use up to date versions of
+		// mutable tags, but will still run if the old image is available but a
+		// registry is down.
+		//
+		// If you don't want this behavior, lock down your image to an
+		// immutable tag or digest.
+		log.G(ctx).WithError(err).Error("pulling image failed")
+	}
 
-				// container is already created. success!
-				return exec.ErrTaskPrepared
-			}
-
-			if !strings.Contains(err.Error(), "No such image") { // todo: better error detection
-				return err
-			}
-			if err := r.adapter.pullImage(ctx); err != nil {
+	if err := r.adapter.create(ctx, r.backend); err != nil {
+		if isContainerCreateNameConflict(err) {
+			if _, err := r.adapter.inspect(ctx); err != nil {
 				return err
 			}
 
-			continue // retry to create the container
+			// container is already created. success!
+			return exec.ErrTaskPrepared
 		}
 
-		break
+		return err
 	}
 
 	return nil
@@ -135,7 +136,7 @@
 	}
 
 	if err := r.adapter.start(ctx); err != nil {
-		return err
+		return errors.Wrap(err, "starting container failed")
 	}
 
 	return nil
diff --git a/daemon/cluster/provider/network.go b/daemon/cluster/provider/network.go
index d959c15..a383343 100644
--- a/daemon/cluster/provider/network.go
+++ b/daemon/cluster/provider/network.go
@@ -31,6 +31,7 @@
 type ServiceConfig struct {
 	ID               string
 	Name             string
+	Aliases          map[string][]string
 	VirtualAddresses map[string]*VirtualAddress
 	ExposedPorts     []*PortConfig
 }
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 66b088a..7ec71a1 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -179,7 +179,7 @@
 					logrus.Errorf("Failed to restore with containerd: %q", err)
 					return
 				}
-				if !c.HostConfig.NetworkMode.IsContainer() {
+				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
 					options, err := daemon.buildSandboxOptions(c)
 					if err != nil {
 						logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
index af0a536..b48bcb9 100644
--- a/daemon/graphdriver/overlay2/overlay.go
+++ b/daemon/graphdriver/overlay2/overlay.go
@@ -114,19 +114,10 @@
 		backingFs = fsName
 	}
 
-	// check if they are running over btrfs, aufs, zfs or overlay
+	// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
 	switch fsMagic {
-	case graphdriver.FsMagicBtrfs:
-		logrus.Error("'overlay' is not supported over btrfs.")
-		return nil, graphdriver.ErrIncompatibleFS
-	case graphdriver.FsMagicAufs:
-		logrus.Error("'overlay' is not supported over aufs.")
-		return nil, graphdriver.ErrIncompatibleFS
-	case graphdriver.FsMagicZfs:
-		logrus.Error("'overlay' is not supported over zfs.")
-		return nil, graphdriver.ErrIncompatibleFS
-	case graphdriver.FsMagicOverlay:
-		logrus.Error("'overlay' is not supported over overlay.")
+	case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
+		logrus.Errorf("'overlay2' is not supported over %s", backingFs)
 		return nil, graphdriver.ErrIncompatibleFS
 	}
 
diff --git a/daemon/list.go b/daemon/list.go
index 5d45de6..128d9ed 100644
--- a/daemon/list.go
+++ b/daemon/list.go
@@ -465,6 +465,7 @@
 			GlobalIPv6Address:   network.GlobalIPv6Address,
 			GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen,
 			MacAddress:          network.MacAddress,
+			NetworkID:           network.NetworkID,
 		}
 		if network.IPAMConfig != nil {
 			networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{
diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go
index afec89a..9492c17 100644
--- a/daemon/oci_linux.go
+++ b/daemon/oci_linux.go
@@ -480,9 +480,10 @@
 		}
 
 		if m.Source == "tmpfs" {
+			data := c.HostConfig.Tmpfs[m.Destination]
 			options := []string{"noexec", "nosuid", "nodev", volume.DefaultPropagationMode}
-			if m.Data != "" {
-				options = append(options, strings.Split(m.Data, ",")...)
+			if data != "" {
+				options = append(options, strings.Split(data, ",")...)
 			}
 
 			merged, err := mount.MergeTmpfsOptions(options)
diff --git a/daemon/volumes.go b/daemon/volumes.go
index fd70ce4..d178d41 100644
--- a/daemon/volumes.go
+++ b/daemon/volumes.go
@@ -129,7 +129,8 @@
 			return err
 		}
 
-		if binds[bind.Destination] {
+		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
+		if binds[bind.Destination] || tmpfsExists {
 			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
 		}
 
diff --git a/daemon/volumes_unix.go b/daemon/volumes_unix.go
index 5b2cd4b..ca0628c 100644
--- a/daemon/volumes_unix.go
+++ b/daemon/volumes_unix.go
@@ -16,7 +16,15 @@
 // /etc/resolv.conf, and if it is not, appends it to the array of mounts.
 func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
 	var mounts []container.Mount
+	// TODO: tmpfs mounts should be part of Mountpoints
+	tmpfsMounts := make(map[string]bool)
+	for _, m := range c.TmpfsMounts() {
+		tmpfsMounts[m.Destination] = true
+	}
 	for _, m := range c.MountPoints {
+		if tmpfsMounts[m.Destination] {
+			continue
+		}
 		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
 			return nil, err
 		}
diff --git a/docs/getstarted/index.md b/docs/getstarted/index.md
new file mode 100644
index 0000000..b13b1c3
--- /dev/null
+++ b/docs/getstarted/index.md
@@ -0,0 +1,42 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/started/",
+"/windows/started/",
+"/linux/started/",
+]
+title = "Get Started with Docker"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_all"
+parent = "engine_use"
+weight="-80"
++++
+<![end-metadata]-->
+
+
+# Get Started with Docker
+
+This tutorial is for non-technical users who are interested in learning more about Docker. By following these steps, you'll learn fundamental Docker features while working through some simple tasks. You'll learn how to:
+
+* install Docker software for your platform
+* run a software image in a container
+* browse for an image on Docker Hub
+* create your own image and run it in a container
+* create a Docker Hub account and an image repository
+* create an image of your own
+* push your image to Docker Hub for others to use
+
+The getting started was user tested to reduce the chance of users having problems. For the best chance of success, follow the steps as written the first time before exploring on your own. It takes approximately 45 minutes to complete.
+
+
+### Make sure you understand...
+
+This getting started uses Docker Engine CLI commands entered on the command line of a terminal window. You don't need to be a wizard at the command line, but you should be familiar with how to open your favorite shell or terminal, and run basic commands in that environment. It helps (but isn't required) to know how to navigate a directory tree, manipulate files, list running processes, and so forth.
+
+
+Go to [the next page to install](step_one.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/last_page.md b/docs/getstarted/last_page.md
new file mode 100644
index 0000000..9b45019
--- /dev/null
+++ b/docs/getstarted/last_page.md
@@ -0,0 +1,70 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/last_page/",
+"/windows/last_page/",
+"/linux/last_page/",
+]
+title = "Learning more"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_learn_more"
+parent = "getstart_all"
+weight = 7
++++
+<![end-metadata]-->
+
+# Learning more
+
+This getting started provided very basic essentials for using Docker on Mac, Windows, and Linux. If you want to learn more with regard to end-to-end development, start with the full install instructions and feature overviews, then follow up with more advanced tutorials and user guides.
+
+Depending on your interest, the Docker documentation contains a wealth of information.  Here are some places to start:
+
+<style type="text/css">
+</style>
+<table class="tutorial">
+  <tr>
+    <th class="tg-031e">If you are looking for</th>
+    <th class="tg-031e">Where to find it</th>
+  </tr>
+  <tr>
+    <td class="tg-031e">More about Docker for Mac, features, examples, FAQs, relationship to Docker Machine and Docker Toolbox, and how this fits in the Docker ecosystem</td>
+    <td class="tg-031e">[Getting Started with Docker for Mac](/docker-for-mac/index.md)</td>
+  </tr>
+  <tr>
+    <td class="tg-031e">More about Docker for Windows, features, examples, FAQs, relationship to Docker Machine and Docker Toolbox, and how this fits in the Docker ecosystem</td>
+    <td class="tg-031e">[Getting Started with Docker for Windows](/docker-for-windows/index.md)</td>
+  </tr>
+  <tr>
+    <td class="tg-031e">More about Docker Toolbox</td>
+    <td class="tg-031e">[Docker Toolbox Overview](/toolbox/overview.md)</td>
+  </tr>
+  <tr>
+    <td class="tg-031e">More about Docker for Linux distributions</td>
+    <td class="tg-031e">[Install Docker Engine on Linux](/engine/installation/linux/index.md)</td>
+  </tr>
+  <tr>
+    <td class="tg-031e">More advanced tutorials on running containers, building your own images, networking containers, managing data for containers, and storing images on Docker Hub</td>
+    <td class="tg-031e"> [Learn by example](/engine/tutorials/index.md)</td>
+  </tr>
+  <tr>
+    <td class="tg-031e">Information about the Docker product line</td>
+    <td class="tg-031e"><a href="http://www.docker.com/products/">The product explainer is a good place to start.</a></td>
+  </tr>
+
+  <tr>
+    <td class="tg-031e">How to set up an automated build on Docker Hub</td>
+    <td class="tg-031e"><a href="https://docs.docker.com/docker-hub/">Docker Hub documentation</a></td>
+  </tr>
+  <tr>
+    <td class="tg-031e">How to run a multi-container application with Compose</td>
+    <td class="tg-031e"> [Docker Compose documentation](/compose/overview.md)
+    </td>
+  </tr>
+</table>
+
+
+
+
+&nbsp;
diff --git a/docs/getstarted/linux_install_help.md b/docs/getstarted/linux_install_help.md
new file mode 100644
index 0000000..f9e73d0
--- /dev/null
+++ b/docs/getstarted/linux_install_help.md
@@ -0,0 +1,44 @@
+<!--[metadata]>
++++
+aliases = ["/mac/started/"]
+title = "Install Docker and run hello-world"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker, install"]
+identifier = "getstart_linux_install"
+parent = "getstart_all"
+weight="-80"
++++
+<![end-metadata]-->
+
+# Example: Install Docker on Ubuntu Linux
+
+This installation procedure is for users who are unfamiliar with package
+managers, and just want to try out the Getting Started tutorial while running Docker on Linux. If you are comfortable with package managers, prefer not to use
+`curl`, or have problems installing and want to troubleshoot, please use our
+`apt` and `yum` <a href="https://docs.docker.com/engine/installation/"
+target="_blank">repositories instead for your installation</a>.
+
+1. Log into your Ubuntu installation as a user with `sudo` privileges.
+
+2. Verify that you have `curl` installed.
+
+        $ which curl
+
+    If `curl` isn't installed, install it after updating your manager:
+
+        $ sudo apt-get update
+        $ sudo apt-get install curl
+
+3. Get the latest Docker package.
+
+        $ curl -fsSL https://get.docker.com/ | sh
+
+    The system prompts you for your `sudo` password. Then, it downloads and
+    installs Docker and its dependencies.
+
+    >**Note**: If your company is behind a filtering proxy, you may find that the
+    >`apt-key`
+    >command fails for the Docker repo during installation. To work around this,
+    >add the key directly using the following:
+    >
+    >       $ curl -fsSL https://get.docker.com/gpg | sudo apt-key add -
diff --git a/docs/getstarted/step_five.md b/docs/getstarted/step_five.md
new file mode 100644
index 0000000..1060fd3
--- /dev/null
+++ b/docs/getstarted/step_five.md
@@ -0,0 +1,78 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/step_five/",
+"/windows/step_five/",
+"/linux/step_five/",
+]
+title = "Create a Docker Hub account & repository"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_docker_hub"
+parent = "getstart_all"
+weight = 5
++++
+<![end-metadata]-->
+
+# Create a Docker Hub account & repository
+
+You've built something really cool, you should share it. In this next section,
+you'll do just that. You'll need a Docker Hub account. Then, you'll push your
+image up to it so other people with Docker Engine can run it.
+
+
+## Step 1: Sign up for an account
+
+1. Use your browser to navigate to <a href="https://hub.docker.com/?utm_source=getting_started_guide&utm_medium=embedded_MacOSX&utm_campaign=create_docker_hub_account" target="_blank">the Docker Hub signup page</a>.
+
+	Your browser displays the page.
+
+	![Docker Hub signup](tutimg/hub_signup.png)
+
+2. Fill out the form on the signup page.
+
+	Docker Hub is free. Docker does need a name, password, and email address.
+
+3. Press **Signup**.
+
+	The browser displays the welcome to Docker Hub page.
+
+## Step 2: Verify your email and add a repository
+
+Before you can share anything on the hub, you need to verify your email address.
+
+1. Open your email inbox.
+
+2. Look for the email titled `Please confirm email for your Docker Hub account`.
+
+	  If you don't see the email, check your Spam folder or wait a moment for the email to arrive.
+
+3. Open the email and click the **Confirm Your Email** button.
+
+	 The browser opens Docker Hub to your profile page.
+
+4. Choose **Create Repository**.
+
+	The browser opens the **Create Repository** page.
+
+5. Provide a Repository Name and Short Description.
+
+6. Make sure Visibility is set to **Public**.
+
+    When you are done, your form should look similar to the following:
+
+	![Add repo](tutimg/add_repository.png)
+
+7. Press **Create** when you are done.
+
+	Docker Hub creates your new repository.
+
+## Where to go next
+
+On this page, you opened an account on Docker Hub and created a new repository.
+In the next section, you populate the repository [by tagging and pushing the
+image you created earlier](step_six.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/step_four.md b/docs/getstarted/step_four.md
new file mode 100644
index 0000000..27656b3
--- /dev/null
+++ b/docs/getstarted/step_four.md
@@ -0,0 +1,225 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/step_four/",
+"/windows/step_four/",
+"/linux/step_four/",
+]
+title = "Build your own image"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_build_image"
+parent = "getstart_all"
+weight = 4
++++
+<![end-metadata]-->
+
+# Build your own image
+
+The `whalesay` image could be improved. It would be nice if you didn't have to
+think of something to say. And you type a lot to get `whalesay` to talk.
+
+    docker run docker/whalesay cowsay boo-boo
+
+In this next section, you will improve the `whalesay` image by building a new version that "talks on its own" and requires fewer words to run.
+
+## Step 1: Write a Dockerfile
+
+In this step, you use your favorite text editor to write a short Dockerfile.  A
+Dockerfile describes the software that is "baked" into an image. It isn't just
+ingredients though, it can tell the software what environment to use or what
+commands to run. Your recipe is going to be very short.
+
+1. Go back to your command terminal window.
+
+2. Make a new directory by typing `mkdir mydockerbuild` and pressing RETURN.
+
+        $ mkdir mydockerbuild
+
+    This directory serves as the "context" for your build. The context just means it contains all the things you need to build your image.
+
+3. Change to your new directory.
+
+        $ cd mydockerbuild
+
+    Right now the directory is empty.
+
+4. Create a Dockerfile in the directory by typing `touch Dockerfile` and pressing RETURN.
+
+        $ touch Dockerfile
+
+    The command appears to do nothing but it actually creates the Dockerfile in the current directory.  Just type `ls Dockerfile` to see it.
+
+        $ ls Dockerfile
+        Dockerfile
+
+5. Open the `Dockerfile` in a visual text editor like <a href="https://atom.io/" target="_blank">Atom</a> or <a href="https://www.sublimetext.com/" target="_blank">Sublime</a>, or a text based editor like `vi`, or `nano` (https://www.nano-editor.org/).
+
+6. Add a line to the file like this:
+
+        FROM docker/whalesay:latest
+
+      The `FROM` keyword tells Docker which image your image is based on. Whalesay is cute and has the `cowsay` program already, so we'll start there.
+
+7. Now, add the `fortunes` program to the image.
+
+        RUN apt-get -y update && apt-get install -y fortunes
+
+      The `fortunes` program has a command that prints out wise sayings for our whale to say. So, the first step is to install it. This line installs the software into the image.
+
+8. Once the image has the software it needs, you instruct the software to run
+    when the image is loaded.
+
+          CMD /usr/games/fortune -a | cowsay
+
+    This line tells the `fortune` program to pass a nifty quote to the `cowsay` program.
+
+9. Check your work, your file should look like this:
+
+        FROM docker/whalesay:latest
+        RUN apt-get -y update && apt-get install -y fortunes
+        CMD /usr/games/fortune -a | cowsay
+
+10. Save and close your Dockerfile.
+
+    At this point, you have all your software ingredients and behaviors described in a Dockerfile. You are ready to build a new image.
+
+## Step 2: Build an image from your Dockerfile
+
+1. At the command line, make sure the Dockerfile is in the current directory by typing `cat Dockerfile`
+
+        $ cat Dockerfile
+        FROM docker/whalesay:latest
+
+        RUN apt-get -y update && apt-get install -y fortunes
+
+        CMD /usr/games/fortune -a | cowsay
+
+2. Now, build your new image by typing the `docker build -t docker-whale .` command in your terminal (don't forget the . period).
+
+        $ docker build -t docker-whale .
+        Sending build context to Docker daemon 158.8 MB
+        ...snip...
+        Removing intermediate container a8e6faa88df3
+        Successfully built 7d9495d03763
+
+	  The command takes several seconds to run and reports its outcome. Before
+    you do anything with the new image, take a minute to learn about the
+    Dockerfile build process.
+
+## Step 3: Learn about the build process
+
+The `docker build -t docker-whale .` command takes the `Dockerfile` in the
+current directory, and builds an image called `docker-whale` on your local
+machine. The command takes about a minute and its output looks really long and
+complex. In this section, you learn what each message means.
+
+First Docker checks to make sure it has everything it needs to build.
+
+    Sending build context to Docker daemon 158.8 MB
+
+Then, Docker loads the `whalesay` image. It already has this image
+locally as you might recall from the last page. So, Docker doesn't need to
+download it.
+
+    Step 0 : FROM docker/whalesay:latest
+     ---> fb434121fc77
+
+Docker moves onto the next step which is to update the `apt-get` package
+manager. This takes a lot of lines, no need to list them all again here.
+
+    Step 1 : RUN apt-get -y update && apt-get install -y fortunes
+     ---> Running in 27d224dfa5b2
+    Ign http://archive.ubuntu.com trusty InRelease
+    Ign http://archive.ubuntu.com trusty-updates InRelease
+    Ign http://archive.ubuntu.com trusty-security InRelease
+    Hit http://archive.ubuntu.com trusty Release.gpg
+    ....snip...
+    Get:15 http://archive.ubuntu.com trusty-security/restricted amd64 Packages [14.8 kB]
+    Get:16 http://archive.ubuntu.com trusty-security/universe amd64 Packages [134 kB]
+    Reading package lists...
+    ---> eb06e47a01d2
+
+Then, Docker installs the new `fortunes` software.
+
+    Removing intermediate container e2a84b5f390f
+    Step 2 : RUN apt-get install -y fortunes
+     ---> Running in 23aa52c1897c
+    Reading package lists...
+    Building dependency tree...
+    Reading state information...
+    The following extra packages will be installed:
+      fortune-mod fortunes-min librecode0
+    Suggested packages:
+      x11-utils bsdmainutils
+    The following NEW packages will be installed:
+      fortune-mod fortunes fortunes-min librecode0
+    0 upgraded, 4 newly installed, 0 to remove and 3 not upgraded.
+    Need to get 1961 kB of archives.
+    After this operation, 4817 kB of additional disk space will be used.
+    Get:1 http://archive.ubuntu.com/ubuntu/ trusty/main librecode0 amd64 3.6-21 [771 kB]
+    ...snip......
+    Setting up fortunes (1:1.99.1-7) ...
+    Processing triggers for libc-bin (2.19-0ubuntu6.6) ...
+     ---> c81071adeeb5
+    Removing intermediate container 23aa52c1897c
+
+Finally, Docker finishes the build and reports its outcome.		
+
+    Step 3 : CMD /usr/games/fortune -a | cowsay
+     ---> Running in a8e6faa88df3
+     ---> 7d9495d03763
+    Removing intermediate container a8e6faa88df3
+    Successfully built 7d9495d03763
+
+
+## Step 4: Run your new docker-whale
+
+In this step, you verify the new image is on your computer and then you run your new image.
+
+1. Open a command line terminal.
+
+2. Type `docker images` and press RETURN.
+
+    This command, you might remember, lists the images you have locally.
+
+        $ docker images
+        REPOSITORY           TAG          IMAGE ID          CREATED             VIRTUAL SIZE
+        docker-whale         latest       7d9495d03763      4 minutes ago       273.7 MB
+        docker/whalesay      latest       fb434121fc77      4 hours ago         247 MB
+        hello-world          latest       91c95931e552      5 weeks ago         910 B
+
+3. Run your new image by typing `docker run docker-whale` and pressing RETURN.
+
+        $ docker run docker-whale
+         _________________________________________
+        / "He was a modest, good-humored boy. It  \
+        \ was Oxford that made him insufferable." /
+         -----------------------------------------
+                  \
+                   \
+                    \     
+                                  ##        .            
+                            ## ## ##       ==            
+                         ## ## ## ##      ===            
+                     /""""""""""""""""___/ ===        
+                ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~   
+                     \______ o          __/            
+                      \    \        __/             
+                        \____\______/   
+
+As you can see, you've made the whale a lot smarter. It finds its own
+things to say and the command line is a lot shorter!  You may also notice
+that Docker didn't have to download anything.  That is because the image was
+built locally and is already available.
+
+## Where to go next
+
+On this page, you learned to build an image by writing your own Dockerfile.
+You ran your image in a container. You also just used Linux from your Mac yet
+again. In the next section, you take the first step in sharing your image by
+[creating a Docker Hub account](step_five.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/step_one.md b/docs/getstarted/step_one.md
new file mode 100644
index 0000000..c61a2d1
--- /dev/null
+++ b/docs/getstarted/step_one.md
@@ -0,0 +1,140 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/step_one/",
+"/windows/step_one/",
+"/linux/step_one/",
+]
+title = "Install Docker and run hello-world"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker, install"]
+[menu.main]
+identifier = "getstart_all_install"
+parent = "getstart_all"
+weight = 1
++++
+<![end-metadata]-->
+
+# Install Docker
+
+## Step 1: Get Docker
+
+### Docker for Mac
+
+Docker for Mac is our newest offering for the Mac. It runs as a native Mac application and uses <a href="https://github.com/mist64/xhyve/" target="_blank">xhyve</a> to virtualize the Docker Engine environment and Linux kernel-specific features for the Docker daemon.
+
+<a class="button" href="https://download.docker.com/mac/beta/Docker.dmg">Get Docker for Mac</a>
+
+#### Install Prerequisites
+
+- Mac must be a 2010 or newer model, with Intel's hardware support for memory management unit (MMU) virtualization; i.e., Extended Page Tables (EPT)
+
+- OS X 10.10.3 Yosemite or newer
+
+- At least 4GB of RAM
+
+- VirtualBox prior to version 4.3.30 must NOT be installed (it is incompatible with Docker for Mac). Docker for Mac will error out on install in this case. Uninstall the older version of VirtualBox and re-try the install.
+
+#### Docker Toolbox for the Mac
+
+If you have an earlier Mac that doesn't meet the Docker for Mac prerequisites, <a href="https://www.docker.com/products/docker-toolbox" target="_blank">get Docker Toolbox</a> for the Mac.
+
+See [Docker Toolbox Overview](/toolbox/overview.md) for help on installing Docker with Toolbox.
+
+### Docker for Windows
+
+Docker for Windows is our newest offering for PCs. It runs as a native Windows application and uses Hyper-V to virtualize the Docker Engine environment and Linux kernel-specific features for the Docker daemon.
+
+<a class="button" href="https://download.docker.com/win/beta/InstallDocker.msi">Get Docker for Windows</a>
+
+#### Install Prerequisites
+
+* 64bit Windows 10 Pro, Enterprise and Education (1511 November update, Build 10586 or later). In the future we will support more versions of Windows 10.
+
+* The Hyper-V package must be enabled. The Docker for Windows installer will enable it for you, if needed. (This requires a reboot).
+
+#### Docker Toolbox for Windows
+
+If you have an earlier Windows system that doesn't meet the Docker for Windows prerequisites, <a href="https://www.docker.com/products/docker-toolbox" target="_blank">get Docker Toolbox</a>.
+
+See [Docker Toolbox Overview](/toolbox/overview.md) for help on installing Docker with Toolbox.
+
+### Docker for Linux
+Docker Engine runs natively on Linux distributions.
+
+For full instructions on getting Docker for various Linux distributions, see [Install Docker Engine](/engine/installation/index.md).
+
+## Step 2: Install Docker
+
+* For install instructions for Docker for Mac, see [Getting Started with Docker for Mac](/docker-for-mac/index.md).
+
+* For install instructions for Docker for Windows, see [Getting Started with Docker for Windows](/docker-for-windows/index.md).
+
+* For install instructions for Docker Toolbox, see [Docker Toolbox Overview](/toolbox/overview.md).
+
+* For a simple example of installing Docker on Ubuntu Linux so that you can work through this tutorial, see [Installing Docker on Ubuntu Linux (Example)](linux_install_help.md).
+
+  For full install instructions for Docker on Linux, see [Install Docker Engine](/engine/installation/index.md) and select the flavor of Linux you want to use.
+
+## Step 3: Verify your installation
+
+1. Open a command-line terminal, and run some Docker commands to verify that Docker is working as expected.
+
+  Some good commands to try are `docker version` to check that you have the latest release installed and `docker ps` to see if you have any running containers. (Probably not, since you just started.)
+
+2. Type the `docker run hello-world` command and press RETURN.
+
+    The command does some work for you. If everything runs well, the command's
+    output looks like this:
+
+        $ docker run hello-world
+        Unable to find image 'hello-world:latest' locally
+        latest: Pulling from library/hello-world
+        535020c3e8ad: Pull complete
+        af340544ed62: Pull complete
+        Digest: sha256:a68868bfe696c00866942e8f5ca39e3e31b79c1e50feaee4ce5e28df2f051d5c
+        Status: Downloaded newer image for hello-world:latest
+
+        Hello from Docker.
+        This message shows that your installation appears to be working correctly.
+
+        To generate this message, Docker took the following steps:
+        1. The Docker Engine CLI client contacted the Docker Engine daemon.
+        2. The Docker Engine daemon pulled the "hello-world" image from the Docker Hub.
+        3. The Docker Engine daemon created a new container from that image which runs the
+           executable that produces the output you are currently reading.
+        4. The Docker Engine daemon streamed that output to the Docker Engine CLI client, which sent it
+           to your terminal.
+
+        To try something more ambitious, you can run an Ubuntu container with:
+        $ docker run -it ubuntu bash
+
+        Share images, automate workflows, and more with a free Docker Hub account:
+        https://hub.docker.com
+
+        For more examples and ideas, visit:
+        https://docs.docker.com/userguide/
+
+3. Run `docker ps -a` to show all containers on the system.
+
+        $ docker ps -a
+
+        CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS                      PORTS               NAMES
+        592376ff3eb8        hello-world         "/hello"            25 seconds ago      Exited (0) 24 seconds ago                       prickly_wozniak
+
+    You should see your `hello-world` container listed in the output for the `docker ps -a` command.
+
+    The command `docker ps` shows only currently running containers. Since `hello-world` already ran and exited, it wouldn't show up with a `docker ps`.
+
+## Looking for troubleshooting help?
+
+Typically, the above steps work out-of-the-box, but some scenarios can cause problems. If your `docker run hello-world` didn't work and resulted in errors, check out [Troubleshooting](/toolbox/faqs/troubleshoot.md) for quick fixes to common problems.
+
+## Where to go next
+
+At this point, you have successfully installed the Docker software. Leave the
+Docker Quickstart Terminal window open. Now, go to the next page to [read a very
+short introduction to Docker images and containers](step_two.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/step_six.md b/docs/getstarted/step_six.md
new file mode 100644
index 0000000..3e413e7
--- /dev/null
+++ b/docs/getstarted/step_six.md
@@ -0,0 +1,205 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/step_six/",
+"/windows/step_six/",
+"/linux/step_six/",
+]
+title = "Tag, push, & pull your image"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_tag_push_pull"
+parent = "getstart_all"
+weight = 6
++++
+<![end-metadata]-->
+
+
+# Tag, push, and pull your image
+
+In this section, you tag and push your `docker-whale` image to your newly
+created repository. When you are done, you test the repository by pulling your
+new image.
+
+## Step 1: Tag and push the image
+
+If you don't already have a terminal open, open one now:
+
+1. Go back to your command line terminal.
+
+2. At the prompt, type `docker images` to list the images you currently have:
+
+        $ docker images
+        REPOSITORY           TAG          IMAGE ID            CREATED             VIRTUAL SIZE
+        docker-whale         latest       7d9495d03763        38 minutes ago      273.7 MB
+        <none>               <none>       5dac217f722c        45 minutes ago      273.7 MB
+        docker/whalesay      latest       fb434121fc77        4 hours ago         247 MB
+        hello-world          latest       91c95931e552        5 weeks ago         910 B
+
+5. Find the `IMAGE ID` for your `docker-whale` image.
+
+  	 In this example, the id is `7d9495d03763`.
+
+     Notice that currently, the `REPOSITORY` shows the repo name `docker-whale`
+     but not the namespace. You need to include the `namespace` for Docker Hub to
+     associate it with your account. The `namespace` is the same as your Docker
+     Hub account name. You need to rename the image to
+     `YOUR_DOCKERHUB_NAME/docker-whale`.
+
+6. Use `IMAGE ID` and the `docker tag` command to tag your `docker-whale` image.
+
+    The command you type looks like this:
+
+    ![Docker tag command](tutimg/tagger.png)
+
+    Of course, your account name will be your own. So, you type the command with
+    your image's ID and your account name and press RETURN.
+
+		$ docker tag 7d9495d03763 maryatdocker/docker-whale:latest
+
+7. Type the `docker images` command again to see your newly tagged image.
+
+        $ docker images
+        REPOSITORY                  TAG       IMAGE ID        CREATED          VIRTUAL SIZE
+        maryatdocker/docker-whale   latest    7d9495d03763    5 minutes ago    273.7 MB
+        docker-whale                latest    7d9495d03763    2 hours ago      273.7 MB
+        <none>                      <none>    5dac217f722c    5 hours ago      273.7 MB
+        docker/whalesay             latest    fb434121fc77    5 hours ago      247 MB
+        hello-world                 latest    91c95931e552    5 weeks ago      910 B
+
+8. Use the `docker login` command to log into the Docker Hub from the command line.
+
+    The format for the login command is:
+
+        docker login --username=yourhubusername --email=youremail@company.com
+
+    When prompted, enter your password and press enter. So, for example:
+
+        $ docker login --username=maryatdocker --email=mary@docker.com
+        Password:
+        WARNING: login credentials saved in C:\Users\sven\.docker\config.json
+        Login Succeeded
+
+9. Type the `docker push` command to push your image to your new repository.
+
+		$ docker push maryatdocker/docker-whale
+			The push refers to a repository [maryatdocker/docker-whale] (len: 1)
+			7d9495d03763: Image already exists
+			c81071adeeb5: Image successfully pushed
+			eb06e47a01d2: Image successfully pushed
+			fb434121fc77: Image successfully pushed
+			5d5bd9951e26: Image successfully pushed
+			99da72cfe067: Image successfully pushed
+			1722f41ddcb5: Image successfully pushed
+			5b74edbcaa5b: Image successfully pushed
+			676c4a1897e6: Image successfully pushed
+			07f8e8c5e660: Image successfully pushed
+			37bea4ee0c81: Image successfully pushed
+			a82efea989f9: Image successfully pushed
+			e9e06b06e14c: Image successfully pushed
+			Digest: sha256:ad89e88beb7dc73bf55d456e2c600e0a39dd6c9500d7cd8d1025626c4b985011
+
+10. Return to your profile on Docker Hub to see your new image.
+
+  ![Docker tag command](tutimg/new_image.png)
+
+## Step 2: Pull your new image
+
+In this last section, you'll pull the image you just pushed to hub. Before you
+do that though, you'll need to remove the original image from your local
+machine. If you left the original image on your machine, Docker would not pull
+from the hub &mdash; why would it? The two images are identical.
+
+1. Make sure Docker is running, and open a command line terminal.
+
+2. At the prompt, type `docker images` to list the images you currently have on your local machine.
+
+		$ docker images
+		REPOSITORY                  TAG       IMAGE ID        CREATED          VIRTUAL SIZE
+		maryatdocker/docker-whale   latest    7d9495d03763    5 minutes ago    273.7 MB
+		docker-whale                latest    7d9495d03763    2 hours ago      273.7 MB
+		<none>                      <none>    5dac217f722c    5 hours ago      273.7 MB
+		docker/whalesay             latest    fb434121fc77    5 hours ago      247 MB
+		hello-world                 latest    91c95931e552    5 weeks ago      910 B
+
+    To make a good test, you need to remove the `maryatdocker/docker-whale` and
+   `docker-whale` images from your local system. Removing them forces the next
+   `docker pull` to get the image from your repository.
+
+3. Use the `docker rmi` command to remove the `maryatdocker/docker-whale` and `docker-whale`
+images.
+
+	You can use an ID or the name to remove an image.
+
+		$ docker rmi -f 7d9495d03763
+		$ docker rmi -f docker-whale
+
+4. Pull and load a new image from your repository using the `docker run` command.
+
+    The command you type should include your username from Docker Hub.
+
+         docker run yourusername/docker-whale
+
+	Since the image is no longer available on your local system, Docker downloads it.
+
+		$ docker run maryatdocker/docker-whale
+		Unable to find image 'maryatdocker/docker-whale:latest' locally
+		latest: Pulling from maryatdocker/docker-whale
+		eb06e47a01d2: Pull complete
+		c81071adeeb5: Pull complete
+		7d9495d03763: Already exists
+		e9e06b06e14c: Already exists
+		a82efea989f9: Already exists
+		37bea4ee0c81: Already exists
+		07f8e8c5e660: Already exists
+		676c4a1897e6: Already exists
+		5b74edbcaa5b: Already exists
+		1722f41ddcb5: Already exists
+		99da72cfe067: Already exists
+		5d5bd9951e26: Already exists
+		fb434121fc77: Already exists
+		Digest: sha256:ad89e88beb7dc73bf55d456e2c600e0a39dd6c9500d7cd8d1025626c4b985011
+		Status: Downloaded newer image for maryatdocker/docker-whale:latest
+         ________________________________________
+        / Having wandered helplessly into a      \
+        | blinding snowstorm Sam was greatly     |
+        | relieved to see a sturdy Saint Bernard |
+        | dog bounding toward him with the       |
+        | traditional keg of brandy strapped to  |
+        | his collar.                            |
+        |                                        |
+        | "At last," cried Sam, "man's best      |
+        \ friend -- and a great big dog, too!"   /
+         ----------------------------------------
+                        \
+                         \
+                          \
+                                  ##        .
+                            ## ## ##       ==
+                         ## ## ## ##      ===
+                     /""""""""""""""""___/ ===
+                ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~
+                     \______ o          __/
+                      \    \        __/
+                        \____\______/
+
+## Where to go next
+
+You've done a lot! You've completed all of the following fundamental Docker tasks.
+
+* installed Docker
+* run a software image in a container
+* located an interesting image on Docker Hub
+* run the image on your own machine
+* modified an image to create your own and run it
+* created a Docker Hub account and repository
+* pushed your image to Docker Hub for others to share
+
+<a href="https://twitter.com/intent/tweet?button_hashtag=dockerdocs&text=Just%20ran%20a%20container%20with%20an%20image%20I%20built.%20Find%20it%20on%20%23dockerhub.%20Build%20your%20own%3A%20http%3A%2F%2Fgoo.gl%2FMUi7cA" class="twitter-hashtag-button" data-size="large" data-related="docker" target="_blank">Tweet your accomplishment!</a>
+<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
+
+You've only scratched the surface of what Docker can do. Go to the next page to [learn more](last_page.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/step_three.md b/docs/getstarted/step_three.md
new file mode 100644
index 0000000..2f21641
--- /dev/null
+++ b/docs/getstarted/step_three.md
@@ -0,0 +1,143 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/step_three/",
+"/windows/step_three/",
+"/linux/step_three/",
+]
+title = "Find & run the whalesay image"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_locate"
+parent = "getstart_all"
+weight = 3
++++
+<![end-metadata]-->
+
+# Find and run the whalesay image
+
+People all over the world create Docker images. You can find these images by
+browsing the Docker Hub. In this next section, you'll search for and find the
+image you'll use in the rest of this getting started.
+
+## Step 1: Locate the whalesay image
+
+1. Open your browser and  <a href="https://hub.docker.com/?utm_source=getting_started_guide&utm_medium=embedded_MacOSX&utm_campaign=find_whalesay" target=_blank> browse to the Docker Hub</a>.
+
+    ![Browse Docker Hub](tutimg/browse_and_search.png)
+
+	The Docker Hub contains images from individuals like you and official images
+	from organizations like RedHat, IBM, Google, and a whole lot more.
+
+2. Click **Browse & Search**.
+
+    The browser opens the search page.
+
+3. Enter the word `whalesay` in the search bar.
+
+    ![Browse Docker Hub](tutimg/image_found.png)
+
+4. Click on the **docker/whalesay** image in the results.
+
+    The browser displays the repository for the **whalesay** image.
+
+    ![Browse Docker Hub](tutimg/whale_repo.png)
+
+	  Each image repository contains information about an image. It should
+    include information such as what kind of software the image contains and
+    how to use it. You may notice that the **whalesay** image is based on a
+    Linux distribution called Ubuntu. In the next step, you run the **whalesay** image on your machine.
+
+## Step 2: Run the whalesay image
+
+Make sure Docker is running. On Docker for Mac and Docker for Windows, this is indicated by the Docker whale showing in the status bar.
+
+1. Open a command-line terminal.
+
+2. Type the `docker run docker/whalesay cowsay boo` command and press RETURN.
+
+    This command runs the **whalesay** image in a container. Your terminal should look like the following:
+
+        $ docker run docker/whalesay cowsay boo
+        Unable to find image 'docker/whalesay:latest' locally
+        latest: Pulling from docker/whalesay
+        e9e06b06e14c: Pull complete
+        a82efea989f9: Pull complete
+        37bea4ee0c81: Pull complete
+        07f8e8c5e660: Pull complete
+        676c4a1897e6: Pull complete
+        5b74edbcaa5b: Pull complete
+        1722f41ddcb5: Pull complete
+        99da72cfe067: Pull complete
+        5d5bd9951e26: Pull complete
+        fb434121fc77: Already exists
+        Digest: sha256:d6ee73f978a366cf97974115abe9c4099ed59c6f75c23d03c64446bb9cd49163
+        Status: Downloaded newer image for docker/whalesay:latest
+         _____
+        < boo >
+         -----
+            \
+             \
+              \     
+                            ##        .            
+                      ## ## ##       ==            
+                   ## ## ## ##      ===            
+               /""""""""""""""""___/ ===        
+          ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~   
+               \______ o          __/            
+                \    \        __/             
+                  \____\______/   
+
+	The first time you run a software image, the `docker` command looks for it
+	on your local system. If the image isn't there, then `docker` gets it from
+	the hub.
+
+5. While still in the command line terminal, type `docker images` command and press RETURN.
+
+    The command lists all the images on your local system. You should see
+    `docker/whalesay` in the list.
+
+        $ docker images
+        REPOSITORY           TAG         IMAGE ID            CREATED            VIRTUAL SIZE
+        docker/whalesay      latest      fb434121fc77        3 hours ago        247 MB
+        hello-world          latest      91c95931e552        5 weeks ago        910 B
+
+    When you run an image in a container, Docker downloads the image to your
+    computer. This local copy of the image saves you time.  Docker only
+    downloads the image again if the image's source changes on the hub.  You
+    can, of course, delete the image yourself. You'll learn more about that
+    later. Let's leave the image there for now because we are going to use it
+    later.
+
+6. Take a moment to play with the **whalesay** container a bit.
+
+    Try running the `whalesay` image again with a word or phrase. Try a long or
+    short phrase.  Can you break the cow?
+
+        $ docker run docker/whalesay cowsay boo-boo
+         _________
+        < boo-boo >
+         ---------
+            \
+             \
+              \     
+                            ##        .            
+                      ## ## ##       ==            
+                   ## ## ## ##      ===            
+               /""""""""""""""""___/ ===        
+          ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ /  ===- ~~~   
+               \______ o          __/            
+                \    \        __/             
+                  \____\______/   
+
+## Where to go next
+
+On this page, you learned to search for images on Docker Hub. You used your
+command line to run an image. Think about it, effectively you ran a piece of
+Linux software on your Mac computer.  You learned that running an image copies
+it to your computer.  Now, you are ready to create your own Docker image.
+Go on to the next part [to build your own image](step_four.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/step_two.md b/docs/getstarted/step_two.md
new file mode 100644
index 0000000..d12c0a1
--- /dev/null
+++ b/docs/getstarted/step_two.md
@@ -0,0 +1,46 @@
+<!--[metadata]>
++++
+aliases = [
+"/mac/step_two/",
+"/windows/step_two/",
+"/linux/step_two/",
+]
+title = "Understand images & containers"
+description = "Getting started with Docker"
+keywords = ["beginner, getting started, Docker"]
+[menu.main]
+identifier = "getstart_understand"
+parent = "getstart_all"
+weight = 2
++++
+<![end-metadata]-->
+
+#  Learn about images & containers
+
+Docker Engine provides the core Docker technology that enables images and
+containers. As the last step in your installation, you ran the
+`docker run hello-world` command. The command you ran had three parts.
+
+![Container Explainer](tutimg/container_explainer.png)
+
+An *image* is a filesystem and parameters to use at runtime. It doesn't have
+state and never changes. A *container* is a running instance of an image.
+When you ran the command, Docker Engine:
+
+* checked to see if you had the `hello-world` software image
+* downloaded the image from the Docker Hub (more about the hub later)
+* loaded the image into the container and "ran" it
+
+Depending on how it was built, an image might run a simple, single command and then exit. This is what `Hello-World` did.
+
+A Docker image, though, is capable of much more. An image can start software as complex as a database, wait for you (or someone else) to add data, store the data for later use, and then wait for the next person.
+
+Who built the `hello-world` software image though? In this case, Docker did but anyone can. Docker Engine lets people (or companies) create and share software through Docker images. Using Docker Engine, you don't have to worry about whether your computer can run the software in a Docker image &mdash; a Docker container *can always run it*.
+
+## Where to go next
+
+See, that was quick wasn't it? Now, you are ready to do some really fun stuff with Docker.
+Go on to the next part [to find and run the whalesay image](step_three.md).
+
+
+&nbsp;
diff --git a/docs/getstarted/tutimg/add_repository.png b/docs/getstarted/tutimg/add_repository.png
new file mode 100644
index 0000000..91f89f0
--- /dev/null
+++ b/docs/getstarted/tutimg/add_repository.png
Binary files differ
diff --git a/docs/getstarted/tutimg/browse_and_search.png b/docs/getstarted/tutimg/browse_and_search.png
new file mode 100644
index 0000000..0ead079
--- /dev/null
+++ b/docs/getstarted/tutimg/browse_and_search.png
Binary files differ
diff --git a/docs/getstarted/tutimg/container_explainer.png b/docs/getstarted/tutimg/container_explainer.png
new file mode 100644
index 0000000..2f2d691
--- /dev/null
+++ b/docs/getstarted/tutimg/container_explainer.png
Binary files differ
diff --git a/docs/getstarted/tutimg/hub_signup.png b/docs/getstarted/tutimg/hub_signup.png
new file mode 100644
index 0000000..83795ea
--- /dev/null
+++ b/docs/getstarted/tutimg/hub_signup.png
Binary files differ
diff --git a/docs/getstarted/tutimg/image_found.png b/docs/getstarted/tutimg/image_found.png
new file mode 100644
index 0000000..3d58069
--- /dev/null
+++ b/docs/getstarted/tutimg/image_found.png
Binary files differ
diff --git a/docs/getstarted/tutimg/line_one.png b/docs/getstarted/tutimg/line_one.png
new file mode 100644
index 0000000..a39d1c9
--- /dev/null
+++ b/docs/getstarted/tutimg/line_one.png
Binary files differ
diff --git a/docs/getstarted/tutimg/new_image.png b/docs/getstarted/tutimg/new_image.png
new file mode 100644
index 0000000..c801f36
--- /dev/null
+++ b/docs/getstarted/tutimg/new_image.png
Binary files differ
diff --git a/docs/getstarted/tutimg/tagger.png b/docs/getstarted/tutimg/tagger.png
new file mode 100644
index 0000000..f4b8fc6
--- /dev/null
+++ b/docs/getstarted/tutimg/tagger.png
Binary files differ
diff --git a/docs/getstarted/tutimg/whale_repo.png b/docs/getstarted/tutimg/whale_repo.png
new file mode 100644
index 0000000..558d06e
--- /dev/null
+++ b/docs/getstarted/tutimg/whale_repo.png
Binary files differ
diff --git a/docs/installation/linux/centos.md b/docs/installation/linux/centos.md
index 1647f76..08d8c40 100644
--- a/docs/installation/linux/centos.md
+++ b/docs/installation/linux/centos.md
@@ -57,7 +57,7 @@
         $ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
         [dockerrepo]
         name=Docker Repository
-        baseurl=https://yum.dockerproject.org/repo/main/centos/$releasever/
+        baseurl=https://yum.dockerproject.org/repo/main/centos/7/
         enabled=1
         gpgcheck=1
         gpgkey=https://yum.dockerproject.org/gpg
diff --git a/docs/reference/commandline/info.md b/docs/reference/commandline/info.md
index 1303aa0..21f483c 100644
--- a/docs/reference/commandline/info.md
+++ b/docs/reference/commandline/info.md
@@ -45,19 +45,22 @@
      Paused: 1
      Stopped: 10
     Images: 52
-    Server Version: 1.11.1
+    Server Version: 1.12.0-dev
     Storage Driver: overlay
      Backing Filesystem: extfs
     Logging Driver: json-file
     Cgroup Driver: cgroupfs
     Plugins:
      Volume: local
-     Network: bridge null host
+     Network: bridge null host overlay
     Swarm: 
      NodeID: 0gac67oclbxq7
      IsManager: YES
      Managers: 2
      Nodes: 2
+    Runtimes: default
+    Default Runtime: default
+    Security Options: apparmor seccomp
     Kernel Version: 4.4.0-21-generic
     Operating System: Ubuntu 16.04 LTS
     OSType: linux
diff --git a/docs/reference/commandline/node_inspect.md b/docs/reference/commandline/node_inspect.md
index 48119c1..7bb1921 100644
--- a/docs/reference/commandline/node_inspect.md
+++ b/docs/reference/commandline/node_inspect.md
@@ -30,17 +30,17 @@
 
     $ docker node inspect swarm-manager
     [
-      {
-        "ID": "0gac67oclbxq7",
+    {
+        "ID": "e216jshn25ckzbvmwlnh5jr3g",
         "Version": {
-            "Index": 2028
+            "Index": 10
         },
-        "CreatedAt": "2016-06-06T20:49:32.720047494Z",
-        "UpdatedAt": "2016-06-07T00:23:31.207632893Z",
+        "CreatedAt": "2016-06-16T22:52:44.9910662Z",
+        "UpdatedAt": "2016-06-16T22:52:45.230878043Z",
         "Spec": {
-            "Role": "MANAGER",
-            "Membership": "ACCEPTED",
-            "Availability": "ACTIVE"
+            "Role": "manager",
+            "Membership": "accepted",
+            "Availability": "active"
         },
         "Description": {
             "Hostname": "swarm-manager",
@@ -50,38 +50,55 @@
             },
             "Resources": {
                 "NanoCPUs": 1000000000,
-                "MemoryBytes": 1044250624
+                "MemoryBytes": 1039843328
             },
             "Engine": {
                 "EngineVersion": "1.12.0",
-                "Labels": {
-                    "provider": "virtualbox"
-                }
+                "Plugins": [
+                    {
+                        "Type": "Volume",
+                        "Name": "local"
+                    },
+                    {
+                        "Type": "Network",
+                        "Name": "overlay"
+                    },
+                    {
+                        "Type": "Network",
+                        "Name": "null"
+                    },
+                    {
+                        "Type": "Network",
+                        "Name": "host"
+                    },
+                    {
+                        "Type": "Network",
+                        "Name": "bridge"
+                    },
+                    {
+                        "Type": "Network",
+                        "Name": "overlay"
+                    }
+                ]
             }
         },
         "Status": {
-            "State": "READY"
+            "State": "ready"
         },
-        "Manager": {
-            "Raft": {
-                "RaftID": 2143745093569717375,
-                "Addr": "192.168.99.118:4500",
-                "Status": {
-                    "Leader": true,
-                    "Reachability": "REACHABLE"
-                }
-            }
-        },
-        "Attachment": {},
-      }
+        "ManagerStatus": {
+            "Leader": true,
+            "Reachability": "reachable",
+            "Addr": "168.0.32.137:2377"
+        }
+    }
     ]
 
-    $ docker node inspect --format '{{ .Manager.Raft.Status.Leader }}' self
+    $ docker node inspect --format '{{ .ManagerStatus.Leader }}' self
     false
 
     $ docker node inspect --pretty self
-    ID:                     2otfhz83efcc7
-    Hostname:               ad960a848573
+    ID:                     e216jshn25ckzbvmwlnh5jr3g
+    Hostname:               swarm-manager
     Status:
      State:                 Ready
      Availability:          Active
diff --git a/docs/reference/commandline/node_ls.md b/docs/reference/commandline/node_ls.md
index ce82f6b..56e157a 100644
--- a/docs/reference/commandline/node_ls.md
+++ b/docs/reference/commandline/node_ls.md
@@ -29,10 +29,10 @@
 Example output:
 
     $ docker node ls
-    ID              NAME           STATUS  AVAILABILITY     MANAGER STATUS  LEADER
-    0gac67oclbxq    swarm-master   Ready   Active           Reachable       Yes
-    0pwvm3ve66q7    swarm-node-02  Ready   Active
-    15xwihgw71aw *  swarm-node-01  Ready   Active           Reachable
+    ID                           NAME           MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
+    1bcef6utixb0l0ca7gxuivsj0    swarm-worker2   Accepted    Ready   Active
+    38ciaotwjuritcdtn9npbnkuz    swarm-worker1   Accepted    Ready   Active
+    e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Accepted    Ready   Active        Reachable       Yes
 
 
 ## Filtering
@@ -49,22 +49,21 @@
 
 ### name
 
-The `name` filter matches on all or part of a tasks's name.
+The `name` filter matches on all or part of a node name.
 
 The following filter matches the node with a name equal to `swarm-master` string.
 
-    $ docker node ls -f name=swarm-master
-    ID              NAME          STATUS  AVAILABILITY      MANAGER STATUS  LEADER
-    0gac67oclbxq *  swarm-master  Ready   Active            Reachable       Yes
+    $ docker node ls -f name=swarm-manager1
+    ID                           NAME            MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
+    e216jshn25ckzbvmwlnh5jr3g *  swarm-manager1  Accepted    Ready   Active        Reachable       Yes
 
 ### id
 
 The `id` filter matches all or part of a node's id.
 
-    $ docker node ls -f id=0
-    ID              NAME           STATUS  AVAILABILITY     MANAGER STATUS  LEADER
-    0gac67oclbxq *  swarm-master   Ready   Active           Reachable       Yes
-    0pwvm3ve66q7    swarm-node-02  Ready   Active
+    $ docker node ls -f id=1
+    ID                         NAME           MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
+    1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Accepted    Ready   Active
 
 
 #### label
@@ -76,8 +75,8 @@
 
 ```bash
 $ docker node ls -f "label=foo"
-ID              NAME           STATUS  AVAILABILITY     MANAGER STATUS  LEADER
-15xwihgw71aw *  swarm-node-01  Ready   Active           Reachable
+ID                         NAME           MEMBERSHIP  STATUS  AVAILABILITY  MANAGER STATUS  LEADER
+1bcef6utixb0l0ca7gxuivsj0  swarm-worker2  Accepted    Ready   Active
 ```
 
 
diff --git a/docs/reference/commandline/node_tasks.md b/docs/reference/commandline/node_tasks.md
index 5bd6832..d4c258c 100644
--- a/docs/reference/commandline/node_tasks.md
+++ b/docs/reference/commandline/node_tasks.md
@@ -26,13 +26,13 @@
 
 Example output:
 
-    $ docker node tasks swarm-master
-    ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
-    dx2g0fe3zsdb6y6q453f8dqw2  redis.1  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    5v26yzixl3one3ptjyqqbd0ro  redis.5  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    adcaphlhsfr30d47lby6walg6  redis.8  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    chancjvk9tex6768uzzacslq2  redis.9  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
+    $ docker node tasks swarm-manager1
+    ID                         NAME      SERVICE  IMAGE        LAST STATE          DESIRED STATE  NODE
+    7q92v0nr1hcgts2amcjyqg3pq  redis.1   redis    redis:3.0.6  Running 5 hours     Running        swarm-manager1
+    b465edgho06e318egmgjbqo4o  redis.6   redis    redis:3.0.6  Running 29 seconds  Running        swarm-manager1
+    bg8c07zzg87di2mufeq51a2qp  redis.7   redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
+    dkkual96p4bb3s6b10r7coxxt  redis.9   redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
+    0tgctg8h8cech4w0k0gwrmr23  redis.10  redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
 
 
 ## Filtering
@@ -53,22 +53,22 @@
 
 The following filter matches all tasks with a name containing the `redis` string.
 
-    $ docker node tasks -f name=redis swarm-master
-    ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
-    dx2g0fe3zsdb6y6q453f8dqw2  redis.1  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    5v26yzixl3one3ptjyqqbd0ro  redis.5  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    adcaphlhsfr30d47lby6walg6  redis.8  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-    chancjvk9tex6768uzzacslq2  redis.9  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
+    $ docker node tasks -f name=redis swarm-manager1
+    ID                         NAME      SERVICE  IMAGE        LAST STATE          DESIRED STATE  NODE
+    7q92v0nr1hcgts2amcjyqg3pq  redis.1   redis    redis:3.0.6  Running 5 hours     Running        swarm-manager1
+    b465edgho06e318egmgjbqo4o  redis.6   redis    redis:3.0.6  Running 29 seconds  Running        swarm-manager1
+    bg8c07zzg87di2mufeq51a2qp  redis.7   redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
+    dkkual96p4bb3s6b10r7coxxt  redis.9   redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
+    0tgctg8h8cech4w0k0gwrmr23  redis.10  redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
 
 
 ### id
 
 The `id` filter matches a task's id.
 
-    $ docker node tasks -f id=f33pcf8lwhs4c1t4kq8szwzta swarm-master
-    ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
-    f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
+    $ docker node tasks -f id=bg8c07zzg87di2mufeq51a2qp swarm-manager1
+    ID                         NAME      SERVICE  IMAGE        LAST STATE          DESIRED STATE  NODE
+    bg8c07zzg87di2mufeq51a2qp  redis.7   redis    redis:3.0.6  Running 5 seconds   Running        swarm-manager1
 
 
 #### label
@@ -80,9 +80,9 @@
 
 ```bash
 $ docker node tasks -f "label=usage"
-ID                         NAME     SERVICE  IMAGE        DESIRED STATE  LAST STATE       NODE
-dx2g0fe3zsdb6y6q453f8dqw2  redis.1  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
-f33pcf8lwhs4c1t4kq8szwzta  redis.4  redis    redis:3.0.6  RUNNING        RUNNING 2 hours  swarm-master
+ID                         NAME     SERVICE  IMAGE        LAST STATE          DESIRED STATE  NODE
+b465edgho06e318egmgjbqo4o  redis.6  redis    redis:3.0.6  Running 10 minutes  Running        swarm-manager1
+bg8c07zzg87di2mufeq51a2qp  redis.7  redis    redis:3.0.6  Running 9 minutes   Running        swarm-manager1
 ```
 
 
diff --git a/docs/reference/commandline/node_update.md b/docs/reference/commandline/node_update.md
index a48712d..8f00543 100644
--- a/docs/reference/commandline/node_update.md
+++ b/docs/reference/commandline/node_update.md
@@ -16,6 +16,12 @@
 
     Update a node
 
+    Options:
+      --availability string   Availability of the node (active/pause/drain)
+      --help                  Print usage
+      --membership string     Membership of the node (accepted/rejected)
+      --role string           Role of the node (worker/manager)
+
 
 
 ## Related information
diff --git a/docs/reference/commandline/plugin_disable.md b/docs/reference/commandline/plugin_disable.md
index 44f78af..dd5998e 100644
--- a/docs/reference/commandline/plugin_disable.md
+++ b/docs/reference/commandline/plugin_disable.md
@@ -3,9 +3,9 @@
 title = "plugin disable"
 description = "the plugin disable command description and usage"
 keywords = ["plugin, disable"]
+advisory = "experimental"
 [menu.main]
 parent = "smn_cli"
-advisory = "experimental"
 +++
 <![end-metadata]-->
 
diff --git a/docs/reference/commandline/plugin_enable.md b/docs/reference/commandline/plugin_enable.md
index 2bd577f..3238823 100644
--- a/docs/reference/commandline/plugin_enable.md
+++ b/docs/reference/commandline/plugin_enable.md
@@ -3,9 +3,9 @@
 title = "plugin enable"
 description = "the plugin enable command description and usage"
 keywords = ["plugin, enable"]
+advisory = "experimental"
 [menu.main]
 parent = "smn_cli"
-advisory = "experimental"
 +++
 <![end-metadata]-->
 
diff --git a/docs/reference/commandline/plugin_inspect.md b/docs/reference/commandline/plugin_inspect.md
index e0282ea..67d63c2 100644
--- a/docs/reference/commandline/plugin_inspect.md
+++ b/docs/reference/commandline/plugin_inspect.md
@@ -3,9 +3,9 @@
 title = "plugin inspect"
 description = "The plugin inspect command description and usage"
 keywords = ["plugin, inspect"]
+advisory = "experimental"
 [menu.main]
 parent = "smn_cli"
-advisory = "experimental"
 +++
 <![end-metadata]-->
 
diff --git a/docs/reference/commandline/plugin_install.md b/docs/reference/commandline/plugin_install.md
index 276af5a..03d0895 100644
--- a/docs/reference/commandline/plugin_install.md
+++ b/docs/reference/commandline/plugin_install.md
@@ -3,9 +3,9 @@
 title = "plugin install"
 description = "the plugin install command description and usage"
 keywords = ["plugin, install"]
+advisory = "experimental"
 [menu.main]
 parent = "smn_cli"
-advisory = "experimental"
 +++
 <![end-metadata]-->
 
diff --git a/docs/reference/commandline/plugin_ls.md b/docs/reference/commandline/plugin_ls.md
index ea36368..624807a 100644
--- a/docs/reference/commandline/plugin_ls.md
+++ b/docs/reference/commandline/plugin_ls.md
@@ -3,9 +3,9 @@
 title = "plugin ls"
 description = "The plugin ls command description and usage"
 keywords = ["plugin, list"]
+advisory = "experimental"
 [menu.main]
 parent = "smn_cli"
-advisory = "experimental"
 +++
 <![end-metadata]-->
 
diff --git a/docs/reference/commandline/plugin_rm.md b/docs/reference/commandline/plugin_rm.md
index a5dcf9e..5f73b00 100644
--- a/docs/reference/commandline/plugin_rm.md
+++ b/docs/reference/commandline/plugin_rm.md
@@ -3,9 +3,9 @@
 title = "plugin rm"
 description = "the plugin rm command description and usage"
 keywords = ["plugin, rm"]
+advisory = "experimental"
 [menu.main]
 parent = "smn_cli"
-advisory = "experimental"
 +++
 <![end-metadata]-->
 
diff --git a/docs/reference/commandline/swarm_update.md b/docs/reference/commandline/swarm_update.md
index 942a330..afbcf64 100644
--- a/docs/reference/commandline/swarm_update.md
+++ b/docs/reference/commandline/swarm_update.md
@@ -22,6 +22,7 @@
           --help                            Print usage
           --secret string                   Set secret value needed to accept nodes into cluster
           --task-history-limit int          Task history retention limit (default 10)
+          --cert-expiry duration            Validity period for node certificates (default 2160h0m0s)
 
 Updates a Swarm cluster with new parameter values. This command must target a manager node.
 
diff --git a/docs/swarm/index.md b/docs/swarm/index.md
index abfd033..ebf3168 100644
--- a/docs/swarm/index.md
+++ b/docs/swarm/index.md
@@ -3,11 +3,11 @@
 title = "Swarm overview"
 description = "Docker Swarm overview"
 keywords = ["docker, container, cluster, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="swarm_overview"
 parent="engine_swarm"
 weight="1"
-advisory = "rc"
 +++
 <![end-metadata]-->
 # Docker Swarm overview
diff --git a/docs/swarm/key-concepts.md b/docs/swarm/key-concepts.md
index 34d7ce1..814e2be 100644
--- a/docs/swarm/key-concepts.md
+++ b/docs/swarm/key-concepts.md
@@ -3,11 +3,11 @@
 title = "Swarm key concepts"
 description = "Introducing key concepts for Docker Swarm"
 keywords = ["docker, container, cluster, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="swarm-concepts"
 parent="engine_swarm"
 weight="2"
-advisory = "rc"
 +++
 <![end-metadata]-->
 # Docker Swarm key concepts
diff --git a/docs/swarm/menu.md b/docs/swarm/menu.md
index df43027..7864943 100644
--- a/docs/swarm/menu.md
+++ b/docs/swarm/menu.md
@@ -3,11 +3,11 @@
 title = "Manage a Swarm (1.12 RC)"
 description = "How to use Docker Swarm to create and manage Docker Engine clusters"
 keywords = [" docker, documentation, developer, "]
+advisory = "rc"
 [menu.main]
 identifier = "engine_swarm"
 parent = "engine_use"
 weight = 0
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/add-nodes.md b/docs/swarm/swarm-tutorial/add-nodes.md
index d14c076..e052879 100644
--- a/docs/swarm/swarm-tutorial/add-nodes.md
+++ b/docs/swarm/swarm-tutorial/add-nodes.md
@@ -3,11 +3,11 @@
 title = "Add nodes to the Swarm"
 description = "Add nodes to the Swarm"
 keywords = ["tutorial, cluster management, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="add-nodes"
 parent="swarm-tutorial"
 weight=13
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/create-swarm.md b/docs/swarm/swarm-tutorial/create-swarm.md
index 2f55e8c..6f82a4d 100644
--- a/docs/swarm/swarm-tutorial/create-swarm.md
+++ b/docs/swarm/swarm-tutorial/create-swarm.md
@@ -3,11 +3,11 @@
 title = "Create a Swarm"
 description = "Initialize the Swarm"
 keywords = ["tutorial, cluster management, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="initialize-swarm"
 parent="swarm-tutorial"
 weight=12
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/delete-service.md b/docs/swarm/swarm-tutorial/delete-service.md
index d037796..8e74ef8 100644
--- a/docs/swarm/swarm-tutorial/delete-service.md
+++ b/docs/swarm/swarm-tutorial/delete-service.md
@@ -3,11 +3,11 @@
 title = "Delete the service"
 description = "Remove the service on the Swarm"
 keywords = ["tutorial, cluster management, swarm, service"]
+advisory = "rc"
 [menu.main]
 identifier="swarm-tutorial-delete-service"
 parent="swarm-tutorial"
 weight=19
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/deploy-service.md b/docs/swarm/swarm-tutorial/deploy-service.md
index cbfc5af..f6f0e08 100644
--- a/docs/swarm/swarm-tutorial/deploy-service.md
+++ b/docs/swarm/swarm-tutorial/deploy-service.md
@@ -3,11 +3,11 @@
 title = "Deploy a service"
 description = "Deploy the application"
 keywords = ["tutorial, cluster management, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="deploy-application"
 parent="swarm-tutorial"
 weight=16
-advisory = "rc"
 +++
 <![end-metadata]-->
 
@@ -39,8 +39,8 @@
     ```
     $ docker service ls
 
-    ID            NAME        SCALE  IMAGE   COMMAND
-    2zs4helqu64f  helloworld  1      alpine  ping docker.com
+    ID            NAME        REPLICAS  IMAGE   COMMAND
+    2zs4helqu64f  helloworld  1         alpine  ping docker.com
     ```
 
 ## What's next?
diff --git a/docs/swarm/swarm-tutorial/drain-node.md b/docs/swarm/swarm-tutorial/drain-node.md
index 4299788..8b8fb4f 100644
--- a/docs/swarm/swarm-tutorial/drain-node.md
+++ b/docs/swarm/swarm-tutorial/drain-node.md
@@ -3,6 +3,7 @@
 title = "Drain a node"
 description = "Drain nodes on the Swarm"
 keywords = ["tutorial, cluster management, swarm, service, drain"]
+advisory = "rc"
 [menu.main]
 identifier="swarm-tutorial-drain-node"
 parent="swarm-tutorial"
diff --git a/docs/swarm/swarm-tutorial/index.md b/docs/swarm/swarm-tutorial/index.md
index 4d4fdb0..366a574 100644
--- a/docs/swarm/swarm-tutorial/index.md
+++ b/docs/swarm/swarm-tutorial/index.md
@@ -3,11 +3,11 @@
 title = "Set up for the tutorial"
 description = "Getting Started tutorial for Docker Swarm"
 keywords = ["tutorial, cluster management, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="tutorial-setup"
 parent="swarm-tutorial"
 weight=11
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/inspect-service.md b/docs/swarm/swarm-tutorial/inspect-service.md
index a5d4455..c0ca3f5 100644
--- a/docs/swarm/swarm-tutorial/inspect-service.md
+++ b/docs/swarm/swarm-tutorial/inspect-service.md
@@ -3,11 +3,11 @@
 title = "Inspect the service"
 description = "Inspect the application"
 keywords = ["tutorial, cluster management, swarm"]
+advisory = "rc"
 [menu.main]
 identifier="inspect-application"
 parent="swarm-tutorial"
 weight=17
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/menu.md b/docs/swarm/swarm-tutorial/menu.md
index aec86ae..dd3721e 100644
--- a/docs/swarm/swarm-tutorial/menu.md
+++ b/docs/swarm/swarm-tutorial/menu.md
@@ -3,11 +3,11 @@
 title = "Get started with Swarm"
 description = "Getting started tutorial for Docker Swarm"
 keywords = ["cluster, swarm, tutorial"]
+advisory = "rc"
 [menu.main]
 identifier="swarm-tutorial"
 parent="engine_swarm"
 weight=10
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/rolling-update.md b/docs/swarm/swarm-tutorial/rolling-update.md
index 58ddaad..a7b072a 100644
--- a/docs/swarm/swarm-tutorial/rolling-update.md
+++ b/docs/swarm/swarm-tutorial/rolling-update.md
@@ -3,11 +3,11 @@
 title = "Apply rolling updates"
 description = "Apply rolling updates to a service on the Swarm"
 keywords = ["tutorial, cluster management, swarm, service, rolling-update"]
+advisory = "rc"
 [menu.main]
 identifier="swarm-tutorial-rolling-update"
 parent="swarm-tutorial"
 weight=20
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/docs/swarm/swarm-tutorial/scale-service.md b/docs/swarm/swarm-tutorial/scale-service.md
index 29fe65c..2228e2e 100644
--- a/docs/swarm/swarm-tutorial/scale-service.md
+++ b/docs/swarm/swarm-tutorial/scale-service.md
@@ -3,11 +3,11 @@
 title = "Scale the service"
 description = "Scale the service running in the Swarm"
 keywords = ["tutorial, cluster management, swarm, scale"]
+advisory = "rc"
 [menu.main]
 identifier="swarm-tutorial-scale-service"
 parent="swarm-tutorial"
 weight=18
-advisory = "rc"
 +++
 <![end-metadata]-->
 
diff --git a/experimental/README.md b/experimental/README.md
index 67abbe1..1098dd0 100644
--- a/experimental/README.md
+++ b/experimental/README.md
@@ -49,9 +49,9 @@
 To download the latest experimental `docker` binary for Linux,
 use the following URLs:
 
-    https://experimental.docker.com/builds/Linux/i386/docker-latest
+    https://experimental.docker.com/builds/Linux/i386/docker-latest.tgz
 
-    https://experimental.docker.com/builds/Linux/x86_64/docker-latest
+    https://experimental.docker.com/builds/Linux/x86_64/docker-latest.tgz
 
 After downloading the appropriate binary, you can follow the instructions
 [here](https://docs.docker.com/installation/binaries/#get-the-docker-binary) to run the `docker` daemon.
diff --git a/experimental/docker-stacks.md b/experimental/docker-stacks.md
index 31a02b6..83ee316 100644
--- a/experimental/docker-stacks.md
+++ b/experimental/docker-stacks.md
@@ -64,7 +64,7 @@
 
     ```bash
     # docker service ls
-    ID            NAME                                     SCALE  IMAGE
+    ID            NAME                                     REPLICAS  IMAGE
     COMMAND
     29bv0vnlm903  vossibility-stack_lookupd                1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqlookupd
     4awt47624qwh  vossibility-stack_nsqd                   1 nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662 /nsqd --data-path=/data --lookupd-tcp-address=lookupd:4160
diff --git a/hack/make/build-deb b/hack/make/build-deb
index 5b586d0..2aed66f 100644
--- a/hack/make/build-deb
+++ b/hack/make/build-deb
@@ -73,7 +73,7 @@
 		# add runc and containerd compile and install
 		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
 			# Install runc
-			RUN git clone https://github.com/crosbymichael/runc.git "/go/src/github.com/opencontainers/runc" \
+			RUN git clone https://github.com/opencontainers/runc.git "/go/src/github.com/opencontainers/runc" \
 					&& cd "/go/src/github.com/opencontainers/runc" \
 					&& git checkout -q "\$RUNC_COMMIT"
 			RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/opencontainers/runc" \
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
index d70cb70..3a82a38 100644
--- a/hack/make/build-rpm
+++ b/hack/make/build-rpm
@@ -100,7 +100,7 @@
 		# add runc and containerd compile and install
 		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
 			# Install runc
-			RUN git clone https://github.com/crosbymichael/runc.git "/go/src/github.com/opencontainers/runc" \
+			RUN git clone https://github.com/opencontainers/runc.git "/go/src/github.com/opencontainers/runc" \
 					&& cd "/go/src/github.com/opencontainers/runc" \
 					&& git checkout -q "\$RUNC_COMMIT"
 			RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/opencontainers/runc" \
diff --git a/hack/release.sh b/hack/release.sh
index bc33f70..9ed3aee 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -272,10 +272,11 @@
 	# TODO create redirect from builds/*/i686 to builds/*/i386
 
 	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
-# To install, run the following command as root:
-curl -sSL -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && sudo tar zxf docker-$VERSION.tgz -C /
+# To install, run the following commands as root:
+curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin
+
 # Then start docker in daemon mode:
-sudo /usr/local/bin/docker daemon
+/usr/local/bin/dockerd
 EOF
 
 	# Add redirect at /builds/info for URL-backwards-compatibility
@@ -311,8 +312,8 @@
 echo
 echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
 echo
-echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
 echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
-echo "Windows 64bit client zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
+echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
+echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
 echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
 echo
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 0b1ea03..bc0822f 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -60,12 +60,12 @@
 clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
 clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
 clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
-clone git github.com/docker/engine-api f3b5ad20d4576de14c96603db522dec530d03f62
+clone git github.com/docker/engine-api c57d0447ea1ae71f6dad83c8d8a1215a89869a0c
 clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
 clone git github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork 452dff166e0abd9455b07c835613197f078a34de
+clone git github.com/docker/libnetwork 13be89d1cf79760acae842a32ad8531567220286
 clone git github.com/docker/go-events 39718a26497694185f8fb58a7d6f31947f3dc42d
 clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -102,7 +102,7 @@
 clone git github.com/docker/go v1.5.1-1-1-gbaf439e
 clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
 
-clone git github.com/opencontainers/runc 85873d917e86676e44ccb80719fcb47a794676a1 # libcontainer
+clone git github.com/opencontainers/runc cc29e3dded8e27ba8f65738f40d251c885030a28 # libcontainer
 clone git github.com/opencontainers/specs v1.0.0-rc1 # specs
 clone git github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
@@ -139,7 +139,7 @@
 clone git github.com/docker/containerd 860f3a94940894ac0a106eff4bd1616a67407ee2
 
 # cluster
-clone git github.com/docker/swarmkit 25572005febb76c2cc5f7e37d878615e6fe330f9
+clone git github.com/docker/swarmkit 310f1119bc81f22e60b5670d9d4731bc12d7be87
 clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
 clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
 clone git github.com/cloudflare/cfssl 92f037e39eb103fb30f9151be40d9ed267fc4ae2
@@ -158,6 +158,7 @@
 clone git github.com/prometheus/procfs 454a56f35412459b5e684fd5ec0f9211b94f002a
 clone hg bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675
 clone git github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+clone git github.com/pkg/errors 01fa4104b9c248c8945d14d9f128454d5b28d595
 
 # cli
 clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git
diff --git a/integration-cli/daemon_swarm.go b/integration-cli/daemon_swarm.go
index 0b553d3..6c18dae 100644
--- a/integration-cli/daemon_swarm.go
+++ b/integration-cli/daemon_swarm.go
@@ -5,6 +5,7 @@
 	"fmt"
 	"net/http"
 	"strings"
+	"time"
 
 	"github.com/docker/docker/pkg/integration/checker"
 	"github.com/docker/engine-api/types"
@@ -26,11 +27,16 @@
 		ListenAddr: d.listenAddr,
 	}
 	for _, role := range []swarm.NodeRole{swarm.NodeRoleManager, swarm.NodeRoleWorker} {
-		req.Spec.AcceptancePolicy.Policies = append(req.Spec.AcceptancePolicy.Policies, swarm.Policy{
+		policy := swarm.Policy{
 			Role:       role,
 			Autoaccept: autoAccept[strings.ToLower(string(role))],
-			Secret:     secret,
-		})
+		}
+
+		if secret != "" {
+			policy.Secret = &secret
+		}
+
+		req.Spec.AcceptancePolicy.Policies = append(req.Spec.AcceptancePolicy.Policies, policy)
 	}
 	status, out, err := d.SockRequest("POST", "/swarm/init", req)
 	if status != http.StatusOK {
@@ -49,13 +55,17 @@
 
 // Join joins a current daemon with existing cluster.
 func (d *SwarmDaemon) Join(remoteAddr, secret, cahash string, manager bool) error {
-	status, out, err := d.SockRequest("POST", "/swarm/join", swarm.JoinRequest{
+	req := swarm.JoinRequest{
 		ListenAddr:  d.listenAddr,
 		RemoteAddrs: []string{remoteAddr},
 		Manager:     manager,
-		Secret:      secret,
 		CACertHash:  cahash,
-	})
+	}
+
+	if secret != "" {
+		req.Secret = secret
+	}
+	status, out, err := d.SockRequest("POST", "/swarm/join", req)
 	if status != http.StatusOK {
 		return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out)
 	}
@@ -105,6 +115,7 @@
 
 type serviceConstructor func(*swarm.Service)
 type nodeConstructor func(*swarm.Node)
+type specConstructor func(*swarm.Spec)
 
 func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string {
 	var service swarm.Service
@@ -157,14 +168,22 @@
 	return &node
 }
 
-func (d *SwarmDaemon) updateNode(c *check.C, node *swarm.Node, f ...nodeConstructor) {
-	for _, fn := range f {
-		fn(node)
+func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) {
+	for i := 0; ; i++ {
+		node := d.getNode(c, id)
+		for _, fn := range f {
+			fn(node)
+		}
+		url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
+		status, out, err := d.SockRequest("POST", url, node.Spec)
+		if i < 10 && strings.Contains(string(out), "update out of sequence") {
+			time.Sleep(100 * time.Millisecond)
+			continue
+		}
+		c.Assert(err, checker.IsNil)
+		c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+		return
 	}
-	url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index)
-	status, out, err := d.SockRequest("POST", url, node.Spec)
-	c.Assert(err, checker.IsNil)
-	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
 }
 
 func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node {
@@ -176,3 +195,19 @@
 	c.Assert(json.Unmarshal(out, &nodes), checker.IsNil)
 	return nodes
 }
+
+func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) {
+	var sw swarm.Swarm
+	status, out, err := d.SockRequest("GET", "/swarm", nil)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+	c.Assert(json.Unmarshal(out, &sw), checker.IsNil)
+
+	for _, fn := range f {
+		fn(&sw.Spec)
+	}
+	url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index)
+	status, out, err = d.SockRequest("POST", url, sw.Spec)
+	c.Assert(err, checker.IsNil)
+	c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out)))
+}
diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go
index 9b6ba6c..9f086bd 100644
--- a/integration-cli/docker_api_swarm_test.go
+++ b/integration-cli/docker_api_swarm_test.go
@@ -82,7 +82,7 @@
 	err := d2.Join(d1.listenAddr, "", "", false)
 	c.Assert(err, checker.NotNil)
 	if secret == "" {
-		c.Assert(err.Error(), checker.Contains, "Timeout reached")
+		c.Assert(err.Error(), checker.Contains, "needs to be accepted")
 		info, err := d2.info()
 		c.Assert(err, checker.IsNil)
 		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
@@ -97,23 +97,25 @@
 		c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
 	}
 	d3 := s.AddDaemon(c, false, false)
-	go func() {
-		for i := 0; ; i++ {
-			info, err := d3.info()
-			c.Assert(err, checker.IsNil)
-			if info.NodeID != "" {
-				d1.updateNode(c, d1.getNode(c, info.NodeID), func(n *swarm.Node) {
-					n.Spec.Membership = swarm.NodeMembershipAccepted
-				})
-				return
-			}
-			if i >= 10 {
-				c.Errorf("could not find nodeID")
-			}
-			time.Sleep(300 * time.Millisecond)
+	c.Assert(d3.Join(d1.listenAddr, secret, "", false), checker.NotNil)
+	info, err := d3.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
+	c.Assert(len(info.NodeID), checker.GreaterThan, 5)
+	d1.updateNode(c, info.NodeID, func(n *swarm.Node) {
+		n.Spec.Membership = swarm.NodeMembershipAccepted
+	})
+	for i := 0; ; i++ {
+		info, err := d3.info()
+		c.Assert(err, checker.IsNil)
+		if info.LocalNodeState == swarm.LocalNodeStateActive {
+			break
 		}
-	}()
-	c.Assert(d3.Join(d1.listenAddr, secret, "", false), checker.IsNil)
+		if i > 10 {
+			c.Errorf("node did not become active")
+		}
+		time.Sleep(200 * time.Millisecond)
+	}
 }
 
 func (s *DockerSwarmSuite) TestApiSwarmSecretAcceptance(c *check.C) {
@@ -145,6 +147,74 @@
 	info, err = d2.info()
 	c.Assert(err, checker.IsNil)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+	// change secret
+	d1.updateSwarm(c, func(s *swarm.Spec) {
+		for i := range s.AcceptancePolicy.Policies {
+			p := "foobaz"
+			s.AcceptancePolicy.Policies[i].Secret = &p
+		}
+	})
+
+	err = d2.Join(d1.listenAddr, "foobar", "", false)
+	c.Assert(err, checker.NotNil)
+	c.Assert(err.Error(), checker.Contains, "secret token is necessary")
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+	c.Assert(d2.Join(d1.listenAddr, "foobaz", "", false), checker.IsNil)
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+	c.Assert(d2.Leave(false), checker.IsNil)
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+	// change policy, don't change secret
+	d1.updateSwarm(c, func(s *swarm.Spec) {
+		for i, p := range s.AcceptancePolicy.Policies {
+			if p.Role == swarm.NodeRoleManager {
+				s.AcceptancePolicy.Policies[i].Autoaccept = false
+			}
+			s.AcceptancePolicy.Policies[i].Secret = nil
+		}
+	})
+
+	err = d2.Join(d1.listenAddr, "", "", false)
+	c.Assert(err, checker.NotNil)
+	c.Assert(err.Error(), checker.Contains, "secret token is necessary")
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+	c.Assert(d2.Join(d1.listenAddr, "foobaz", "", false), checker.IsNil)
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+	c.Assert(d2.Leave(false), checker.IsNil)
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
+	// clear secret
+	d1.updateSwarm(c, func(s *swarm.Spec) {
+		for i := range s.AcceptancePolicy.Policies {
+			p := ""
+			s.AcceptancePolicy.Policies[i].Secret = &p
+		}
+	})
+
+	c.Assert(d2.Join(d1.listenAddr, "", "", false), checker.IsNil)
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
+	c.Assert(d2.Leave(false), checker.IsNil)
+	info, err = d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive)
+
 }
 
 func (s *DockerSwarmSuite) TestApiSwarmCAHash(c *check.C) {
@@ -168,7 +238,7 @@
 	c.Assert(info.ControlAvailable, checker.Equals, false)
 	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive)
 
-	d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+	d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleManager
 	})
 
@@ -187,7 +257,7 @@
 		time.Sleep(100 * time.Millisecond)
 	}
 
-	d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+	d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Role = swarm.NodeRoleWorker
 	})
 
@@ -398,7 +468,7 @@
 
 	nodes := d.listNodes(c)
 
-	d.updateNode(c, d.getNode(c, nodes[0].ID), func(n *swarm.Node) {
+	d.updateNode(c, nodes[0].ID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
@@ -421,14 +491,14 @@
 	waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances)
 
 	// drain d2, all containers should move to d1
-	d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+	d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityDrain
 	})
 	waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances)
 	waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0)
 
 	// set d2 back to active
-	d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+	d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityActive
 	})
 
@@ -448,7 +518,7 @@
 	d2ContainerCount := len(d2.activeContainers())
 
 	// set d2 to paused, scale service up, only d1 gets new tasks
-	d1.updateNode(c, d1.getNode(c, d2.NodeID), func(n *swarm.Node) {
+	d1.updateNode(c, d2.NodeID, func(n *swarm.Node) {
 		n.Spec.Availability = swarm.NodeAvailabilityPause
 	})
 
@@ -482,6 +552,32 @@
 	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
 }
 
+// #23629
+func (s *DockerSwarmSuite) TestApiSwarmLeaveOnPendingJoin(c *check.C) {
+	s.AddDaemon(c, true, true)
+	d2 := s.AddDaemon(c, false, false)
+
+	id, err := d2.Cmd("run", "-d", "busybox", "top")
+	c.Assert(err, checker.IsNil)
+	id = strings.TrimSpace(id)
+
+	go d2.Join("nosuchhost:1234", "", "", false) // will block on pending state
+
+	time.Sleep(1 * time.Second)
+
+	info, err := d2.info()
+	c.Assert(err, checker.IsNil)
+	c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStatePending)
+
+	c.Assert(d2.Leave(true), checker.IsNil)
+
+	waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1)
+
+	id2, err := d2.Cmd("ps", "-q")
+	c.Assert(err, checker.IsNil)
+	c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2))
+}
+
 func (s *DockerSwarmSuite) TestApiSwarmManagerRestore(c *check.C) {
 	d1 := s.AddDaemon(c, true, true)
 
diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go
index 34f1f7e..2f71d0f 100644
--- a/integration-cli/docker_cli_by_digest_test.go
+++ b/integration-cli/docker_cli_by_digest_test.go
@@ -284,10 +284,8 @@
 	out, _ = dockerCmd(c, "images", "--digests")
 
 	// make sure image 1 has repo, tag, <none> AND repo, <none>, digest
-	reWithTag1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*<none>\s`)
-	reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest1.String() + `\s`)
+	reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*tag1\s*` + digest1.String() + `\s`)
 	c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out))
-	c.Assert(reWithTag1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag1.String(), out))
 	// make sure image 2 has repo, <none>, digest
 	c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out))
 
@@ -298,21 +296,19 @@
 	out, _ = dockerCmd(c, "images", "--digests")
 
 	// make sure image 1 has repo, tag, digest
-	c.Assert(reWithTag1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag1.String(), out))
+	c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out))
 
 	// make sure image 2 has repo, tag, digest
-	reWithTag2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*<none>\s`)
-	reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*<none>\s*` + digest2.String() + `\s`)
-	c.Assert(reWithTag2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag2.String(), out))
+	reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`)
 	c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out))
 
 	// list images
 	out, _ = dockerCmd(c, "images", "--digests")
 
 	// make sure image 1 has repo, tag, digest
-	c.Assert(reWithTag1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag1.String(), out))
+	c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out))
 	// make sure image 2 has repo, tag, digest
-	c.Assert(reWithTag2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithTag2.String(), out))
+	c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out))
 	// make sure busybox has tag, but not digest
 	busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*<none>\s`)
 	c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out))
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index ac3fe75..b866f8b 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -2401,7 +2401,7 @@
 }
 `
 	ioutil.WriteFile(configName, []byte(config), 0644)
-	err = s.d.Start("--config-file", configName)
+	err = s.d.StartWithBusybox("--config-file", configName)
 	c.Assert(err, check.IsNil)
 
 	// Run with default runtime
@@ -2497,7 +2497,7 @@
 }
 
 func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
-	err := s.d.Start("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
+	err := s.d.StartWithBusybox("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
 	c.Assert(err, check.IsNil)
 
 	// Run with default runtime
@@ -2519,7 +2519,7 @@
 
 	// Start a daemon without any extra runtimes
 	s.d.Stop()
-	err = s.d.Start()
+	err = s.d.StartWithBusybox()
 	c.Assert(err, check.IsNil)
 
 	// Run with default runtime
@@ -2546,7 +2546,7 @@
 
 	// Check that we can select a default runtime
 	s.d.Stop()
-	err = s.d.Start("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
+	err = s.d.StartWithBusybox("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
 	c.Assert(err, check.IsNil)
 
 	out, err = s.d.Cmd("run", "--rm", "busybox", "ls")
diff --git a/integration-cli/docker_cli_health_test.go b/integration-cli/docker_cli_health_test.go
index b374dba..d231965 100644
--- a/integration-cli/docker_cli_health_test.go
+++ b/integration-cli/docker_cli_health_test.go
@@ -127,12 +127,10 @@
 	c.Check(last.ExitCode, checker.Equals, 0)
 	c.Check(last.Output, checker.Equals, "OK\n")
 
-	// Fail the check, which should now make it exit
+	// Fail the check
 	dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status")
-	waitForStatus(c, "fatal_healthcheck", "running", "exited")
+	waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy")
 
-	out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", "fatal_healthcheck")
-	c.Check(out, checker.Equals, "unhealthy\n")
 	failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", "fatal_healthcheck")
 	fails, err := strconv.Atoi(strings.TrimSpace(failsStr))
 	c.Check(err, check.IsNil)
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
index 816cbfd..ba35811 100644
--- a/integration-cli/docker_cli_info_test.go
+++ b/integration-cli/docker_cli_info_test.go
@@ -32,6 +32,7 @@
 		"Storage Driver:",
 		"Volume:",
 		"Network:",
+		"Security Options:",
 	}
 
 	if DaemonIsLinux.Condition() {
diff --git a/integration-cli/docker_cli_info_unix_test.go b/integration-cli/docker_cli_info_unix_test.go
new file mode 100644
index 0000000..900534d
--- /dev/null
+++ b/integration-cli/docker_cli_info_unix_test.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package main
+
+import (
+	"github.com/docker/docker/pkg/integration/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) {
+	testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux)
+
+	out, _ := dockerCmd(c, "info")
+	c.Assert(out, checker.Contains, "Security Options: apparmor seccomp")
+}
diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go
index d5c5713..25f08f3 100644
--- a/integration-cli/docker_cli_network_unix_test.go
+++ b/integration-cli/docker_cli_network_unix_test.go
@@ -1634,7 +1634,7 @@
 		t.Fatal(err)
 	}
 
-	// start a new container try to publist port 80:80 will failed
+	// start a new container, trying to publish port 80:80 should fail
 	out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
 	if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") {
 		t.Fatalf("80 port is allocated to old running container, it should failed on allocating to new container")
@@ -1645,7 +1645,17 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	_, err = s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
+	id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Cleanup because these containers will not be shut down by daemon
+	out, err = s.d.Cmd("stop", newCon)
+	if err != nil {
+		t.Fatalf("err: %v %v", err, string(out))
+	}
+	_, err = s.d.Cmd("stop", strings.TrimSpace(id))
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/integration-cli/docker_cli_plugins_test.go b/integration-cli/docker_cli_plugins_test.go
new file mode 100644
index 0000000..4846744
--- /dev/null
+++ b/integration-cli/docker_cli_plugins_test.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+	"github.com/docker/docker/pkg/integration/checker"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSuite) TestPluginBasicOps(c *check.C) {
+	testRequires(c, DaemonIsLinux, ExperimentalDaemon)
+	name := "tiborvass/no-remove"
+	tag := "latest"
+	nameWithTag := name + ":" + tag
+
+	_, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", name)
+	c.Assert(err, checker.IsNil)
+
+	out, _, err := dockerCmdWithError("plugin", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, name)
+	c.Assert(out, checker.Contains, tag)
+	c.Assert(out, checker.Contains, "true")
+
+	out, _, err = dockerCmdWithError("plugin", "inspect", nameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "A test plugin for Docker")
+
+	out, _, err = dockerCmdWithError("plugin", "remove", nameWithTag)
+	c.Assert(out, checker.Contains, "is active")
+
+	_, _, err = dockerCmdWithError("plugin", "disable", nameWithTag)
+	c.Assert(err, checker.IsNil)
+
+	out, _, err = dockerCmdWithError("plugin", "remove", nameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, nameWithTag)
+}
+
+func (s *DockerSuite) TestPluginInstallDisable(c *check.C) {
+	testRequires(c, DaemonIsLinux, ExperimentalDaemon)
+	name := "tiborvass/no-remove"
+	tag := "latest"
+	nameWithTag := name + ":" + tag
+
+	_, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", name)
+	c.Assert(err, checker.IsNil)
+
+	out, _, err := dockerCmdWithError("plugin", "ls")
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, "false")
+
+	out, _, err = dockerCmdWithError("plugin", "remove", nameWithTag)
+	c.Assert(err, checker.IsNil)
+	c.Assert(out, checker.Contains, nameWithTag)
+}
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index 0703214..164a515 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -829,6 +829,23 @@
 	}
 }
 
+func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) {
+	name := "img-with-volumes"
+	_, err := buildImage(
+		name,
+		`
+    FROM busybox
+    VOLUME /run
+    RUN touch /run/stuff
+    `,
+		true)
+	if err != nil {
+		c.Fatal(err)
+	}
+	out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run")
+	c.Assert(out, checker.Not(checker.Contains), "stuff")
+}
+
 // Test case for #22420
 func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) {
 	testRequires(c, DaemonIsLinux)
diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go
new file mode 100644
index 0000000..95deab5
--- /dev/null
+++ b/integration-cli/docker_cli_swarm_test.go
@@ -0,0 +1,76 @@
+// +build !windows
+
+package main
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/docker/docker/pkg/integration/checker"
+	"github.com/docker/engine-api/types/swarm"
+	"github.com/go-check/check"
+)
+
+func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) {
+	d := s.AddDaemon(c, true, true)
+
+	getSpec := func() swarm.Spec {
+		out, err := d.Cmd("swarm", "inspect")
+		c.Assert(err, checker.IsNil)
+		var sw []swarm.Swarm
+		c.Assert(json.Unmarshal([]byte(out), &sw), checker.IsNil)
+		c.Assert(len(sw), checker.Equals, 1)
+		return sw[0].Spec
+	}
+
+	out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s", "--auto-accept", "manager", "--auto-accept", "worker", "--secret", "foo")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+
+	spec := getSpec()
+	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
+	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second))
+
+	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
+
+	for _, p := range spec.AcceptancePolicy.Policies {
+		c.Assert(p.Autoaccept, checker.Equals, true)
+		c.Assert(p.Secret, checker.NotNil)
+		c.Assert(*p.Secret, checker.Not(checker.Equals), "")
+	}
+
+	out, err = d.Cmd("swarm", "update", "--auto-accept", "none")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+
+	spec = getSpec()
+	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
+	c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second))
+
+	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
+
+	for _, p := range spec.AcceptancePolicy.Policies {
+		c.Assert(p.Autoaccept, checker.Equals, false)
+		// secret is still set
+		c.Assert(p.Secret, checker.NotNil)
+		c.Assert(*p.Secret, checker.Not(checker.Equals), "")
+	}
+
+	out, err = d.Cmd("swarm", "update", "--auto-accept", "manager", "--secret", "")
+	c.Assert(err, checker.IsNil, check.Commentf("out: %v", out))
+
+	spec = getSpec()
+
+	c.Assert(spec.AcceptancePolicy.Policies, checker.HasLen, 2)
+
+	for _, p := range spec.AcceptancePolicy.Policies {
+		c.Assert(p.Autoaccept, checker.Equals, p.Role == swarm.NodeRoleManager)
+		// secret has been removed
+		c.Assert(p.Secret, checker.IsNil)
+	}
+
+	// setting anything under 30m for cert-expiry is not allowed
+	out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m")
+	c.Assert(err, checker.NotNil)
+	c.Assert(out, checker.Contains, "minimum certificate expiry time")
+	spec = getSpec()
+	c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour)
+}
diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go
index 70275e9..56a1596 100644
--- a/integration-cli/requirements.go
+++ b/integration-cli/requirements.go
@@ -30,6 +30,10 @@
 		func() bool { return daemonPlatform == "linux" },
 		"Test requires a Linux daemon",
 	}
+	ExperimentalDaemon = testRequirement{
+		func() bool { return utils.ExperimentalBuild() },
+		"Test requires an experimental daemon",
+	}
 	NotExperimentalDaemon = testRequirement{
 		func() bool { return !utils.ExperimentalBuild() },
 		"Test requires a non experimental daemon",
diff --git a/man/docker-info.1.md b/man/docker-info.1.md
index d777f25..1d96b56 100644
--- a/man/docker-info.1.md
+++ b/man/docker-info.1.md
@@ -41,14 +41,22 @@
      Paused: 1
      Stopped: 10
     Images: 52
-    Server Version: 1.11.1
+    Server Version: 1.12.0-dev
     Storage Driver: overlay
      Backing Filesystem: extfs
     Logging Driver: json-file
     Cgroup Driver: cgroupfs
     Plugins:
      Volume: local
-     Network: bridge null host
+     Network: bridge null host overlay
+    Swarm: 
+     NodeID: 0gac67oclbxq7
+     IsManager: YES
+     Managers: 2
+     Nodes: 2
+    Runtimes: default
+    Default Runtime: default
+    Security Options: apparmor seccomp
     Kernel Version: 4.4.0-21-generic
     Operating System: Ubuntu 16.04 LTS
     OSType: linux
diff --git a/pkg/testutil/assert/assert.go b/pkg/testutil/assert/assert.go
new file mode 100644
index 0000000..fffdfd0
--- /dev/null
+++ b/pkg/testutil/assert/assert.go
@@ -0,0 +1,47 @@
+// Package assert contains functions for making assertions in unit tests
+package assert
+
+import (
+	"strings"
+)
+
+// TestingT is an interface which defines the methods of testing.T that are
+// required by this package
+type TestingT interface {
+	Fatalf(string, ...interface{})
+}
+
+// Equal compares the actual value to the expected value and fails the test if
+// they are not equal.
+func Equal(t TestingT, actual, expected interface{}) {
+	if expected != actual {
+		t.Fatalf("Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual)
+	}
+}
+
+// NilError asserts that the error is nil, otherwise it fails the test.
+func NilError(t TestingT, err error) {
+	if err != nil {
+		t.Fatalf("Expected no error, got: %s", err.Error())
+	}
+}
+
+// Error asserts that error is not nil, and contains the expected text,
+// otherwise it fails the test.
+func Error(t TestingT, err error, contains string) {
+	if err == nil {
+		t.Fatalf("Expected an error, but error was nil")
+	}
+
+	if !strings.Contains(err.Error(), contains) {
+		t.Fatalf("Expected error to contain '%s', got '%s'", contains, err.Error())
+	}
+}
+
+// Contains asserts that the string contains a substring, otherwise it fails the
+// test.
+func Contains(t TestingT, actual, contains string) {
+	if !strings.Contains(actual, contains) {
+		t.Fatalf("Expected '%s' to contain '%s'", actual, contains)
+	}
+}
diff --git a/pkg/testutil/pkg.go b/pkg/testutil/pkg.go
new file mode 100644
index 0000000..110b2e6
--- /dev/null
+++ b/pkg/testutil/pkg.go
@@ -0,0 +1 @@
+package testutil
diff --git a/plugin/backend.go b/plugin/backend.go
index 541f06c..1b6daa4 100644
--- a/plugin/backend.go
+++ b/plugin/backend.go
@@ -40,7 +40,7 @@
 	if err != nil {
 		return tp, err
 	}
-	return p.p, nil
+	return p.P, nil
 }
 
 // Pull pulls a plugin and enables it.
@@ -76,10 +76,6 @@
 	}
 
 	p := pm.newPlugin(ref, pluginID)
-	if ref, ok := ref.(reference.NamedTagged); ok {
-		p.p.Tag = ref.Tag()
-	}
-
 	if err := pm.initPlugin(p); err != nil {
 		return nil, err
 	}
@@ -90,14 +86,14 @@
 	pm.save()
 	pm.Unlock()
 
-	return computePrivileges(&p.p.Manifest), nil
+	return computePrivileges(&p.P.Manifest), nil
 }
 
 // List displays the list of plugins and associated metadata.
 func (pm *Manager) List() ([]types.Plugin, error) {
 	out := make([]types.Plugin, 0, len(pm.plugins))
 	for _, p := range pm.plugins {
-		out = append(out, p.p)
+		out = append(out, p.P)
 	}
 	return out, nil
 }
@@ -105,7 +101,7 @@
 // Push pushes a plugin to the store.
 func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error {
 	p, err := pm.get(name)
-	dest := filepath.Join(pm.libRoot, p.p.ID)
+	dest := filepath.Join(pm.libRoot, p.P.ID)
 	config, err := os.Open(filepath.Join(dest, "manifest.json"))
 	if err != nil {
 		return err
diff --git a/plugin/manager.go b/plugin/manager.go
index 8d29845..33887ce 100644
--- a/plugin/manager.go
+++ b/plugin/manager.go
@@ -22,10 +22,7 @@
 	"github.com/docker/engine-api/types"
 )
 
-const (
-	defaultPluginRuntimeDestination = "/run/docker/plugins"
-	defaultPluginStateDestination   = "/state"
-)
+const defaultPluginRuntimeDestination = "/run/docker/plugins"
 
 var manager *Manager
 
@@ -46,10 +43,9 @@
 
 type plugin struct {
 	//sync.RWMutex TODO
-	p                 types.Plugin
+	P                 types.Plugin `json:"plugin"`
 	client            *plugins.Client
 	restartManager    restartmanager.RestartManager
-	stateSourcePath   string
 	runtimeSourcePath string
 }
 
@@ -58,25 +54,36 @@
 }
 
 func (p *plugin) Name() string {
-	return p.p.Name
+	name := p.P.Name
+	if len(p.P.Tag) > 0 {
+		// TODO: this feels hacky, maybe we should be storing the distribution reference rather than splitting these
+		name += ":" + p.P.Tag
+	}
+	return name
 }
 
 func (pm *Manager) newPlugin(ref reference.Named, id string) *plugin {
 	p := &plugin{
-		p: types.Plugin{
+		P: types.Plugin{
 			Name: ref.Name(),
 			ID:   id,
 		},
-		stateSourcePath:   filepath.Join(pm.libRoot, id, "state"),
 		runtimeSourcePath: filepath.Join(pm.runRoot, id),
 	}
 	if ref, ok := ref.(reference.NamedTagged); ok {
-		p.p.Tag = ref.Tag()
+		p.P.Tag = ref.Tag()
 	}
 	return p
 }
 
-// TODO: figure out why save() doesn't json encode *plugin object
+func (pm *Manager) restorePlugin(p *plugin) error {
+	p.runtimeSourcePath = filepath.Join(pm.runRoot, p.P.ID)
+	if p.P.Active {
+		return pm.restore(p)
+	}
+	return nil
+}
+
 type pluginMap map[string]*plugin
 
 // Manager controls the plugin subsystem.
@@ -90,6 +97,7 @@
 	containerdClient libcontainerd.Client
 	registryService  registry.Service
 	handleLegacy     bool
+	liveRestore      bool
 }
 
 // GetManager returns the singleton plugin Manager
@@ -99,7 +107,7 @@
 
 // Init (was NewManager) instantiates the singleton Manager.
 // TODO: revert this to NewManager once we get rid of all the singletons.
-func Init(root, execRoot string, remote libcontainerd.Remote, rs registry.Service) (err error) {
+func Init(root, execRoot string, remote libcontainerd.Remote, rs registry.Service, liveRestore bool) (err error) {
 	if manager != nil {
 		return nil
 	}
@@ -120,17 +128,18 @@
 		handlers:        make(map[string]func(string, *plugins.Client)),
 		registryService: rs,
 		handleLegacy:    true,
+		liveRestore:     liveRestore,
 	}
 	if err := os.MkdirAll(manager.runRoot, 0700); err != nil {
 		return err
 	}
-	if err := manager.init(); err != nil {
-		return err
-	}
 	manager.containerdClient, err = remote.Client(manager)
 	if err != nil {
 		return err
 	}
+	if err := manager.init(); err != nil {
+		return err
+	}
 	return nil
 }
 
@@ -165,7 +174,7 @@
 		defer manager.RUnlock()
 	pluginLoop:
 		for _, p := range manager.plugins {
-			for _, typ := range p.p.Manifest.Interface.Types {
+			for _, typ := range p.P.Manifest.Interface.Types {
 				if typ.Capability != capability || typ.Prefix != "docker" {
 					continue pluginLoop
 				}
@@ -195,7 +204,18 @@
 	)
 	handleLegacy := true
 	if manager != nil {
-		p, err = manager.get(name)
+		fullName := name
+		if named, err := reference.ParseNamed(fullName); err == nil { // FIXME: validate
+			if reference.IsNameOnly(named) {
+				named = reference.WithDefaultTag(named)
+			}
+			ref, ok := named.(reference.NamedTagged)
+			if !ok {
+				return nil, fmt.Errorf("invalid name: %s", named.String())
+			}
+			fullName = ref.String()
+		}
+		p, err = manager.get(fullName)
 		if err != nil {
 			if _, ok := err.(ErrNotFound); !ok {
 				return nil, err
@@ -216,7 +236,7 @@
 	}
 
 	capability = strings.ToLower(capability)
-	for _, typ := range p.p.Manifest.Interface.Types {
+	for _, typ := range p.P.Manifest.Interface.Types {
 		if typ.Capability == capability && typ.Prefix == "docker" {
 			return p, nil
 		}
@@ -257,55 +277,78 @@
 		}
 		return err
 	}
-	// TODO: Populate pm.plugins
-	if err := json.NewDecoder(dt).Decode(&pm.nameToID); err != nil {
+
+	if err := json.NewDecoder(dt).Decode(&pm.plugins); err != nil {
 		return err
 	}
-	// FIXME: validate, restore
 
-	return nil
+	var group sync.WaitGroup
+	group.Add(len(pm.plugins))
+	for _, p := range pm.plugins {
+		go func(p *plugin) {
+			defer group.Done()
+			if err := pm.restorePlugin(p); err != nil {
+				logrus.Errorf("Error restoring plugin '%s': %s", p.Name(), err)
+				return
+			}
+
+			pm.Lock()
+			pm.nameToID[p.Name()] = p.P.ID
+			requiresManualRestore := !pm.liveRestore && p.P.Active
+			pm.Unlock()
+
+			if requiresManualRestore {
+				// if liveRestore is not enabled, the plugin will be stopped now so we should enable it
+				if err := pm.enable(p); err != nil {
+					logrus.Errorf("Error restoring plugin '%s': %s", p.Name(), err)
+				}
+			}
+		}(p)
+	}
+	group.Wait()
+	return pm.save()
 }
 
 func (pm *Manager) initPlugin(p *plugin) error {
-	dt, err := os.Open(filepath.Join(pm.libRoot, p.p.ID, "manifest.json"))
+	dt, err := os.Open(filepath.Join(pm.libRoot, p.P.ID, "manifest.json"))
 	if err != nil {
 		return err
 	}
-	err = json.NewDecoder(dt).Decode(&p.p.Manifest)
+	err = json.NewDecoder(dt).Decode(&p.P.Manifest)
 	dt.Close()
 	if err != nil {
 		return err
 	}
 
-	p.p.Config.Mounts = make([]types.PluginMount, len(p.p.Manifest.Mounts))
-	for i, mount := range p.p.Manifest.Mounts {
-		p.p.Config.Mounts[i] = mount
+	p.P.Config.Mounts = make([]types.PluginMount, len(p.P.Manifest.Mounts))
+	for i, mount := range p.P.Manifest.Mounts {
+		p.P.Config.Mounts[i] = mount
 	}
-	p.p.Config.Env = make([]string, 0, len(p.p.Manifest.Env))
-	for _, env := range p.p.Manifest.Env {
+	p.P.Config.Env = make([]string, 0, len(p.P.Manifest.Env))
+	for _, env := range p.P.Manifest.Env {
 		if env.Value != nil {
-			p.p.Config.Env = append(p.p.Config.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value))
+			p.P.Config.Env = append(p.P.Config.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value))
 		}
 	}
-	copy(p.p.Config.Args, p.p.Manifest.Args.Value)
+	copy(p.P.Config.Args, p.P.Manifest.Args.Value)
 
-	f, err := os.Create(filepath.Join(pm.libRoot, p.p.ID, "plugin-config.json"))
+	f, err := os.Create(filepath.Join(pm.libRoot, p.P.ID, "plugin-config.json"))
 	if err != nil {
 		return err
 	}
-	err = json.NewEncoder(f).Encode(&p.p.Config)
+	err = json.NewEncoder(f).Encode(&p.P.Config)
 	f.Close()
 	return err
 }
 
 func (pm *Manager) remove(p *plugin) error {
-	if p.p.Active {
-		return fmt.Errorf("plugin %s is active", p.p.Name)
+	if p.P.Active {
+		return fmt.Errorf("plugin %s is active", p.Name())
 	}
 	pm.Lock() // fixme: lock single record
 	defer pm.Unlock()
-	os.RemoveAll(p.stateSourcePath)
-	delete(pm.plugins, p.p.Name)
+	delete(pm.plugins, p.P.ID)
+	delete(pm.nameToID, p.Name())
 	pm.save()
 	return nil
 }
@@ -326,7 +369,7 @@
 func (pm *Manager) save() error {
 	filePath := filepath.Join(pm.libRoot, "plugins.json")
 
-	jsonData, err := json.Marshal(pm.nameToID)
+	jsonData, err := json.Marshal(pm.plugins)
 	if err != nil {
 		logrus.Debugf("Error in json.Marshal: %v", err)
 		return err
diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go
index b5051da..277fa3c 100644
--- a/plugin/manager_linux.go
+++ b/plugin/manager_linux.go
@@ -25,11 +25,11 @@
 	}
 
 	p.restartManager = restartmanager.New(container.RestartPolicy{Name: "always"}, 0)
-	if err := pm.containerdClient.Create(p.p.ID, libcontainerd.Spec(*spec), libcontainerd.WithRestartManager(p.restartManager)); err != nil { // POC-only
+	if err := pm.containerdClient.Create(p.P.ID, libcontainerd.Spec(*spec), libcontainerd.WithRestartManager(p.restartManager)); err != nil { // POC-only
 		return err
 	}
 
-	socket := p.p.Manifest.Interface.Socket
+	socket := p.P.Manifest.Interface.Socket
 	p.client, err = plugins.NewClient("unix://"+filepath.Join(p.runtimeSourcePath, socket), nil)
 	if err != nil {
 		return err
@@ -38,11 +38,11 @@
 	//TODO: check net.Dial
 
 	pm.Lock() // fixme: lock single record
-	p.p.Active = true
+	p.P.Active = true
 	pm.save()
 	pm.Unlock()
 
-	for _, typ := range p.p.Manifest.Interface.Types {
+	for _, typ := range p.P.Manifest.Interface.Types {
 		if handler := pm.handlers[typ.String()]; handler != nil {
 			handler(p.Name(), p.Client())
 		}
@@ -51,25 +51,25 @@
 	return nil
 }
 
+func (pm *Manager) restore(p *plugin) error {
+	p.restartManager = restartmanager.New(container.RestartPolicy{Name: "always"}, 0)
+	return pm.containerdClient.Restore(p.P.ID, libcontainerd.WithRestartManager(p.restartManager))
+}
+
 func (pm *Manager) initSpec(p *plugin) (*specs.Spec, error) {
 	s := oci.DefaultSpec()
 
-	rootfs := filepath.Join(pm.libRoot, p.p.ID, "rootfs")
+	rootfs := filepath.Join(pm.libRoot, p.P.ID, "rootfs")
 	s.Root = specs.Root{
 		Path:     rootfs,
 		Readonly: false, // TODO: all plugins should be readonly? settable in manifest?
 	}
 
-	mounts := append(p.p.Config.Mounts, types.PluginMount{
+	mounts := append(p.P.Config.Mounts, types.PluginMount{
 		Source:      &p.runtimeSourcePath,
 		Destination: defaultPluginRuntimeDestination,
 		Type:        "bind",
 		Options:     []string{"rbind", "rshared"},
-	}, types.PluginMount{
-		Source:      &p.stateSourcePath,
-		Destination: defaultPluginStateDestination,
-		Type:        "bind",
-		Options:     []string{"rbind", "rshared"},
 	})
 	for _, mount := range mounts {
 		m := specs.Mount{
@@ -95,15 +95,19 @@
 		s.Mounts = append(s.Mounts, m)
 	}
 
-	envs := make([]string, 1, len(p.p.Config.Env)+1)
+	envs := make([]string, 1, len(p.P.Config.Env)+1)
 	envs[0] = "PATH=" + system.DefaultPathEnv
-	envs = append(envs, p.p.Config.Env...)
+	envs = append(envs, p.P.Config.Env...)
 
-	args := append(p.p.Manifest.Entrypoint, p.p.Config.Args...)
+	args := append(p.P.Manifest.Entrypoint, p.P.Config.Args...)
+	cwd := p.P.Manifest.Workdir
+	if len(cwd) == 0 {
+		cwd = "/"
+	}
 	s.Process = specs.Process{
 		Terminal: false,
 		Args:     args,
-		Cwd:      "/", // TODO: add in manifest?
+		Cwd:      cwd,
 		Env:      envs,
 	}
 
@@ -114,13 +118,13 @@
 	if err := p.restartManager.Cancel(); err != nil {
 		logrus.Error(err)
 	}
-	if err := pm.containerdClient.Signal(p.p.ID, int(syscall.SIGKILL)); err != nil {
+	if err := pm.containerdClient.Signal(p.P.ID, int(syscall.SIGKILL)); err != nil {
 		logrus.Error(err)
 	}
 	os.RemoveAll(p.runtimeSourcePath)
 	pm.Lock() // fixme: lock single record
 	defer pm.Unlock()
-	p.p.Active = false
+	p.P.Active = false
 	pm.save()
 	return nil
 }
diff --git a/plugin/manager_windows.go b/plugin/manager_windows.go
index 055a732..84d7a4c 100644
--- a/plugin/manager_windows.go
+++ b/plugin/manager_windows.go
@@ -19,3 +19,7 @@
 func (pm *Manager) disable(p *plugin) error {
 	return fmt.Errorf("Not implemented")
 }
+
+func (pm *Manager) restore(p *plugin) error {
+	return fmt.Errorf("Not implemented")
+}
diff --git a/profiles/seccomp/default.json b/profiles/seccomp/default.json
index 51ec3d4..40af6ad 100755
--- a/profiles/seccomp/default.json
+++ b/profiles/seccomp/default.json
@@ -687,6 +687,21 @@
 			"args": []
 		},
 		{
+			"name": "mlock",
+			"action": "SCMP_ACT_ALLOW",
+			"args": []
+		},
+		{
+			"name": "mlock2",
+			"action": "SCMP_ACT_ALLOW",
+			"args": []
+		},
+		{
+			"name": "mlockall",
+			"action": "SCMP_ACT_ALLOW",
+			"args": []
+		},
+		{
 			"name": "mmap",
 			"action": "SCMP_ACT_ALLOW",
 			"args": []
diff --git a/profiles/seccomp/seccomp_default.go b/profiles/seccomp/seccomp_default.go
index 87ae358..9e8d47f 100644
--- a/profiles/seccomp/seccomp_default.go
+++ b/profiles/seccomp/seccomp_default.go
@@ -722,6 +722,21 @@
 			Args:   []*types.Arg{},
 		},
 		{
+			Name:   "mlock",
+			Action: types.ActAllow,
+			Args:   []*types.Arg{},
+		},
+		{
+			Name:   "mlock2",
+			Action: types.ActAllow,
+			Args:   []*types.Arg{},
+		},
+		{
+			Name:   "mlockall",
+			Action: types.ActAllow,
+			Args:   []*types.Arg{},
+		},
+		{
 			Name:   "mmap",
 			Action: types.ActAllow,
 			Args:   []*types.Arg{},
@@ -1663,24 +1678,6 @@
 					Args:   []*types.Arg{},
 				},
 			}...)
-		case "CAP_IPC_LOCK":
-			syscalls = append(syscalls, []*types.Syscall{
-				{
-					Name:   "mlock",
-					Action: types.ActAllow,
-					Args:   []*types.Arg{},
-				},
-				{
-					Name:   "mlock2",
-					Action: types.ActAllow,
-					Args:   []*types.Arg{},
-				},
-				{
-					Name:   "mlockall",
-					Action: types.ActAllow,
-					Args:   []*types.Arg{},
-				},
-			}...)
 		case "CAP_SYS_ADMIN":
 			capSysAdmin = true
 			syscalls = append(syscalls, []*types.Syscall{
diff --git a/vendor/src/github.com/docker/engine-api/client/container_restart.go b/vendor/src/github.com/docker/engine-api/client/container_restart.go
index 9ae2e4b..93c042d 100644
--- a/vendor/src/github.com/docker/engine-api/client/container_restart.go
+++ b/vendor/src/github.com/docker/engine-api/client/container_restart.go
@@ -11,9 +11,11 @@
 // ContainerRestart stops and starts a container again.
 // It makes the daemon to wait for the container to be up again for
 // a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout time.Duration) error {
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
 	query := url.Values{}
-	query.Set("t", timetypes.DurationToSecondsString(timeout))
+	if timeout != nil {
+		query.Set("t", timetypes.DurationToSecondsString(*timeout))
+	}
 	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
 	ensureReaderClosed(resp)
 	return err
diff --git a/vendor/src/github.com/docker/engine-api/client/container_stop.go b/vendor/src/github.com/docker/engine-api/client/container_stop.go
index 890650d..1fc577f 100644
--- a/vendor/src/github.com/docker/engine-api/client/container_stop.go
+++ b/vendor/src/github.com/docker/engine-api/client/container_stop.go
@@ -10,9 +10,11 @@
 
 // ContainerStop stops a container without terminating the process.
 // The process is blocked until the container stops or the timeout expires.
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout time.Duration) error {
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
 	query := url.Values{}
-	query.Set("t", timetypes.DurationToSecondsString(timeout))
+	if timeout != nil {
+		query.Set("t", timetypes.DurationToSecondsString(*timeout))
+	}
 	resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
 	ensureReaderClosed(resp)
 	return err
diff --git a/vendor/src/github.com/docker/engine-api/client/interface.go b/vendor/src/github.com/docker/engine-api/client/interface.go
index 95d7755..929a6bc 100644
--- a/vendor/src/github.com/docker/engine-api/client/interface.go
+++ b/vendor/src/github.com/docker/engine-api/client/interface.go
@@ -15,26 +15,21 @@
 
 // CommonAPIClient is the common methods between stable and experimental versions of APIClient.
 type CommonAPIClient interface {
+	ContainerAPIClient
+	ImageAPIClient
+	NodeAPIClient
+	NetworkAPIClient
+	ServiceAPIClient
+	SwarmAPIClient
+	SystemAPIClient
+	VolumeAPIClient
 	ClientVersion() string
-	CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
-	CheckpointDelete(ctx context.Context, container string, checkpointID string) error
-	CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
-	SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
-	SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
-	SwarmLeave(ctx context.Context, force bool) error
-	SwarmInspect(ctx context.Context) (swarm.Swarm, error)
-	SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error
-	NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error)
-	NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
-	NodeRemove(ctx context.Context, nodeID string) error
-	NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
-	ServiceCreate(ctx context.Context, service swarm.ServiceSpec) (types.ServiceCreateResponse, error)
-	ServiceInspect(ctx context.Context, serviceID string) (swarm.Service, error)
-	ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
-	ServiceRemove(ctx context.Context, serviceID string) error
-	ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec) error
-	TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
-	TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+	ServerVersion(ctx context.Context) (types.Version, error)
+	UpdateClientVersion(v string)
+}
+
+// ContainerAPIClient defines API client methods for the containers
+type ContainerAPIClient interface {
 	ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
 	ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error)
 	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error)
@@ -54,18 +49,21 @@
 	ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
 	ContainerRename(ctx context.Context, container, newContainerName string) error
 	ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
-	ContainerRestart(ctx context.Context, container string, timeout time.Duration) error
+	ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
 	ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
 	ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error)
 	ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
-	ContainerStop(ctx context.Context, container string, timeout time.Duration) error
+	ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
 	ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error)
 	ContainerUnpause(ctx context.Context, container string) error
 	ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error
 	ContainerWait(ctx context.Context, container string) (int, error)
 	CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
 	CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
-	Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
+}
+
+// ImageAPIClient defines API client methods for the images
+type ImageAPIClient interface {
 	ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
 	ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
 	ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error)
@@ -79,7 +77,10 @@
 	ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
 	ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
 	ImageTag(ctx context.Context, image, ref string) error
-	Info(ctx context.Context) (types.Info, error)
+}
+
+// NetworkAPIClient defines API client methods for the networks
+type NetworkAPIClient interface {
 	NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error
 	NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
 	NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error
@@ -87,9 +88,45 @@
 	NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error)
 	NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
 	NetworkRemove(ctx context.Context, networkID string) error
+}
+
+// NodeAPIClient defines API client methods for the nodes
+type NodeAPIClient interface {
+	NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error)
+	NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
+	NodeRemove(ctx context.Context, nodeID string) error
+	NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
+}
+
+// ServiceAPIClient defines API client methods for the services
+type ServiceAPIClient interface {
+	ServiceCreate(ctx context.Context, service swarm.ServiceSpec) (types.ServiceCreateResponse, error)
+	ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error)
+	ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
+	ServiceRemove(ctx context.Context, serviceID string) error
+	ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec) error
+	TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
+	TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
+}
+
+// SwarmAPIClient defines API client methods for the swarm
+type SwarmAPIClient interface {
+	SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
+	SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
+	SwarmLeave(ctx context.Context, force bool) error
+	SwarmInspect(ctx context.Context) (swarm.Swarm, error)
+	SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec) error
+}
+
+// SystemAPIClient defines API client methods for the system
+type SystemAPIClient interface {
+	Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error)
+	Info(ctx context.Context) (types.Info, error)
 	RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error)
-	ServerVersion(ctx context.Context) (types.Version, error)
-	UpdateClientVersion(v string)
+}
+
+// VolumeAPIClient defines API client methods for the volumes
+type VolumeAPIClient interface {
 	VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error)
 	VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
 	VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
diff --git a/vendor/src/github.com/docker/engine-api/client/interface_experimental.go b/vendor/src/github.com/docker/engine-api/client/interface_experimental.go
index c968e53..eb0cd7b 100644
--- a/vendor/src/github.com/docker/engine-api/client/interface_experimental.go
+++ b/vendor/src/github.com/docker/engine-api/client/interface_experimental.go
@@ -3,8 +3,6 @@
 package client
 
 import (
-	"io"
-
 	"github.com/docker/engine-api/types"
 	"golang.org/x/net/context"
 )
@@ -12,11 +10,24 @@
 // APIClient is an interface that clients that talk with a docker server must implement.
 type APIClient interface {
 	CommonAPIClient
+	CheckpointAPIClient
+	PluginAPIClient
+}
+
+// CheckpointAPIClient defines API client methods for the checkpoints
+type CheckpointAPIClient interface {
+	CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
+	CheckpointDelete(ctx context.Context, container string, checkpointID string) error
+	CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
+}
+
+// PluginAPIClient defines API client methods for the plugins
+type PluginAPIClient interface {
 	PluginList(ctx context.Context) (types.PluginsListResponse, error)
 	PluginRemove(ctx context.Context, name string) error
 	PluginEnable(ctx context.Context, name string) error
 	PluginDisable(ctx context.Context, name string) error
-	PluginInstall(ctx context.Context, name, registryAuth string, acceptAllPermissions, noEnable bool, in io.ReadCloser, out io.Writer) error
+	PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
 	PluginPush(ctx context.Context, name string, registryAuth string) error
 	PluginSet(ctx context.Context, name string, args []string) error
 	PluginInspect(ctx context.Context, name string) (*types.Plugin, error)
diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_install.go b/vendor/src/github.com/docker/engine-api/client/plugin_install.go
index 9005e81..914376f 100644
--- a/vendor/src/github.com/docker/engine-api/client/plugin_install.go
+++ b/vendor/src/github.com/docker/engine-api/client/plugin_install.go
@@ -3,21 +3,28 @@
 package client
 
 import (
-	"bufio"
 	"encoding/json"
-	"fmt"
-	"io"
+	"net/http"
 	"net/url"
-	"strings"
 
 	"github.com/docker/engine-api/types"
 	"golang.org/x/net/context"
 )
 
 // PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name, registryAuth string, acceptAllPermissions, noEnable bool, in io.ReadCloser, out io.Writer) error {
-	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	resp, err := cli.post(ctx, "/plugins/pull", url.Values{"name": []string{name}}, nil, headers)
+func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error {
+	// FIXME(vdemeester) name is a ref, we might want to parse/validate it here.
+	query := url.Values{}
+	query.Set("name", name)
+	resp, err := cli.tryPluginPull(ctx, query, options.RegistryAuth)
+	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
+		newAuthHeader, privilegeErr := options.PrivilegeFunc()
+		if privilegeErr != nil {
+			ensureReaderClosed(resp)
+			return privilegeErr
+		}
+		resp, err = cli.tryPluginPull(ctx, query, newAuthHeader)
+	}
 	if err != nil {
 		ensureReaderClosed(resp)
 		return err
@@ -28,27 +35,24 @@
 	}
 	ensureReaderClosed(resp)
 
-	if !acceptAllPermissions && len(privileges) > 0 {
-
-		fmt.Fprintf(out, "Plugin %q requested the following privileges:\n", name)
-		for _, privilege := range privileges {
-			fmt.Fprintf(out, " - %s: %v\n", privilege.Name, privilege.Value)
-		}
-
-		fmt.Fprint(out, "Do you grant the above permissions? [y/N] ")
-		reader := bufio.NewReader(in)
-		line, _, err := reader.ReadLine()
+	if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
+		accept, err := options.AcceptPermissionsFunc(privileges)
 		if err != nil {
 			return err
 		}
-		if strings.ToLower(string(line)) != "y" {
+		if !accept {
 			resp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
 			ensureReaderClosed(resp)
 			return pluginPermissionDenied{name}
 		}
 	}
-	if noEnable {
+	if options.Disabled {
 		return nil
 	}
 	return cli.PluginEnable(ctx, name)
 }
+
+func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) {
+	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
+	return cli.post(ctx, "/plugins/pull", query, nil, headers)
+}
diff --git a/vendor/src/github.com/docker/engine-api/client/service_inspect.go b/vendor/src/github.com/docker/engine-api/client/service_inspect.go
index 3dbb8cd..958cd66 100644
--- a/vendor/src/github.com/docker/engine-api/client/service_inspect.go
+++ b/vendor/src/github.com/docker/engine-api/client/service_inspect.go
@@ -1,25 +1,33 @@
 package client
 
 import (
+	"bytes"
 	"encoding/json"
+	"io/ioutil"
 	"net/http"
 
 	"github.com/docker/engine-api/types/swarm"
 	"golang.org/x/net/context"
 )
 
-// ServiceInspect returns the service information.
-func (cli *Client) ServiceInspect(ctx context.Context, serviceID string) (swarm.Service, error) {
+// ServiceInspectWithRaw returns the service information and the raw data.
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) {
 	serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil)
 	if err != nil {
 		if serverResp.statusCode == http.StatusNotFound {
-			return swarm.Service{}, serviceNotFoundError{serviceID}
+			return swarm.Service{}, nil, serviceNotFoundError{serviceID}
 		}
-		return swarm.Service{}, err
+		return swarm.Service{}, nil, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return swarm.Service{}, nil, err
 	}
 
 	var response swarm.Service
-	err = json.NewDecoder(serverResp.body).Decode(&response)
-	ensureReaderClosed(serverResp)
-	return response, err
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
 }
diff --git a/vendor/src/github.com/docker/engine-api/types/container/host_config.go b/vendor/src/github.com/docker/engine-api/types/container/host_config.go
index 43c3343..8e653fc 100644
--- a/vendor/src/github.com/docker/engine-api/types/container/host_config.go
+++ b/vendor/src/github.com/docker/engine-api/types/container/host_config.go
@@ -303,7 +303,7 @@
 	PublishAllPorts bool              // Should docker publish all exposed port for the container
 	ReadonlyRootfs  bool              // Is the container root filesystem in read-only
 	SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
-	StorageOpt      map[string]string // Storage driver options per container.
+	StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
 	Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
 	UTSMode         UTSMode           // UTS namespace to use for the container
 	UsernsMode      UsernsMode        // The user namespace to use for the container
diff --git a/vendor/src/github.com/docker/engine-api/types/plugin.go b/vendor/src/github.com/docker/engine-api/types/plugin.go
index f9df9ef..05030ff 100644
--- a/vendor/src/github.com/docker/engine-api/types/plugin.go
+++ b/vendor/src/github.com/docker/engine-api/types/plugin.go
@@ -7,6 +7,15 @@
 	"fmt"
 )
 
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+	Disabled              bool
+	AcceptAllPermissions  bool
+	RegistryAuth          string // RegistryAuth is the base64 encoded credentials for the registry
+	PrivilegeFunc         RequestPrivilegeFunc
+	AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
+}
+
 // PluginConfig represents the values of settings potentially modifiable by a user
 type PluginConfig struct {
 	Mounts  []PluginMount
diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
index 052b6b3..27a0ab6 100644
--- a/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
+++ b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go
@@ -29,7 +29,7 @@
 type Policy struct {
 	Role       NodeRole
 	Autoaccept bool
-	Secret     string `json:",omitempty"`
+	Secret     *string `json:",omitempty"`
 }
 
 // OrchestrationConfig represents ochestration configuration.
diff --git a/vendor/src/github.com/docker/libnetwork/agent.go b/vendor/src/github.com/docker/libnetwork/agent.go
index 7276bee..6215459 100644
--- a/vendor/src/github.com/docker/libnetwork/agent.go
+++ b/vendor/src/github.com/docker/libnetwork/agent.go
@@ -353,7 +353,7 @@
 				ingressPorts = ep.ingressPorts
 			}
 
-			if err := c.addServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.virtualIP, ingressPorts, ep.Iface().Address().IP); err != nil {
+			if err := c.addServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.virtualIP, ingressPorts, ep.svcAliases, ep.Iface().Address().IP); err != nil {
 				return err
 			}
 		}
@@ -364,6 +364,7 @@
 			ServiceID:    ep.svcID,
 			VirtualIP:    ep.virtualIP.String(),
 			IngressPorts: ingressPorts,
+			Aliases:      ep.svcAliases,
 			EndpointIP:   ep.Iface().Address().IP.String(),
 		})
 
@@ -399,7 +400,7 @@
 				ingressPorts = ep.ingressPorts
 			}
 
-			if err := c.rmServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.virtualIP, ingressPorts, ep.Iface().Address().IP); err != nil {
+			if err := c.rmServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), ep.virtualIP, ingressPorts, ep.svcAliases, ep.Iface().Address().IP); err != nil {
 				return err
 			}
 		}
@@ -554,6 +555,7 @@
 	vip := net.ParseIP(epRec.VirtualIP)
 	ip := net.ParseIP(epRec.EndpointIP)
 	ingressPorts := epRec.IngressPorts
+	aliases := epRec.Aliases
 
 	if name == "" || ip == nil {
 		logrus.Errorf("Invalid endpoint name/ip received while handling service table event %s", value)
@@ -562,7 +564,7 @@
 
 	if isAdd {
 		if svcID != "" {
-			if err := c.addServiceBinding(svcName, svcID, nid, eid, vip, ingressPorts, ip); err != nil {
+			if err := c.addServiceBinding(svcName, svcID, nid, eid, vip, ingressPorts, aliases, ip); err != nil {
 				logrus.Errorf("Failed adding service binding for value %s: %v", value, err)
 				return
 			}
@@ -571,7 +573,7 @@
 		n.addSvcRecords(name, ip, nil, true)
 	} else {
 		if svcID != "" {
-			if err := c.rmServiceBinding(svcName, svcID, nid, eid, vip, ingressPorts, ip); err != nil {
+			if err := c.rmServiceBinding(svcName, svcID, nid, eid, vip, ingressPorts, aliases, ip); err != nil {
 				logrus.Errorf("Failed adding service binding for value %s: %v", value, err)
 				return
 			}
diff --git a/vendor/src/github.com/docker/libnetwork/agent.pb.go b/vendor/src/github.com/docker/libnetwork/agent.pb.go
index 19b3042..e9d2c8d 100644
--- a/vendor/src/github.com/docker/libnetwork/agent.pb.go
+++ b/vendor/src/github.com/docker/libnetwork/agent.pb.go
@@ -72,6 +72,8 @@
 	EndpointIP string `protobuf:"bytes,5,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"`
 	// IngressPorts exposed by the service to which this endpoint belongs.
 	IngressPorts []*PortConfig `protobuf:"bytes,6,rep,name=ingress_ports,json=ingressPorts" json:"ingress_ports,omitempty"`
+	// A list of aliases which are alternate names for the service
+	Aliases []string `protobuf:"bytes,7,rep,name=aliases" json:"aliases,omitempty"`
 }
 
 func (m *EndpointRecord) Reset()                    { *m = EndpointRecord{} }
@@ -120,7 +122,7 @@
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 10)
+	s := make([]string, 0, 11)
 	s = append(s, "&libnetwork.EndpointRecord{")
 	s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
 	s = append(s, "ServiceName: "+fmt.Sprintf("%#v", this.ServiceName)+",\n")
@@ -130,6 +132,7 @@
 	if this.IngressPorts != nil {
 		s = append(s, "IngressPorts: "+fmt.Sprintf("%#v", this.IngressPorts)+",\n")
 	}
+	s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -228,6 +231,21 @@
 			i += n
 		}
 	}
+	if len(m.Aliases) > 0 {
+		for _, s := range m.Aliases {
+			data[i] = 0x3a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			data[i] = uint8(l)
+			i++
+			i += copy(data[i:], s)
+		}
+	}
 	return i, nil
 }
 
@@ -326,6 +344,12 @@
 			n += 1 + l + sovAgent(uint64(l))
 		}
 	}
+	if len(m.Aliases) > 0 {
+		for _, s := range m.Aliases {
+			l = len(s)
+			n += 1 + l + sovAgent(uint64(l))
+		}
+	}
 	return n
 }
 
@@ -372,6 +396,7 @@
 		`VirtualIP:` + fmt.Sprintf("%v", this.VirtualIP) + `,`,
 		`EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`,
 		`IngressPorts:` + strings.Replace(fmt.Sprintf("%v", this.IngressPorts), "PortConfig", "PortConfig", 1) + `,`,
+		`Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -602,6 +627,35 @@
 				return err
 			}
 			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAgent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthAgent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Aliases = append(m.Aliases, string(data[iNdEx:postIndex]))
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipAgent(data[iNdEx:])
@@ -865,29 +919,30 @@
 )
 
 var fileDescriptorAgent = []byte{
-	// 384 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xda, 0x40,
-	0x18, 0xc6, 0x31, 0xb8, 0x08, 0xbf, 0xc6, 0x2e, 0x3a, 0x55, 0x95, 0xc5, 0x60, 0x28, 0x52, 0x25,
-	0x86, 0xca, 0x48, 0x74, 0x64, 0x03, 0x3a, 0x78, 0xa9, 0x2c, 0xf7, 0xcf, 0x8a, 0x0c, 0xbe, 0xba,
-	0xa7, 0xba, 0x3e, 0xeb, 0x7c, 0xd0, 0xb5, 0x63, 0x94, 0x2d, 0x1f, 0x20, 0x53, 0xbe, 0x4c, 0xc6,
-	0x8c, 0x99, 0xa2, 0xc0, 0x9a, 0x25, 0x1f, 0x21, 0x77, 0x67, 0x1b, 0x14, 0x89, 0xe1, 0x95, 0x4e,
-	0xbf, 0xe7, 0xf7, 0x9e, 0x5e, 0x3d, 0x60, 0x46, 0x09, 0xce, 0xb8, 0x97, 0x33, 0xca, 0x29, 0x82,
-	0x94, 0xac, 0x33, 0xcc, 0xff, 0x51, 0xf6, 0xa7, 0xff, 0x2e, 0xa1, 0x09, 0x55, 0x78, 0x22, 0x5f,
-	0xa5, 0x31, 0xba, 0x6a, 0x82, 0xfd, 0x25, 0x8b, 0x73, 0x4a, 0x32, 0x1e, 0xe2, 0x0d, 0x65, 0x31,
-	0x42, 0xa0, 0x67, 0xd1, 0x5f, 0xec, 0x68, 0x43, 0x6d, 0x6c, 0x84, 0xea, 0x8d, 0x3e, 0x40, 0xb7,
-	0xc0, 0x6c, 0x47, 0x36, 0x78, 0xa5, 0xb2, 0xa6, 0xca, 0xcc, 0x8a, 0x7d, 0x95, 0xca, 0x27, 0x80,
-	0x5a, 0x21, 0xb1, 0xd3, 0x92, 0xc2, 0xdc, 0x3a, 0x3c, 0x0c, 0x8c, 0x6f, 0x25, 0xf5, 0x97, 0xa1,
-	0x51, 0x09, 0x7e, 0x2c, 0xed, 0x1d, 0x61, 0x7c, 0x1b, 0xa5, 0x2b, 0x92, 0x3b, 0xfa, 0xc9, 0xfe,
-	0x59, 0x52, 0x3f, 0x08, 0x8d, 0x4a, 0xf0, 0x73, 0x34, 0x01, 0x13, 0x57, 0x47, 0x4a, 0xfd, 0x8d,
-	0xd2, 0x6d, 0xa1, 0x43, 0x7d, 0xbb, 0xf0, 0xa1, 0x56, 0xc4, 0xc2, 0x0c, 0x2c, 0x92, 0x25, 0x0c,
-	0x17, 0xc5, 0x2a, 0xa7, 0x8c, 0x17, 0x4e, 0x7b, 0xd8, 0x1a, 0x9b, 0xd3, 0xf7, 0xde, 0xa9, 0x10,
-	0x2f, 0x10, 0xc1, 0x82, 0x66, 0xbf, 0x48, 0x12, 0x76, 0x2b, 0x59, 0xa2, 0x62, 0xf4, 0xa4, 0x01,
-	0x9c, 0xc2, 0xb3, 0x7d, 0xcc, 0xa0, 0xa3, 0xfa, 0xdb, 0xd0, 0x54, 0x75, 0x61, 0x4f, 0x07, 0xe7,
-	0xbf, 0xf6, 0x82, 0x4a, 0x0b, 0x8f, 0x0b, 0x68, 0x00, 0x26, 0x8f, 0x58, 0x82, 0xb9, 0xba, 0x4d,
-	0x55, 0x65, 0x85, 0x50, 0x22, 0xb9, 0x89, 0x3e, 0x82, 0x9d, 0x6f, 0xd7, 0x29, 0x29, 0x7e, 0xe3,
-	0xb8, 0x74, 0x74, 0xe5, 0x58, 0x47, 0x2a, 0xb5, 0xd1, 0x12, 0x3a, 0xf5, 0xef, 0xc8, 0x81, 0xd6,
-	0xf7, 0x45, 0xd0, 0x6b, 0xf4, 0xdf, 0x5e, 0x5e, 0x0f, 0xcd, 0x1a, 0x0b, 0x24, 0x93, 0x1f, 0xcb,
-	0xa0, 0xa7, 0xbd, 0x4e, 0x04, 0xea, 0xeb, 0x17, 0x37, 0x6e, 0x63, 0xee, 0xdc, 0xef, 0xdd, 0xc6,
-	0xf3, 0xde, 0xd5, 0xfe, 0x1f, 0x5c, 0xed, 0x56, 0xcc, 0x9d, 0x98, 0x47, 0x31, 0xeb, 0xb6, 0xba,
-	0xf8, 0xf3, 0x4b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9e, 0x6d, 0x44, 0x68, 0x53, 0x02, 0x00, 0x00,
+	// 397 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xbf, 0xae, 0xd3, 0x30,
+	0x14, 0xc6, 0x9b, 0xdb, 0x70, 0x6f, 0x73, 0xd2, 0x84, 0xca, 0x42, 0x28, 0xea, 0x90, 0x96, 0x4a,
+	0x48, 0x1d, 0x50, 0x2a, 0x95, 0xb1, 0x5b, 0x5b, 0x86, 0x2c, 0x28, 0x32, 0x7f, 0xd6, 0x2a, 0x6d,
+	0x4c, 0xb0, 0x08, 0x71, 0x64, 0xbb, 0x65, 0x65, 0x44, 0xbc, 0x03, 0x13, 0x23, 0x2f, 0xc2, 0xc8,
+	0xc8, 0x84, 0x68, 0x57, 0x16, 0x1e, 0x01, 0xdb, 0x49, 0x5a, 0x21, 0x75, 0xb0, 0xe4, 0xfc, 0xce,
+	0xef, 0x4b, 0x4e, 0x3e, 0x70, 0xd3, 0x9c, 0x94, 0x32, 0xaa, 0x38, 0x93, 0x0c, 0x41, 0x41, 0xb7,
+	0x25, 0x91, 0x1f, 0x18, 0x7f, 0x37, 0x7c, 0x90, 0xb3, 0x9c, 0x19, 0x3c, 0xd3, 0xb7, 0xda, 0x98,
+	0x7c, 0xbb, 0x01, 0xff, 0x59, 0x99, 0x55, 0x8c, 0x96, 0x12, 0x93, 0x1d, 0xe3, 0x19, 0x42, 0x60,
+	0x97, 0xe9, 0x7b, 0x12, 0x58, 0x63, 0x6b, 0xea, 0x60, 0x73, 0x47, 0x8f, 0xa0, 0x2f, 0x08, 0x3f,
+	0xd0, 0x1d, 0xd9, 0x98, 0xd9, 0x8d, 0x99, 0xb9, 0x0d, 0x7b, 0xae, 0x95, 0x27, 0x00, 0xad, 0x42,
+	0xb3, 0xa0, 0xab, 0x85, 0xa5, 0x77, 0xfa, 0x35, 0x72, 0x5e, 0xd4, 0x34, 0x5e, 0x63, 0xa7, 0x11,
+	0xe2, 0x4c, 0xdb, 0x07, 0xca, 0xe5, 0x3e, 0x2d, 0x36, 0xb4, 0x0a, 0xec, 0x8b, 0xfd, 0xba, 0xa6,
+	0x71, 0x82, 0x9d, 0x46, 0x88, 0x2b, 0x34, 0x03, 0x97, 0x34, 0x4b, 0x6a, 0xfd, 0x9e, 0xd1, 0x7d,
+	0xa5, 0x43, 0xbb, 0xbb, 0xf2, 0xa1, 0x55, 0x54, 0x60, 0x01, 0x1e, 0x2d, 0x73, 0x4e, 0x84, 0xd8,
+	0x54, 0x8c, 0x4b, 0x11, 0xdc, 0x8e, 0xbb, 0x53, 0x77, 0xfe, 0x30, 0xba, 0x14, 0x12, 0x25, 0x6a,
+	0xb0, 0x62, 0xe5, 0x1b, 0x9a, 0xe3, 0x7e, 0x23, 0x6b, 0x24, 0x50, 0x00, 0x77, 0x69, 0x41, 0x53,
+	0x41, 0x44, 0x70, 0xa7, 0x62, 0x0e, 0x6e, 0x1f, 0x27, 0x7f, 0x2c, 0x80, 0x4b, 0xec, 0x6a, 0x53,
+	0x0b, 0xe8, 0x99, 0x66, 0x77, 0xac, 0x30, 0x2d, 0xf9, 0xf3, 0xd1, 0xf5, 0x8f, 0x46, 0x49, 0xa3,
+	0xe1, 0x73, 0x00, 0x8d, 0xc0, 0x95, 0x29, 0xcf, 0x89, 0x34, 0x5b, 0x9b, 0x12, 0x3d, 0x0c, 0x35,
+	0xd2, 0x49, 0xf4, 0x18, 0xfc, 0x6a, 0xbf, 0x2d, 0xa8, 0x78, 0x4b, 0xb2, 0xda, 0xb1, 0x8d, 0xe3,
+	0x9d, 0xa9, 0xd6, 0x26, 0x6b, 0xe8, 0xb5, 0x6f, 0x57, 0x7f, 0xd3, 0x7d, 0xb9, 0x4a, 0x06, 0x9d,
+	0xe1, 0xfd, 0xcf, 0x5f, 0xc6, 0x6e, 0x8b, 0x15, 0xd2, 0x93, 0x57, 0xeb, 0x64, 0x60, 0xfd, 0x3f,
+	0x51, 0x68, 0x68, 0x7f, 0xfa, 0x1a, 0x76, 0x96, 0xc1, 0xcf, 0x63, 0xd8, 0xf9, 0x7b, 0x0c, 0xad,
+	0x8f, 0xa7, 0xd0, 0xfa, 0xae, 0xce, 0x0f, 0x75, 0x7e, 0xab, 0xb3, 0xbd, 0x35, 0x1b, 0x3f, 0xfd,
+	0x17, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x58, 0xc7, 0xbd, 0x6d, 0x02, 0x00, 0x00,
 }
diff --git a/vendor/src/github.com/docker/libnetwork/agent.proto b/vendor/src/github.com/docker/libnetwork/agent.proto
index 5d2b096..2157ce4 100644
--- a/vendor/src/github.com/docker/libnetwork/agent.proto
+++ b/vendor/src/github.com/docker/libnetwork/agent.proto
@@ -31,6 +31,9 @@
 
 	// IngressPorts exposed by the service to which this endpoint belongs.
 	repeated PortConfig ingress_ports = 6;
+
+	// A list of aliases which are alternate names for the service
+	repeated string aliases = 7;
 }
 
 // PortConfig specifies an exposed port which can be
diff --git a/vendor/src/github.com/docker/libnetwork/controller.go b/vendor/src/github.com/docker/libnetwork/controller.go
index 3655707..1646568 100644
--- a/vendor/src/github.com/docker/libnetwork/controller.go
+++ b/vendor/src/github.com/docker/libnetwork/controller.go
@@ -203,6 +203,8 @@
 		}
 	}
 
+	c.WalkNetworks(populateSpecial)
+
 	// Reserve pools first before doing cleanup. Otherwise the
 	// cleanups of endpoint/network and sandbox below will
 	// generate many unnecessary warnings
@@ -808,12 +810,13 @@
 	// Create sandbox and process options first. Key generation depends on an option
 	if sb == nil {
 		sb = &sandbox{
-			id:          stringid.GenerateRandomID(),
-			containerID: containerID,
-			endpoints:   epHeap{},
-			epPriority:  map[string]int{},
-			config:      containerConfig{},
-			controller:  c,
+			id:                 stringid.GenerateRandomID(),
+			containerID:        containerID,
+			endpoints:          epHeap{},
+			epPriority:         map[string]int{},
+			populatedEndpoints: map[string]struct{}{},
+			config:             containerConfig{},
+			controller:         c,
 		}
 	}
 	sBox = sb
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
index 6d761a0..ce0aec8 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -588,13 +588,26 @@
 	defer osl.InitOSContext()()
 
 	networkList := d.getNetworks()
-	for _, nw := range networkList {
+	for i, nw := range networkList {
 		nw.Lock()
 		nwConfig := nw.config
 		nw.Unlock()
 		if err := nwConfig.Conflicts(config); err != nil {
-			return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
-				config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
+			if config.DefaultBridge {
+				// We encountered and identified a stale default network
+				// We must delete it as libnetwork is the source of truth
+				// The default network being created must be the only one
+				// This can happen only from docker 1.12 onward
+				logrus.Infof("Removing stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
+				if err := d.DeleteNetwork(nwConfig.ID); err != nil {
+					logrus.Warnf("Failed to remove stale default network: %s (%s): %v. Will remove from store.", nwConfig.ID, nwConfig.BridgeName, err)
+					d.storeDelete(nwConfig)
+				}
+				networkList = append(networkList[:i], networkList[i+1:]...)
+			} else {
+				return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
+					config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
+			}
 		}
 	}
 
@@ -762,12 +775,6 @@
 		return err
 	}
 
-	// Cannot remove network if endpoints are still present
-	if len(n.endpoints) != 0 {
-		err = ActiveEndpointsError(n.id)
-		return err
-	}
-
 	// We only delete the bridge when it's not the default bridge. This is keep the backward compatible behavior.
 	if !config.DefaultBridge {
 		if err := d.nlh.LinkDel(n.bridge.Link); err != nil {
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
index 54844c9..d76858e 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go
@@ -83,8 +83,12 @@
 	if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil {
 		ns.NlHandle().LinkDel(link)
 	}
+
 	if err := d.storeDelete(ep); err != nil {
 		logrus.Warnf("Failed to remove macvlan endpoint %s from store: %v", ep.id[0:7], err)
 	}
+
+	n.deleteEndpoint(ep.id)
+
 	return nil
 }
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go b/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go
index 0f9a5e4..f27f860 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go
@@ -70,7 +70,7 @@
 }
 
 func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error {
-	log.Infof("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
+	log.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal)
 
 	n := d.network(nid)
 	if n == nil || !n.secure {
@@ -120,7 +120,7 @@
 }
 
 func setupEncryption(localIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error {
-	log.Infof("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
+	log.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP)
 	rIPs := remoteIP.String()
 
 	indices := make([]*spi, 0, len(keys))
@@ -242,7 +242,7 @@
 		}
 
 		if add != exists {
-			log.Infof("%s: rSA{%s}", action, rSA)
+			log.Debugf("%s: rSA{%s}", action, rSA)
 			if err := xfrmProgram(rSA); err != nil {
 				log.Warnf("Failed %s rSA{%s}: %v", action, rSA, err)
 			}
@@ -267,7 +267,7 @@
 		}
 
 		if add != exists {
-			log.Infof("%s fSA{%s}", action, fSA)
+			log.Debugf("%s fSA{%s}", action, fSA)
 			if err := xfrmProgram(fSA); err != nil {
 				log.Warnf("Failed %s fSA{%s}: %v.", action, fSA, err)
 			}
@@ -313,7 +313,7 @@
 	}
 
 	if add != exists {
-		log.Infof("%s fSP{%s}", action, fPol)
+		log.Debugf("%s fSP{%s}", action, fPol)
 		if err := xfrmProgram(fPol); err != nil {
 			log.Warnf("%s fSP{%s}: %v", action, fPol, err)
 		}
@@ -380,16 +380,16 @@
 		return types.ForbiddenErrorf("initial keys are already present")
 	}
 	d.keys = keys
-	log.Infof("Initial encryption keys: %v", d.keys)
+	log.Debugf("Initial encryption keys: %v", d.keys)
 	return nil
 }
 
 // updateKeys allows to add a new key and/or change the primary key and/or prune an existing key
 // The primary key is the key used in transmission and will go in first position in the list.
 func (d *driver) updateKeys(newKey, primary, pruneKey *key) error {
-	log.Infof("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
+	log.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey)
 
-	log.Infof("Current: %v", d.keys)
+	log.Debugf("Current: %v", d.keys)
 
 	var (
 		newIdx = -1
@@ -444,7 +444,7 @@
 	}
 	d.Unlock()
 
-	log.Infof("Updated: %v", d.keys)
+	log.Debugf("Updated: %v", d.keys)
 
 	return nil
 }
@@ -458,10 +458,10 @@
 
 // Spis and keys are sorted in such away the one in position 0 is the primary
 func updateNodeKey(lIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi {
-	log.Infof("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
+	log.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx)
 
 	spis := idxs
-	log.Infof("Current: %v", spis)
+	log.Debugf("Current: %v", spis)
 
 	// add new
 	if newIdx != -1 {
@@ -482,7 +482,7 @@
 			Crypt:  &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: curKeys[delIdx].value},
 			Limits: netlink.XfrmStateLimits{TimeSoft: timeout},
 		}
-		log.Infof("Updating rSA0{%s}", rSA0)
+		log.Debugf("Updating rSA0{%s}", rSA0)
 		if err := ns.NlHandle().XfrmStateUpdate(rSA0); err != nil {
 			log.Warnf("Failed to update rSA0{%s}: %v", rSA0, err)
 		}
@@ -518,7 +518,7 @@
 				},
 			},
 		}
-		log.Infof("Updating fSP{%s}", fSP1)
+		log.Debugf("Updating fSP{%s}", fSP1)
 		if err := ns.NlHandle().XfrmPolicyUpdate(fSP1); err != nil {
 			log.Warnf("Failed to update fSP{%s}: %v", fSP1, err)
 		}
@@ -533,7 +533,7 @@
 			Crypt:  &netlink.XfrmStateAlgo{Name: "cbc(aes)", Key: curKeys[0].value},
 			Limits: netlink.XfrmStateLimits{TimeHard: timeout},
 		}
-		log.Infof("Removing fSA0{%s}", fSA0)
+		log.Debugf("Removing fSA0{%s}", fSA0)
 		if err := ns.NlHandle().XfrmStateUpdate(fSA0); err != nil {
 			log.Warnf("Failed to remove fSA0{%s}: %v", fSA0, err)
 		}
@@ -553,7 +553,7 @@
 		spis = append(spis[:delIdx], spis[delIdx+1:]...)
 	}
 
-	log.Infof("Updated: %v", spis)
+	log.Debugf("Updated: %v", spis)
 
 	return spis
 }
diff --git a/vendor/src/github.com/docker/libnetwork/endpoint.go b/vendor/src/github.com/docker/libnetwork/endpoint.go
index 043c3f1..3fa1b6d 100644
--- a/vendor/src/github.com/docker/libnetwork/endpoint.go
+++ b/vendor/src/github.com/docker/libnetwork/endpoint.go
@@ -70,6 +70,7 @@
 	svcID             string
 	svcName           string
 	virtualIP         net.IP
+	svcAliases        []string
 	ingressPorts      []*PortConfig
 	dbIndex           uint64
 	dbExists          bool
@@ -98,6 +99,7 @@
 	epMap["svcID"] = ep.svcID
 	epMap["virtualIP"] = ep.virtualIP.String()
 	epMap["ingressPorts"] = ep.ingressPorts
+	epMap["svcAliases"] = ep.svcAliases
 
 	return json.Marshal(epMap)
 }
@@ -198,6 +200,11 @@
 		ep.virtualIP = net.ParseIP(vip.(string))
 	}
 
+	sal, _ := json.Marshal(epMap["svcAliases"])
+	var svcAliases []string
+	json.Unmarshal(sal, &svcAliases)
+	ep.svcAliases = svcAliases
+
 	pc, _ := json.Marshal(epMap["ingressPorts"])
 	var ingressPorts []*PortConfig
 	json.Unmarshal(pc, &ingressPorts)
@@ -231,6 +238,9 @@
 	dstEp.svcID = ep.svcID
 	dstEp.virtualIP = ep.virtualIP
 
+	dstEp.svcAliases = make([]string, len(ep.svcAliases))
+	copy(dstEp.svcAliases, ep.svcAliases)
+
 	dstEp.ingressPorts = make([]*PortConfig, len(ep.ingressPorts))
 	copy(dstEp.ingressPorts, ep.ingressPorts)
 
@@ -935,12 +945,13 @@
 }
 
 // CreateOptionService function returns an option setter for setting service binding configuration
-func CreateOptionService(name, id string, vip net.IP, ingressPorts []*PortConfig) EndpointOption {
+func CreateOptionService(name, id string, vip net.IP, ingressPorts []*PortConfig, aliases []string) EndpointOption {
 	return func(ep *endpoint) {
 		ep.svcName = name
 		ep.svcID = id
 		ep.virtualIP = vip
 		ep.ingressPorts = ingressPorts
+		ep.svcAliases = aliases
 	}
 }
 
@@ -976,7 +987,7 @@
 	var err error
 
 	n := ep.getNetwork()
-	if n.Type() == "host" || n.Type() == "null" {
+	if n.hasSpecialDriver() {
 		return nil
 	}
 
@@ -1056,7 +1067,7 @@
 
 func (ep *endpoint) releaseAddress() {
 	n := ep.getNetwork()
-	if n.Type() == "host" || n.Type() == "null" {
+	if n.hasSpecialDriver() {
 		return
 	}
 
diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go
index c4e988f..1bf003e 100644
--- a/vendor/src/github.com/docker/libnetwork/network.go
+++ b/vendor/src/github.com/docker/libnetwork/network.go
@@ -1123,8 +1123,7 @@
 }
 
 func (n *network) ipamAllocate() error {
-	// For now also exclude bridge from using new ipam
-	if n.Type() == "host" || n.Type() == "null" {
+	if n.hasSpecialDriver() {
 		return nil
 	}
 
@@ -1295,8 +1294,7 @@
 }
 
 func (n *network) ipamRelease() {
-	// For now exclude host and null
-	if n.Type() == "host" || n.Type() == "null" {
+	if n.hasSpecialDriver() {
 		return
 	}
 	ipam, _, err := n.getController().getIPAMDriver(n.ipamType)
@@ -1504,3 +1502,8 @@
 	n.driverTables = append(n.driverTables, tableName)
 	return nil
 }
+
+// Special drivers are ones which do not need to perform any network plumbing
+func (n *network) hasSpecialDriver() bool {
+	return n.Type() == "host" || n.Type() == "null"
+}
diff --git a/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go b/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
index e11aae9..165ba6e 100644
--- a/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
+++ b/vendor/src/github.com/docker/libnetwork/networkdb/cluster.go
@@ -330,11 +330,15 @@
 		// successfully completed bulk sync in this iteration.
 		updatedNetworks := make([]string, 0, len(networks))
 		for _, nid := range networks {
+			var found bool
 			for _, completedNid := range completed {
 				if nid == completedNid {
-					continue
+					found = true
+					break
 				}
+			}
 
+			if !found {
 				updatedNetworks = append(updatedNetworks, nid)
 			}
 		}
@@ -449,8 +453,9 @@
 	// Wait on a response only if it is unsolicited.
 	if unsolicited {
 		startTime := time.Now()
+		t := time.NewTimer(30 * time.Second)
 		select {
-		case <-time.After(30 * time.Second):
+		case <-t.C:
 			logrus.Errorf("Bulk sync to node %s timed out", node)
 		case <-ch:
 			nDB.Lock()
@@ -459,6 +464,7 @@
 
 			logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Now().Sub(startTime))
 		}
+		t.Stop()
 	}
 
 	return nil
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox.go
index dce169b..6bb2766 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox.go
@@ -68,23 +68,24 @@
 type epHeap []*endpoint
 
 type sandbox struct {
-	id            string
-	containerID   string
-	config        containerConfig
-	extDNS        []string
-	osSbox        osl.Sandbox
-	controller    *controller
-	resolver      Resolver
-	resolverOnce  sync.Once
-	refCnt        int
-	endpoints     epHeap
-	epPriority    map[string]int
-	joinLeaveDone chan struct{}
-	dbIndex       uint64
-	dbExists      bool
-	isStub        bool
-	inDelete      bool
-	ingress       bool
+	id                 string
+	containerID        string
+	config             containerConfig
+	extDNS             []string
+	osSbox             osl.Sandbox
+	controller         *controller
+	resolver           Resolver
+	resolverOnce       sync.Once
+	refCnt             int
+	endpoints          epHeap
+	epPriority         map[string]int
+	populatedEndpoints map[string]struct{}
+	joinLeaveDone      chan struct{}
+	dbIndex            uint64
+	dbExists           bool
+	isStub             bool
+	inDelete           bool
+	ingress            bool
 	sync.Mutex
 }
 
@@ -728,7 +729,7 @@
 			}
 		}
 		if ep.needResolver() {
-			sb.startResolver()
+			sb.startResolver(true)
 		}
 	}
 
@@ -761,7 +762,7 @@
 	ep.Unlock()
 
 	if ep.needResolver() {
-		sb.startResolver()
+		sb.startResolver(false)
 	}
 
 	if i != nil && i.srcName != "" {
@@ -798,6 +799,12 @@
 		}
 	}
 
+	// Make sure to add the endpoint to the populated endpoint set
+	// before populating loadbalancers.
+	sb.Lock()
+	sb.populatedEndpoints[ep.ID()] = struct{}{}
+	sb.Unlock()
+
 	// Populate load balancer only after updating all the other
 	// information including gateway and other routes so that
 	// loadbalancers are populated all the network state is in
@@ -830,6 +837,7 @@
 		releaseOSSboxResources(osSbox, ep)
 	}
 
+	delete(sb.populatedEndpoints, ep.ID())
 	sb.Lock()
 	if len(sb.endpoints) == 0 {
 		// sb.endpoints should never be empty and this is unexpected error condition
@@ -879,6 +887,13 @@
 	return nil
 }
 
+func (sb *sandbox) isEndpointPopulated(ep *endpoint) bool {
+	sb.Lock()
+	_, ok := sb.populatedEndpoints[ep.ID()]
+	sb.Unlock()
+	return ok
+}
+
 // joinLeaveStart waits to ensure there are no joins or leaves in progress and
 // marks this join/leave in progress without race
 func (sb *sandbox) joinLeaveStart() {
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go b/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
index 3f531be..7357085 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go
@@ -21,7 +21,7 @@
 	filePerm      = 0644
 )
 
-func (sb *sandbox) startResolver() {
+func (sb *sandbox) startResolver(restore bool) {
 	sb.resolverOnce.Do(func() {
 		var err error
 		sb.resolver = NewResolver(sb)
@@ -31,10 +31,16 @@
 			}
 		}()
 
-		err = sb.rebuildDNS()
-		if err != nil {
-			log.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err)
-			return
+		// In the case of live restore, the container is already running with
+		// right resolv.conf contents created before. Just update the
+		// external DNS servers from the restored sandbox for embedded
+		// server to use.
+		if !restore {
+			err = sb.rebuildDNS()
+			if err != nil {
+				log.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err)
+				return
+			}
 		}
 		sb.resolver.SetExtServers(sb.extDNS)
 
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox_dns_windows.go b/vendor/src/github.com/docker/libnetwork/sandbox_dns_windows.go
index f2f58d5..e1ca73e 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox_dns_windows.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox_dns_windows.go
@@ -8,7 +8,7 @@
 
 // Stub implementations for DNS related functions
 
-func (sb *sandbox) startResolver() {
+func (sb *sandbox) startResolver(bool) {
 }
 
 func (sb *sandbox) setupResolutionFiles() error {
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox_store.go b/vendor/src/github.com/docker/libnetwork/sandbox_store.go
index 5aa4839..5b963e7 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox_store.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox_store.go
@@ -27,6 +27,7 @@
 	dbExists   bool
 	Eps        []epState
 	EpPriority map[string]int
+	ExtDNS     []string
 }
 
 func (sbs *sbState) Key() []string {
@@ -113,6 +114,10 @@
 		dstSbs.Eps = append(dstSbs.Eps, eps)
 	}
 
+	for _, dns := range sbs.ExtDNS {
+		dstSbs.ExtDNS = append(dstSbs.ExtDNS, dns)
+	}
+
 	return nil
 }
 
@@ -126,6 +131,7 @@
 		ID:         sb.id,
 		Cid:        sb.containerID,
 		EpPriority: sb.epPriority,
+		ExtDNS:     sb.extDNS,
 	}
 
 retry:
@@ -191,13 +197,15 @@
 		sbs := kvo.(*sbState)
 
 		sb := &sandbox{
-			id:          sbs.ID,
-			controller:  sbs.c,
-			containerID: sbs.Cid,
-			endpoints:   epHeap{},
-			dbIndex:     sbs.dbIndex,
-			isStub:      true,
-			dbExists:    true,
+			id:                 sbs.ID,
+			controller:         sbs.c,
+			containerID:        sbs.Cid,
+			endpoints:          epHeap{},
+			populatedEndpoints: map[string]struct{}{},
+			dbIndex:            sbs.dbIndex,
+			isStub:             true,
+			dbExists:           true,
+			extDNS:             sbs.ExtDNS,
 		}
 
 		msg := " for cleanup"
diff --git a/vendor/src/github.com/docker/libnetwork/service_linux.go b/vendor/src/github.com/docker/libnetwork/service_linux.go
index 204c59d..d6b4b4c 100644
--- a/vendor/src/github.com/docker/libnetwork/service_linux.go
+++ b/vendor/src/github.com/docker/libnetwork/service_linux.go
@@ -37,7 +37,7 @@
 	}
 }
 
-func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
 	var (
 		s          *service
 		addService bool
@@ -61,6 +61,9 @@
 	// Add endpoint IP to special "tasks.svc_name" so that the
 	// applications have access to DNS RR.
 	n.(*network).addSvcRecords("tasks."+name, ip, nil, false)
+	for _, alias := range aliases {
+		n.(*network).addSvcRecords("tasks."+alias, ip, nil, false)
+	}
 
 	// Add service name to vip in DNS, if vip is valid. Otherwise resort to DNS RR
 	svcIP := vip
@@ -68,6 +71,9 @@
 		svcIP = ip
 	}
 	n.(*network).addSvcRecords(name, svcIP, nil, false)
+	for _, alias := range aliases {
+		n.(*network).addSvcRecords(alias, svcIP, nil, false)
+	}
 
 	s.Lock()
 	defer s.Unlock()
@@ -107,7 +113,7 @@
 	return nil
 }
 
-func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
 	var rmService bool
 
 	n, err := c.NetworkByID(nid)
@@ -125,6 +131,9 @@
 
 	// Delete the special "tasks.svc_name" backend record.
 	n.(*network).deleteSvcRecords("tasks."+name, ip, nil, false)
+	for _, alias := range aliases {
+		n.(*network).deleteSvcRecords("tasks."+alias, ip, nil, false)
+	}
 
 	// Make sure to remove the right IP since if vip is
 	// not valid we would have added a DNS RR record.
@@ -133,6 +142,9 @@
 		svcIP = ip
 	}
 	n.(*network).deleteSvcRecords(name, svcIP, nil, false)
+	for _, alias := range aliases {
+		n.(*network).deleteSvcRecords(alias, svcIP, nil, false)
+	}
 
 	s.Lock()
 	defer s.Unlock()
@@ -172,14 +184,20 @@
 func (n *network) connectedLoadbalancers() []*loadBalancer {
 	c := n.getController()
 
+	serviceBindings := make([]*service, 0, len(c.serviceBindings))
 	c.Lock()
-	defer c.Unlock()
+	for _, s := range c.serviceBindings {
+		serviceBindings = append(serviceBindings, s)
+	}
+	c.Unlock()
 
 	var lbs []*loadBalancer
-	for _, s := range c.serviceBindings {
+	for _, s := range serviceBindings {
+		s.Lock()
 		if lb, ok := s.loadBalancers[n.ID()]; ok {
 			lbs = append(lbs, lb)
 		}
+		s.Unlock()
 	}
 
 	return lbs
@@ -217,12 +235,14 @@
 			continue
 		}
 
+		lb.service.Lock()
 		addService := true
 		for _, ip := range lb.backEnds {
 			sb.addLBBackend(ip, lb.vip, lb.fwMark, lb.service.ingressPorts,
 				eIP, gwIP, addService)
 			addService = false
 		}
+		lb.service.Unlock()
 	}
 }
 
@@ -233,6 +253,10 @@
 	n.WalkEndpoints(func(e Endpoint) bool {
 		ep := e.(*endpoint)
 		if sb, ok := ep.getSandbox(); ok {
+			if !sb.isEndpointPopulated(ep) {
+				return false
+			}
+
 			var gwIP net.IP
 			if ep := sb.getGatewayEndpoint(); ep != nil {
 				gwIP = ep.Iface().Address().IP
@@ -252,6 +276,10 @@
 	n.WalkEndpoints(func(e Endpoint) bool {
 		ep := e.(*endpoint)
 		if sb, ok := ep.getSandbox(); ok {
+			if !sb.isEndpointPopulated(ep) {
+				return false
+			}
+
 			var gwIP net.IP
 			if ep := sb.getGatewayEndpoint(); ep != nil {
 				gwIP = ep.Iface().Address().IP
@@ -344,15 +372,13 @@
 	}
 
 	if err := i.DelDestination(s, d); err != nil {
-		logrus.Errorf("Failed to delete real server %s for vip %s fwmark %d: %v", ip, vip, fwMark, err)
-		return
+		logrus.Infof("Failed to delete real server %s for vip %s fwmark %d: %v", ip, vip, fwMark, err)
 	}
 
 	if rmService {
 		s.SchedName = ipvs.RoundRobin
 		if err := i.DelService(s); err != nil {
-			logrus.Errorf("Failed to create a new service for vip %s fwmark %d: %v", vip, fwMark, err)
-			return
+			logrus.Errorf("Failed to delete a new service for vip %s fwmark %d: %v", vip, fwMark, err)
 		}
 
 		var iPorts []*PortConfig
@@ -360,13 +386,11 @@
 			iPorts = ingressPorts
 			if err := programIngress(gwIP, iPorts, true); err != nil {
 				logrus.Errorf("Failed to delete ingress: %v", err)
-				return
 			}
 		}
 
 		if err := invokeFWMarker(sb.Key(), vip, fwMark, iPorts, eIP, true); err != nil {
 			logrus.Errorf("Failed to add firewall mark rule in sbox %s: %v", sb.Key(), err)
-			return
 		}
 	}
 }
@@ -412,8 +436,8 @@
 		}
 
 		for _, chain := range []string{"OUTPUT", "PREROUTING"} {
-			if !iptables.Exists(iptables.Nat, chain, "-j", ingressChain) {
-				if err := iptables.RawCombinedOutput("-t", "nat", "-I", chain, "-j", ingressChain); err != nil {
+			if !iptables.Exists(iptables.Nat, chain, "-m", "addrtype", "--dst-type", "LOCAL", "-j", ingressChain) {
+				if err := iptables.RawCombinedOutput("-t", "nat", "-I", chain, "-m", "addrtype", "--dst-type", "LOCAL", "-j", ingressChain); err != nil {
 					return fmt.Errorf("failed to add jump rule in %s to ingress chain: %v", chain, err)
 				}
 			}
@@ -442,12 +466,17 @@
 			rule := strings.Fields(fmt.Sprintf("-t nat %s %s -p %s --dport %d -j DNAT --to-destination %s:%d",
 				addDelOpt, ingressChain, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, gwIP, iPort.PublishedPort))
 			if err := iptables.RawCombinedOutput(rule...); err != nil {
-				return fmt.Errorf("setting up rule failed, %v: %v", rule, err)
+				errStr := fmt.Sprintf("setting up rule failed, %v: %v", rule, err)
+				if !isDelete {
+					return fmt.Errorf("%s", errStr)
+				}
+
+				logrus.Infof("%s", errStr)
 			}
 		}
 
 		if err := plumbProxy(iPort, isDelete); err != nil {
-			return fmt.Errorf("failed to create proxy for port %d: %v", iPort.PublishedPort, err)
+			logrus.Warnf("failed to create proxy for port %d: %v", iPort.PublishedPort, err)
 		}
 	}
 
diff --git a/vendor/src/github.com/docker/libnetwork/service_unsupported.go b/vendor/src/github.com/docker/libnetwork/service_unsupported.go
index 67984e2..9668dcc 100644
--- a/vendor/src/github.com/docker/libnetwork/service_unsupported.go
+++ b/vendor/src/github.com/docker/libnetwork/service_unsupported.go
@@ -7,11 +7,11 @@
 	"net"
 )
 
-func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
 	return fmt.Errorf("not supported")
 }
 
-func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, ip net.IP) error {
+func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error {
 	return fmt.Errorf("not supported")
 }
 
diff --git a/vendor/src/github.com/docker/libnetwork/store.go b/vendor/src/github.com/docker/libnetwork/store.go
index 714d56b..b622836 100644
--- a/vendor/src/github.com/docker/libnetwork/store.go
+++ b/vendor/src/github.com/docker/libnetwork/store.go
@@ -464,3 +464,12 @@
 		}
 	}
 }
+
+var populateSpecial NetworkWalker = func(nw Network) bool {
+	if n := nw.(*network); n.hasSpecialDriver() {
+		if err := n.getController().addNetwork(n); err != nil {
+			log.Warnf("Failed to populate network %q with driver %q", nw.Name(), nw.Type())
+		}
+	}
+	return false
+}
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go b/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
index 59b54a1..b691a74 100644
--- a/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
@@ -6,16 +6,10 @@
 	"github.com/Sirupsen/logrus"
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/log"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
-// ContainerController controls execution of container tasks.
-type ContainerController interface {
-	// ContainerStatus returns the status of the target container, if
-	// available. When the container is not available, the status will be nil.
-	ContainerStatus(ctx context.Context) (*api.ContainerStatus, error)
-}
-
 // Controller controls execution of a task.
 type Controller interface {
 	// Update the task definition seen by the controller. Will return
@@ -48,6 +42,15 @@
 	Close() error
 }
 
+// ContainerStatuser reports status of a container.
+//
+// This can be implemented by controllers or error types.
+type ContainerStatuser interface {
+	// ContainerStatus returns the status of the target container, if
+	// available. When the container is not available, the status will be nil.
+	ContainerStatus(ctx context.Context) (*api.ContainerStatus, error)
+}
+
 // Resolve attempts to get a controller from the executor and reports the
 // correct status depending on the tasks current state according to the result.
 //
@@ -121,6 +124,13 @@
 		return status, nil
 	}
 
+	// containerStatus and exitCode keep track of whether or not we've set them in
+	// this particular method. Eventually, we assemble this as part of a defer.
+	var (
+		containerStatus *api.ContainerStatus
+		exitCode        int
+	)
+
 	// returned when a fatal execution of the task is fatal. In this case, we
 	// proceed to a terminal error state and set the appropriate fields.
 	//
@@ -131,28 +141,37 @@
 			panic("err must not be nil when fatal")
 		}
 
-		if IsTemporary(err) {
-			switch Cause(err) {
-			case context.DeadlineExceeded, context.Canceled:
-				// no need to set these errors, since these will more common.
-			default:
-				status.Err = err.Error()
+		if cs, ok := err.(ContainerStatuser); ok {
+			var err error
+			containerStatus, err = cs.ContainerStatus(ctx)
+			if err != nil {
+				log.G(ctx).WithError(err).Error("error resolving container status on fatal")
 			}
+		}
 
+		// make sure we've set the *correct* exit code
+		if ec, ok := err.(ExitCoder); ok {
+			exitCode = ec.ExitCode()
+		}
+
+		if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled {
 			return retry()
 		}
 
-		if cause := Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled {
+		status.Err = err.Error() // still reported on temporary
+		if IsTemporary(err) {
 			return retry()
 		}
 
+		// only at this point do we consider the error fatal to the task.
 		log.G(ctx).WithError(err).Error("fatal task error")
-		status.Err = err.Error()
 
+		// NOTE(stevvooe): The following switch dictates the terminal failure
+		// state based on the state in which the failure was encountered.
 		switch {
 		case status.State < api.TaskStateStarting:
 			status.State = api.TaskStateRejected
-		case status.State > api.TaskStateStarting:
+		case status.State >= api.TaskStateStarting:
 			status.State = api.TaskStateFailed
 		}
 
@@ -172,21 +191,37 @@
 			return
 		}
 
-		cctlr, ok := ctlr.(ContainerController)
-		if !ok {
-			return
-		}
-
-		cstatus, err := cctlr.ContainerStatus(ctx)
-		if err != nil {
-			log.G(ctx).WithError(err).Error("container status unavailable")
-			return
-		}
-
-		if cstatus != nil {
-			status.RuntimeStatus = &api.TaskStatus_Container{
-				Container: cstatus,
+		if containerStatus == nil {
+			// collect this, if we haven't
+			cctlr, ok := ctlr.(ContainerStatuser)
+			if !ok {
+				return
 			}
+
+			var err error
+			containerStatus, err = cctlr.ContainerStatus(ctx)
+			if err != nil {
+				log.G(ctx).WithError(err).Error("container status unavailable")
+			}
+
+			// at this point, things have gone fairly wrong. Remain positive
+			// and let's get something out the door.
+			if containerStatus == nil {
+				containerStatus = new(api.ContainerStatus)
+				containerStatusTask := task.Status.GetContainer()
+				if containerStatusTask != nil {
+					*containerStatus = *containerStatusTask // copy it over.
+				}
+			}
+		}
+
+		// at this point, we *must* have a containerStatus.
+		if exitCode != 0 {
+			containerStatus.ExitCode = int32(exitCode)
+		}
+
+		status.RuntimeStatus = &api.TaskStatus_Container{
+			Container: containerStatus,
 		}
 	}()
 
@@ -222,17 +257,7 @@
 		return transition(api.TaskStateRunning, "started")
 	case api.TaskStateRunning:
 		if err := ctlr.Wait(ctx); err != nil {
-			// Wait should only proceed to failed if there is a terminal
-			// error. The only two conditions when this happens are when we
-			// get an exit code or when the container doesn't exist.
-			switch err := err.(type) {
-			case ExitCoder:
-				return transition(api.TaskStateFailed, "failed")
-			default:
-				// pursuant to the above comment, report fatal, but wrap as
-				// temporary.
-				return fatal(MakeTemporary(err))
-			}
+			return fatal(err)
 		}
 
 		return transition(api.TaskStateCompleted, "finished")
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go b/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
index 7a45c9a..3b2a1a7 100644
--- a/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
@@ -9,38 +9,6 @@
 	context "golang.org/x/net/context"
 )
 
-// Mock of ContainerController interface
-type MockContainerController struct {
-	ctrl     *gomock.Controller
-	recorder *_MockContainerControllerRecorder
-}
-
-// Recorder for MockContainerController (not exported)
-type _MockContainerControllerRecorder struct {
-	mock *MockContainerController
-}
-
-func NewMockContainerController(ctrl *gomock.Controller) *MockContainerController {
-	mock := &MockContainerController{ctrl: ctrl}
-	mock.recorder = &_MockContainerControllerRecorder{mock}
-	return mock
-}
-
-func (_m *MockContainerController) EXPECT() *_MockContainerControllerRecorder {
-	return _m.recorder
-}
-
-func (_m *MockContainerController) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
-	ret := _m.ctrl.Call(_m, "ContainerStatus", ctx)
-	ret0, _ := ret[0].(*api.ContainerStatus)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-func (_mr *_MockContainerControllerRecorder) ContainerStatus(arg0 interface{}) *gomock.Call {
-	return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerStatus", arg0)
-}
-
 // Mock of Controller interface
 type MockController struct {
 	ctrl     *gomock.Controller
@@ -141,3 +109,35 @@
 func (_mr *_MockControllerRecorder) Close() *gomock.Call {
 	return _mr.mock.ctrl.RecordCall(_mr.mock, "Close")
 }
+
+// Mock of ContainerStatuser interface
+type MockContainerStatuser struct {
+	ctrl     *gomock.Controller
+	recorder *_MockContainerStatuserRecorder
+}
+
+// Recorder for MockContainerStatuser (not exported)
+type _MockContainerStatuserRecorder struct {
+	mock *MockContainerStatuser
+}
+
+func NewMockContainerStatuser(ctrl *gomock.Controller) *MockContainerStatuser {
+	mock := &MockContainerStatuser{ctrl: ctrl}
+	mock.recorder = &_MockContainerStatuserRecorder{mock}
+	return mock
+}
+
+func (_m *MockContainerStatuser) EXPECT() *_MockContainerStatuserRecorder {
+	return _m.recorder
+}
+
+func (_m *MockContainerStatuser) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
+	ret := _m.ctrl.Call(_m, "ContainerStatus", ctx)
+	ret0, _ := ret[0].(*api.ContainerStatus)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+func (_mr *_MockContainerStatuserRecorder) ContainerStatus(arg0 interface{}) *gomock.Call {
+	return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerStatus", arg0)
+}
diff --git a/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go b/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
index db6467c..4d082b7 100644
--- a/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
+++ b/vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
@@ -1,6 +1,6 @@
 package exec
 
-import "errors"
+import "github.com/pkg/errors"
 
 var (
 	// ErrRuntimeUnsupported encountered when a task requires a runtime
@@ -37,23 +37,6 @@
 	ExitCode() int
 }
 
-type causal interface {
-	Cause() error
-}
-
-// Cause returns the cause of the error, recursively.
-func Cause(err error) error {
-	for err != nil {
-		if causal, ok := err.(causal); ok {
-			err = causal.Cause()
-		} else {
-			break
-		}
-	}
-
-	return err
-}
-
 // Temporary indicates whether or not the error condition is temporary.
 //
 // If this is encountered in the controller, the failing operation will be
@@ -65,15 +48,19 @@
 
 // MakeTemporary makes the error temporary.
 func MakeTemporary(err error) error {
-	return &temporary{error: err}
+	if IsTemporary(err) {
+		return err
+	}
+
+	return temporary{err}
 }
 
 type temporary struct {
 	error
 }
 
-func (t *temporary) Cause() error    { return t.error }
-func (t *temporary) Temporary() bool { return true }
+func (t temporary) Cause() error    { return t.error }
+func (t temporary) Temporary() bool { return true }
 
 // IsTemporary returns true if the error or a recursive cause returns true for
 // temporary.
@@ -85,11 +72,12 @@
 			}
 		}
 
-		if causal, ok := err.(causal); !ok {
+		cause := errors.Cause(err)
+		if cause == err {
 			break
-		} else {
-			err = causal.Cause()
 		}
+
+		err = cause
 	}
 
 	return false
diff --git a/vendor/src/github.com/docker/swarmkit/agent/node.go b/vendor/src/github.com/docker/swarmkit/agent/node.go
index 9e40185..1f9fca4 100644
--- a/vendor/src/github.com/docker/swarmkit/agent/node.go
+++ b/vendor/src/github.com/docker/swarmkit/agent/node.go
@@ -78,21 +78,23 @@
 // cluster. Node handles workloads and may also run as a manager.
 type Node struct {
 	sync.RWMutex
-	config        *NodeConfig
-	remotes       *persistentRemotes
-	role          string
-	roleCond      *sync.Cond
-	conn          *grpc.ClientConn
-	connCond      *sync.Cond
-	nodeID        string
-	started       chan struct{}
-	stopped       chan struct{}
-	ready         chan struct{}
-	closed        chan struct{}
-	err           error
-	agent         *Agent
-	manager       *manager.Manager
-	roleChangeReq chan api.NodeRole
+	config               *NodeConfig
+	remotes              *persistentRemotes
+	role                 string
+	roleCond             *sync.Cond
+	conn                 *grpc.ClientConn
+	connCond             *sync.Cond
+	nodeID               string
+	nodeMembership       api.NodeSpec_Membership
+	started              chan struct{}
+	stopped              chan struct{}
+	ready                chan struct{} // closed when agent has completed registration and manager(if enabled) is ready to receive control requests
+	certificateRequested chan struct{} // closed when certificate issue request has been sent by node
+	closed               chan struct{}
+	err                  error
+	agent                *Agent
+	manager              *manager.Manager
+	roleChangeReq        chan api.NodeRole // used to send role updates from the dispatcher api on promotion/demotion
 }
 
 // NewNode returns new Node instance.
@@ -113,14 +115,15 @@
 	}
 
 	n := &Node{
-		remotes:       newPersistentRemotes(stateFile, p...),
-		role:          ca.AgentRole,
-		config:        c,
-		started:       make(chan struct{}),
-		stopped:       make(chan struct{}),
-		closed:        make(chan struct{}),
-		ready:         make(chan struct{}),
-		roleChangeReq: make(chan api.NodeRole, 1),
+		remotes:              newPersistentRemotes(stateFile, p...),
+		role:                 ca.AgentRole,
+		config:               c,
+		started:              make(chan struct{}),
+		stopped:              make(chan struct{}),
+		closed:               make(chan struct{}),
+		ready:                make(chan struct{}),
+		certificateRequested: make(chan struct{}),
+		roleChangeReq:        make(chan api.NodeRole, 1),
 	}
 	n.roleCond = sync.NewCond(n.RLocker())
 	n.connCond = sync.NewCond(n.RLocker())
@@ -171,14 +174,17 @@
 		}
 	}()
 
-	if (n.config.JoinAddr == "" && n.nodeID == "") || n.config.ForceNewCluster {
+	if n.config.JoinAddr == "" && n.nodeID == "" {
 		if err := n.bootstrapCA(); err != nil {
 			return err
 		}
 	}
 
 	if n.config.JoinAddr != "" || n.config.ForceNewCluster {
-		n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename), api.Peer{Addr: n.config.JoinAddr})
+		n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename))
+		if n.config.JoinAddr != "" {
+			n.remotes.Observe(api.Peer{Addr: n.config.JoinAddr}, 1)
+		}
 	}
 
 	csrRole := n.role
@@ -193,23 +199,22 @@
 	// - We wait for LoadOrCreateSecurityConfig to finish since we need a certificate to operate.
 	// - Given a valid certificate, spin a renewal go-routine that will ensure that certificates stay
 	// up to date.
-	nodeIDChan := make(chan string, 1)
-	caLoadDone := make(chan struct{})
+	issueResponseChan := make(chan api.IssueNodeCertificateResponse, 1)
 	go func() {
 		select {
 		case <-ctx.Done():
-		case <-caLoadDone:
-		case nodeID := <-nodeIDChan:
-			logrus.Debugf("Requesting certificate for NodeID: %v", nodeID)
+		case resp := <-issueResponseChan:
+			logrus.Debugf("Requesting certificate for NodeID: %v", resp.NodeID)
 			n.Lock()
-			n.nodeID = nodeID
+			n.nodeID = resp.NodeID
+			n.nodeMembership = resp.NodeMembership
 			n.Unlock()
+			close(n.certificateRequested)
 		}
 	}()
 
 	certDir := filepath.Join(n.config.StateDir, "certificates")
-	securityConfig, err := ca.LoadOrCreateSecurityConfig(ctx, certDir, n.config.CAHash, n.config.Secret, csrRole, picker.NewPicker(n.remotes), nodeIDChan)
-	close(caLoadDone)
+	securityConfig, err := ca.LoadOrCreateSecurityConfig(ctx, certDir, n.config.CAHash, n.config.Secret, csrRole, picker.NewPicker(n.remotes), issueResponseChan)
 	if err != nil {
 		return err
 	}
@@ -223,6 +228,7 @@
 	if err != nil {
 		return err
 	}
+	defer db.Close()
 
 	if err := n.loadCertificates(); err != nil {
 		return err
@@ -402,10 +408,17 @@
 
 // Ready returns a channel that is closed after node's initialization has
 // completes for the first time.
-func (n *Node) Ready(ctx context.Context) <-chan struct{} {
+func (n *Node) Ready() <-chan struct{} {
 	return n.ready
 }
 
+// CertificateRequested returns a channel that is closed after node has
+// requested a certificate. After this call a caller can expect calls to
+// NodeID() and `NodeMembership()` to succeed.
+func (n *Node) CertificateRequested() <-chan struct{} {
+	return n.certificateRequested
+}
+
 func (n *Node) waitRole(ctx context.Context, role string) <-chan struct{} {
 	c := make(chan struct{})
 	n.roleCond.L.Lock()
@@ -482,6 +495,13 @@
 	return n.nodeID
 }
 
+// NodeMembership returns current node's membership. May be empty if not set.
+func (n *Node) NodeMembership() api.NodeSpec_Membership {
+	n.RLock()
+	defer n.RUnlock()
+	return n.nodeMembership
+}
+
 // Manager return manager instance started by node. May be nil.
 func (n *Node) Manager() *manager.Manager {
 	n.RLock()
@@ -528,6 +548,7 @@
 	n.Lock()
 	n.role = clientTLSCreds.Role()
 	n.nodeID = clientTLSCreds.NodeID()
+	n.nodeMembership = api.NodeMembershipAccepted
 	n.roleCond.Broadcast()
 	n.Unlock()
 
@@ -623,11 +644,11 @@
 			select {
 			case <-ctx.Done():
 				m.Stop(context.Background()) // todo: this should be sync like other components
-			case <-n.waitRole(ctx, ca.AgentRole):
+				<-done
+			// in case of demotion manager will stop itself
+			case <-done:
 			}
 
-			<-done
-
 			ready = nil // ready event happens once, even on multiple starts
 			n.Lock()
 			n.manager = nil
diff --git a/vendor/src/github.com/docker/swarmkit/api/ca.pb.go b/vendor/src/github.com/docker/swarmkit/api/ca.pb.go
index c1c58d0..139c43c 100644
--- a/vendor/src/github.com/docker/swarmkit/api/ca.pb.go
+++ b/vendor/src/github.com/docker/swarmkit/api/ca.pb.go
@@ -63,7 +63,8 @@
 func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} }
 
 type IssueNodeCertificateResponse struct {
-	NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	NodeID         string              `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"`
 }
 
 func (m *IssueNodeCertificateResponse) Reset()                    { *m = IssueNodeCertificateResponse{} }
@@ -178,7 +179,8 @@
 	}
 
 	o := &IssueNodeCertificateResponse{
-		NodeID: m.NodeID,
+		NodeID:         m.NodeID,
+		NodeMembership: m.NodeMembership,
 	}
 
 	return o
@@ -247,9 +249,10 @@
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 5)
+	s := make([]string, 0, 6)
 	s = append(s, "&api.IssueNodeCertificateResponse{")
 	s = append(s, "NodeID: "+fmt.Sprintf("%#v", this.NodeID)+",\n")
+	s = append(s, "NodeMembership: "+fmt.Sprintf("%#v", this.NodeMembership)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -583,6 +586,11 @@
 		i = encodeVarintCa(data, i, uint64(len(m.NodeID)))
 		i += copy(data[i:], m.NodeID)
 	}
+	if m.NodeMembership != 0 {
+		data[i] = 0x10
+		i++
+		i = encodeVarintCa(data, i, uint64(m.NodeMembership))
+	}
 	return i, nil
 }
 
@@ -842,6 +850,9 @@
 	if l > 0 {
 		n += 1 + l + sovCa(uint64(l))
 	}
+	if m.NodeMembership != 0 {
+		n += 1 + sovCa(uint64(m.NodeMembership))
+	}
 	return n
 }
 
@@ -913,6 +924,7 @@
 	}
 	s := strings.Join([]string{`&IssueNodeCertificateResponse{`,
 		`NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+		`NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1326,6 +1338,25 @@
 			}
 			m.NodeID = string(data[iNdEx:postIndex])
 			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType)
+			}
+			m.NodeMembership = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowCa
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
 		default:
 			iNdEx = preIndex
 			skippy, err := skipCa(data[iNdEx:])
@@ -1584,33 +1615,36 @@
 )
 
 var fileDescriptorCa = []byte{
-	// 442 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x48, 0x4e, 0xd4, 0x2b,
-	0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2b, 0x2e, 0x4f,
-	0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, 0x2d, 0x86,
-	0x28, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, 0x41, 0x2c, 0xa8, 0xa8, 0x70, 0x41,
-	0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x3e, 0x84, 0x82, 0x08, 0x2a, 0x39, 0x73, 0xc9, 0xf8, 0xe5, 0xa7,
-	0xa4, 0x3a, 0xa7, 0x16, 0x95, 0x64, 0xa6, 0x65, 0x26, 0x27, 0x96, 0xa4, 0x06, 0x97, 0x24, 0x96,
-	0x94, 0x16, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x29, 0x73, 0xb1, 0xe7, 0x01, 0xe5,
-	0xe3, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0xb8, 0x1e, 0xdd, 0x93, 0x67, 0x03,
-	0x69, 0xf1, 0x74, 0x09, 0x62, 0x03, 0x49, 0x79, 0xa6, 0x28, 0xcd, 0x63, 0xe4, 0x92, 0xc5, 0x61,
-	0x4a, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x15, 0x17, 0x5b, 0x31, 0x58, 0x04, 0x6c, 0x0a,
-	0xb7, 0x91, 0x92, 0x1e, 0xa6, 0x1f, 0xf4, 0x3c, 0x8b, 0x8b, 0x4b, 0x13, 0xf3, 0x92, 0x61, 0x7a,
-	0xa1, 0x3a, 0x84, 0x1c, 0xb9, 0xb8, 0x93, 0x11, 0x06, 0x4b, 0x30, 0x81, 0x0d, 0x90, 0xc7, 0x66,
-	0x00, 0x92, 0xfd, 0x41, 0xc8, 0x7a, 0x94, 0x9a, 0x18, 0xb9, 0xa4, 0x41, 0xa6, 0xa7, 0xa2, 0xb9,
-	0x12, 0xe6, 0x4b, 0x03, 0x2e, 0x96, 0xa2, 0xfc, 0x9c, 0x54, 0xb0, 0xe3, 0xf8, 0x8c, 0x64, 0xb0,
-	0x99, 0x0d, 0xd2, 0x19, 0x04, 0x54, 0x13, 0x04, 0x56, 0x29, 0x24, 0xc9, 0xc5, 0x9c, 0x5c, 0x5c,
-	0x04, 0x76, 0x0c, 0x8f, 0x13, 0x3b, 0x30, 0x4c, 0x98, 0x9d, 0x83, 0x83, 0x82, 0x40, 0x62, 0x42,
-	0x62, 0x40, 0xbf, 0xa6, 0x26, 0x17, 0xa5, 0x96, 0x48, 0x30, 0x83, 0x42, 0x2c, 0x08, 0xca, 0x03,
-	0x05, 0x35, 0x76, 0x37, 0x40, 0xc3, 0x88, 0xa8, 0xa0, 0x96, 0xe5, 0x92, 0x76, 0x4f, 0x2d, 0x09,
-	0xca, 0xcf, 0x2f, 0x71, 0x76, 0xc4, 0xf4, 0x88, 0x92, 0x03, 0x97, 0x0c, 0x76, 0x69, 0xa8, 0x1d,
-	0x0a, 0xa8, 0x61, 0x09, 0xb2, 0x87, 0x07, 0x25, 0xa8, 0x8c, 0xba, 0x18, 0xb9, 0x98, 0x9c, 0x1d,
-	0x85, 0x9a, 0x19, 0xb9, 0x44, 0xb0, 0x99, 0x24, 0xa4, 0x8f, 0x2d, 0x70, 0xf0, 0x38, 0x49, 0xca,
-	0x80, 0x78, 0x0d, 0x10, 0x47, 0x2a, 0x71, 0x9c, 0x5a, 0xf7, 0x6e, 0x06, 0x13, 0x93, 0x00, 0xa3,
-	0xd1, 0x74, 0x26, 0x2e, 0x70, 0x00, 0x40, 0x1d, 0x84, 0x2d, 0xf8, 0xb0, 0x3b, 0x08, 0x4f, 0x64,
-	0x63, 0x77, 0x10, 0xbe, 0x98, 0x41, 0x38, 0x48, 0xa8, 0x8d, 0x91, 0x4b, 0x14, 0x6b, 0x4a, 0x17,
-	0x32, 0xc0, 0x95, 0x68, 0x70, 0x65, 0x2d, 0x29, 0x43, 0x12, 0x74, 0xa0, 0x3b, 0xc4, 0x49, 0xe6,
-	0xc4, 0x43, 0x39, 0x86, 0x1b, 0x40, 0xfc, 0xe1, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x13,
-	0x40, 0x7c, 0x01, 0x88, 0x1f, 0x00, 0x71, 0x12, 0x1b, 0x38, 0x73, 0x1b, 0x03, 0x02, 0x00, 0x00,
-	0xff, 0xff, 0x42, 0x13, 0xc9, 0x2a, 0x34, 0x04, 0x00, 0x00,
+	// 487 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
+	0x10, 0xc6, 0x59, 0x07, 0xa5, 0x65, 0x52, 0x05, 0xb4, 0x14, 0x14, 0x52, 0x37, 0xad, 0xcc, 0x01,
+	0x4e, 0x4e, 0x6a, 0x6e, 0x9c, 0x48, 0x8c, 0x84, 0x72, 0x00, 0xa1, 0xcd, 0x03, 0x20, 0xd7, 0x19,
+	0x82, 0xd5, 0x26, 0x6b, 0x76, 0x37, 0x20, 0x6e, 0x08, 0x24, 0x0e, 0xdc, 0x11, 0x9c, 0x78, 0x04,
+	0x9e, 0xa3, 0xe2, 0xc4, 0x91, 0x13, 0xa2, 0x7d, 0x00, 0xc4, 0x23, 0xb0, 0xbb, 0x71, 0x48, 0xff,
+	0xac, 0xa3, 0xf6, 0x30, 0x8a, 0x77, 0x76, 0xbe, 0x2f, 0xbf, 0x9d, 0xf1, 0x1a, 0x56, 0xd3, 0x24,
+	0xcc, 0x05, 0x57, 0x9c, 0xd2, 0x21, 0x4f, 0xf7, 0x50, 0x84, 0xf2, 0x75, 0x22, 0xc6, 0x7b, 0x99,
+	0x0a, 0x5f, 0xed, 0x34, 0x6b, 0xea, 0x4d, 0x8e, 0x72, 0x56, 0xd0, 0xac, 0xc9, 0x1c, 0xd3, 0xf9,
+	0x62, 0x7d, 0xc4, 0x47, 0xdc, 0x3e, 0xb6, 0xcd, 0x53, 0x91, 0xbd, 0x9e, 0xef, 0x4f, 0x47, 0xd9,
+	0xa4, 0x3d, 0xfb, 0x99, 0x25, 0x83, 0x18, 0xfc, 0x27, 0x7c, 0x88, 0x31, 0x0a, 0x95, 0x3d, 0xcf,
+	0xd2, 0x44, 0xe1, 0x40, 0x25, 0x6a, 0x2a, 0x19, 0xbe, 0x9c, 0xa2, 0x54, 0xf4, 0x36, 0xac, 0x4c,
+	0xf4, 0xfe, 0xb3, 0x6c, 0xd8, 0x20, 0xdb, 0xe4, 0xee, 0x95, 0x1e, 0x1c, 0xfd, 0xda, 0xaa, 0x1a,
+	0x49, 0xff, 0x21, 0xab, 0x9a, 0xad, 0xfe, 0x30, 0xf8, 0x4a, 0x60, 0xb3, 0xc4, 0x45, 0xe6, 0x7c,
+	0x22, 0x91, 0xde, 0x87, 0xaa, 0xb4, 0x19, 0xeb, 0x52, 0x8b, 0x82, 0xf0, 0xec, 0x81, 0xc2, 0xbe,
+	0x94, 0xd3, 0x64, 0x92, 0xce, 0xb5, 0x85, 0x82, 0x76, 0xa1, 0x96, 0x2e, 0x8c, 0x1b, 0x9e, 0x35,
+	0xd8, 0x72, 0x19, 0x1c, 0xfb, 0x7f, 0x76, 0x5c, 0x13, 0xbc, 0x23, 0xb0, 0x61, 0xdc, 0xf1, 0x14,
+	0xe5, 0xfc, 0x94, 0x1d, 0xb8, 0x2c, 0xf8, 0x3e, 0x5a, 0xb8, 0x7a, 0xe4, 0xbb, 0xbc, 0x8d, 0x92,
+	0xe9, 0x1a, 0x66, 0x2b, 0xe9, 0x2d, 0xa8, 0xa4, 0x52, 0x58, 0x98, 0xb5, 0xde, 0x8a, 0xee, 0x49,
+	0x25, 0x1e, 0x30, 0x66, 0x72, 0xf4, 0xa6, 0x3e, 0x2b, 0xa6, 0x02, 0x55, 0xa3, 0x62, 0x3a, 0xc6,
+	0x8a, 0x55, 0xf0, 0x89, 0x80, 0xef, 0x86, 0x28, 0x9a, 0x74, 0x9e, 0x5e, 0xd3, 0xa7, 0x70, 0xd5,
+	0x16, 0x8d, 0x71, 0xbc, 0x8b, 0x42, 0xbe, 0xc8, 0x72, 0x0b, 0x51, 0x8f, 0xee, 0x94, 0x51, 0x0f,
+	0xf4, 0x9b, 0x11, 0x3e, 0xfe, 0x5f, 0xce, 0xea, 0x46, 0xbf, 0x58, 0x07, 0x9b, 0xb0, 0xf1, 0x08,
+	0x15, 0xe3, 0x5c, 0xc5, 0xdd, 0xb3, 0xbd, 0x09, 0x1e, 0x80, 0xef, 0xde, 0x2e, 0xa8, 0xb7, 0x4f,
+	0x8e, 0xc7, 0x90, 0xaf, 0x9d, 0xe8, 0x7e, 0xf4, 0x91, 0x80, 0x17, 0x77, 0xe9, 0x7b, 0x02, 0xeb,
+	0x2e, 0x27, 0xda, 0x76, 0x91, 0x2f, 0x41, 0x6a, 0x76, 0xce, 0x2f, 0x98, 0x41, 0x06, 0xab, 0xdf,
+	0xbf, 0xfd, 0xf9, 0xe2, 0x79, 0xd7, 0x48, 0xf4, 0xd9, 0x03, 0xdb, 0xd2, 0x02, 0xc8, 0x35, 0x10,
+	0x37, 0xd0, 0x92, 0xf7, 0xc7, 0x0d, 0xb4, 0x6c, 0xd6, 0x0b, 0x20, 0xfa, 0x81, 0xc0, 0x0d, 0xe7,
+	0xe5, 0xa1, 0x9d, 0xb2, 0x89, 0x96, 0xdd, 0xd6, 0xe6, 0xce, 0x05, 0x14, 0xa7, 0x41, 0x7a, 0xfe,
+	0xc1, 0x61, 0xeb, 0xd2, 0x4f, 0x1d, 0x7f, 0x0f, 0x5b, 0xe4, 0xed, 0x51, 0x8b, 0x1c, 0xe8, 0xf8,
+	0xa1, 0xe3, 0xb7, 0x8e, 0xdd, 0xaa, 0xfd, 0x5e, 0xdc, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x72,
+	0xd0, 0xad, 0xdf, 0x94, 0x04, 0x00, 0x00,
 }
diff --git a/vendor/src/github.com/docker/swarmkit/api/ca.proto b/vendor/src/github.com/docker/swarmkit/api/ca.proto
index 149a8a1..0625008 100644
--- a/vendor/src/github.com/docker/swarmkit/api/ca.proto
+++ b/vendor/src/github.com/docker/swarmkit/api/ca.proto
@@ -3,6 +3,7 @@
 package docker.swarmkit.v1;
 
 import "types.proto";
+import "specs.proto";
 import "gogoproto/gogo.proto";
 import "plugin/plugin.proto";
 
@@ -42,6 +43,7 @@
 
 message IssueNodeCertificateResponse {
 	string node_id = 1 [(gogoproto.customname) = "NodeID"];
+	NodeSpec.Membership node_membership = 2;
 }
 
 message GetRootCACertificateRequest {}
diff --git a/vendor/src/github.com/docker/swarmkit/api/objects.pb.go b/vendor/src/github.com/docker/swarmkit/api/objects.pb.go
index cc99bf3..5a9ce55 100644
--- a/vendor/src/github.com/docker/swarmkit/api/objects.pb.go
+++ b/vendor/src/github.com/docker/swarmkit/api/objects.pb.go
@@ -175,6 +175,8 @@
 	// List of IPv4/IPv6 addresses that are assigned to the object
 	// as part of getting attached to this network.
 	Addresses []string `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"`
+	// List of aliases by which a task is resolved in a network
+	Aliases []string `protobuf:"bytes,3,rep,name=aliases" json:"aliases,omitempty"`
 }
 
 func (m *NetworkAttachment) Reset()                    { *m = NetworkAttachment{} }
@@ -360,6 +362,13 @@
 		}
 	}
 
+	if m.Aliases != nil {
+		o.Aliases = make([]string, 0, len(m.Aliases))
+		for _, v := range m.Aliases {
+			o.Aliases = append(o.Aliases, v)
+		}
+	}
+
 	return o
 }
 
@@ -514,12 +523,13 @@
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 6)
+	s := make([]string, 0, 7)
 	s = append(s, "&api.NetworkAttachment{")
 	if this.Network != nil {
 		s = append(s, "Network: "+fmt.Sprintf("%#v", this.Network)+",\n")
 	}
 	s = append(s, "Addresses: "+fmt.Sprintf("%#v", this.Addresses)+",\n")
+	s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -995,6 +1005,21 @@
 			i += copy(data[i:], s)
 		}
 	}
+	if len(m.Aliases) > 0 {
+		for _, s := range m.Aliases {
+			data[i] = 0x1a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			data[i] = uint8(l)
+			i++
+			i += copy(data[i:], s)
+		}
+	}
 	return i, nil
 }
 
@@ -1308,6 +1333,12 @@
 			n += 1 + l + sovObjects(uint64(l))
 		}
 	}
+	if len(m.Aliases) > 0 {
+		for _, s := range m.Aliases {
+			l = len(s)
+			n += 1 + l + sovObjects(uint64(l))
+		}
+	}
 	return n
 }
 
@@ -1464,6 +1495,7 @@
 	s := strings.Join([]string{`&NetworkAttachment{`,
 		`Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`,
 		`Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
+		`Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2854,6 +2886,35 @@
 			}
 			m.Addresses = append(m.Addresses, string(data[iNdEx:postIndex]))
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowObjects
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthObjects
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Aliases = append(m.Aliases, string(data[iNdEx:postIndex]))
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipObjects(data[iNdEx:])
@@ -3405,65 +3466,66 @@
 )
 
 var fileDescriptorObjects = []byte{
-	// 949 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0xcf, 0x6e, 0x1b, 0x45,
-	0x18, 0xaf, 0x9d, 0x8d, 0xed, 0xfd, 0x9c, 0x44, 0x62, 0xa8, 0x2a, 0x37, 0x84, 0xa4, 0xb8, 0x02,
+	// 965 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
+	0x14, 0xaf, 0xed, 0xad, 0xed, 0x7d, 0x4e, 0x22, 0x31, 0x54, 0xd5, 0x36, 0x84, 0xa4, 0xb8, 0x02,
 	0x71, 0x40, 0xae, 0x28, 0x05, 0x81, 0xa0, 0x42, 0xb6, 0x13, 0x81, 0x05, 0x81, 0x68, 0x5a, 0x85,
-	0xa3, 0x35, 0xd9, 0x9d, 0xa6, 0x8b, 0xed, 0xdd, 0xd5, 0xcc, 0xc4, 0x55, 0x6e, 0x3c, 0x01, 0x12,
-	0x2f, 0xc0, 0xab, 0x70, 0x8d, 0x38, 0x71, 0xe4, 0x54, 0xd1, 0xde, 0x38, 0xc1, 0x23, 0xf0, 0xcd,
-	0xec, 0xb7, 0xeb, 0xad, 0xbc, 0x8e, 0x1a, 0x09, 0xe5, 0xb0, 0xf2, 0xce, 0xec, 0xef, 0xf7, 0x9b,
-	0xef, 0xff, 0x18, 0x36, 0x93, 0x93, 0x1f, 0x65, 0x60, 0x74, 0x2f, 0x55, 0x89, 0x49, 0x18, 0x0b,
-	0x93, 0x60, 0x22, 0x55, 0x4f, 0x3f, 0x13, 0x6a, 0x36, 0x89, 0x4c, 0x6f, 0xfe, 0xe1, 0x76, 0xdb,
-	0x9c, 0xa7, 0x92, 0x00, 0xdb, 0x6d, 0x9d, 0xca, 0x20, 0x5f, 0xdc, 0x36, 0xd1, 0x4c, 0x6a, 0x23,
-	0x66, 0xe9, 0xbd, 0xe2, 0x8d, 0x3e, 0xdd, 0x3c, 0x4d, 0x4e, 0x13, 0xf7, 0x7a, 0xcf, 0xbe, 0x65,
-	0xbb, 0xdd, 0xdf, 0x6a, 0xe0, 0x1d, 0x4a, 0x23, 0xd8, 0xe7, 0xd0, 0x9c, 0x4b, 0xa5, 0xa3, 0x24,
-	0xee, 0xd4, 0xee, 0xd4, 0xde, 0x6f, 0xdf, 0x7f, 0xab, 0xb7, 0x7c, 0x72, 0xef, 0x38, 0x83, 0x0c,
-	0xbc, 0x8b, 0xe7, 0x7b, 0x37, 0x78, 0xce, 0x60, 0x5f, 0x00, 0x04, 0x4a, 0x0a, 0x23, 0xc3, 0xb1,
-	0x30, 0x9d, 0xba, 0xe3, 0xbf, 0x5d, 0xc5, 0x7f, 0x9c, 0x1b, 0xc5, 0x7d, 0x22, 0xf4, 0x8d, 0x65,
-	0x9f, 0xa5, 0x61, 0xce, 0x5e, 0x7b, 0x2d, 0x36, 0x11, 0xfa, 0xa6, 0xfb, 0xf7, 0x1a, 0x78, 0xdf,
-	0x25, 0xa1, 0x64, 0xb7, 0xa0, 0x1e, 0x85, 0xce, 0x78, 0x7f, 0xd0, 0x78, 0xf9, 0x7c, 0xaf, 0x3e,
-	0xda, 0xe7, 0xb8, 0xc3, 0xee, 0x83, 0x37, 0x43, 0x0f, 0xc9, 0xac, 0x4e, 0x95, 0xb0, 0x8d, 0x00,
-	0xf9, 0xe4, 0xb0, 0xec, 0x13, 0xf0, 0x6c, 0x58, 0xc9, 0x98, 0x9d, 0x2a, 0x8e, 0x3d, 0xf3, 0x11,
-	0x62, 0x72, 0x9e, 0xc5, 0xb3, 0x03, 0x68, 0x87, 0x52, 0x07, 0x2a, 0x4a, 0x8d, 0x8d, 0xa4, 0xe7,
-	0xe8, 0x77, 0x57, 0xd1, 0xf7, 0x17, 0x50, 0x5e, 0xe6, 0x61, 0x44, 0x1a, 0xe8, 0xa7, 0x39, 0xd3,
-	0x9d, 0x75, 0xa7, 0xb0, 0xbb, 0xd2, 0x00, 0x87, 0x22, 0x13, 0x88, 0xc3, 0xbe, 0x86, 0xad, 0x99,
-	0x88, 0xc5, 0xa9, 0x54, 0x63, 0x52, 0x69, 0x38, 0x95, 0x77, 0x2a, 0x5d, 0xcf, 0x90, 0x99, 0x10,
-	0xdf, 0x9c, 0x95, 0x97, 0xe8, 0x0e, 0x08, 0x63, 0x44, 0xf0, 0x74, 0x26, 0x63, 0xd3, 0x69, 0x3a,
-	0x95, 0x77, 0x2b, 0x6d, 0x91, 0xe6, 0x59, 0xa2, 0x26, 0xfd, 0x02, 0xcc, 0x4b, 0x44, 0xf6, 0x15,
-	0xb4, 0x03, 0xa9, 0x4c, 0xf4, 0x24, 0x0a, 0x30, 0x69, 0x9d, 0x96, 0xd3, 0xd9, 0xab, 0xd2, 0x19,
-	0x2e, 0x60, 0xe4, 0x54, 0x99, 0xd9, 0xfd, 0xbd, 0x06, 0xcd, 0x47, 0x52, 0xcd, 0xa3, 0xe0, 0xff,
-	0x4d, 0xf7, 0x67, 0xaf, 0xa4, 0xbb, 0xd2, 0x32, 0x3a, 0x76, 0x29, 0xe3, 0x9f, 0x42, 0x4b, 0xc6,
-	0x61, 0x9a, 0x44, 0x18, 0x20, 0x6f, 0x75, 0xb5, 0x1c, 0x10, 0x86, 0x17, 0xe8, 0xee, 0xaf, 0x75,
-	0x68, 0xe5, 0xdb, 0xec, 0x01, 0x59, 0x90, 0xf5, 0xde, 0x9d, 0xcb, 0x24, 0xac, 0x09, 0x74, 0xf8,
-	0x03, 0x58, 0x4f, 0x13, 0x65, 0x34, 0x3a, 0xbb, 0xb6, 0xaa, 0x4c, 0x8e, 0x10, 0x30, 0x4c, 0xe2,
-	0x27, 0xd1, 0x29, 0xcf, 0xc0, 0xec, 0x07, 0x68, 0xcf, 0x23, 0x65, 0xce, 0xc4, 0x74, 0x1c, 0xa5,
-	0x1a, 0x9d, 0xb6, 0xdc, 0xf7, 0x2e, 0x3b, 0xb2, 0x77, 0x9c, 0xe1, 0x47, 0x47, 0x83, 0x2d, 0x0c,
-	0x35, 0x14, 0x4b, 0xcd, 0x81, 0xa4, 0x46, 0xa9, 0xde, 0x3e, 0x04, 0xbf, 0xf8, 0xc2, 0x3e, 0x00,
-	0x88, 0xb3, 0xaa, 0x18, 0x17, 0x79, 0xda, 0x44, 0xb2, 0x4f, 0xb5, 0x82, 0xe9, 0xf2, 0x09, 0x30,
-	0x0a, 0x19, 0x03, 0x4f, 0x84, 0xa1, 0x72, 0x59, 0xf3, 0xb9, 0x7b, 0xef, 0xfe, 0xb2, 0x0e, 0xde,
-	0x63, 0xa1, 0x27, 0xd7, 0xdd, 0xd9, 0xf6, 0xcc, 0xa5, 0x3c, 0xa3, 0x3b, 0x3a, 0x2b, 0x01, 0xeb,
-	0x8e, 0xb7, 0x70, 0x87, 0x0a, 0xc3, 0xba, 0x43, 0x80, 0xcc, 0x1d, 0x3d, 0x4d, 0x8c, 0x6b, 0x5f,
-	0x8f, 0xbb, 0x77, 0x76, 0x17, 0x9a, 0x31, 0xb6, 0xac, 0xa5, 0x37, 0x1c, 0x1d, 0x90, 0xde, 0xb0,
-	0x5d, 0x8c, 0xdc, 0x86, 0xfd, 0x84, 0x44, 0x6c, 0x15, 0x11, 0xc7, 0x09, 0xb6, 0x1f, 0xce, 0x01,
-	0x4d, 0x2d, 0x57, 0x59, 0x90, 0xfd, 0x05, 0x2c, 0x6f, 0x95, 0x12, 0x93, 0x1d, 0xc3, 0x9b, 0xb9,
-	0xbd, 0x65, 0xc1, 0xd6, 0x55, 0x04, 0x19, 0x29, 0x94, 0xbe, 0x94, 0x46, 0x93, 0xbf, 0x7a, 0x34,
-	0xb9, 0x08, 0x56, 0x8d, 0xa6, 0x01, 0x6c, 0xe2, 0x9c, 0x8b, 0x14, 0x8e, 0x7a, 0xbb, 0x23, 0x3b,
-	0x80, 0x22, 0x5b, 0x2b, 0xa6, 0x3d, 0x89, 0x48, 0xbe, 0x41, 0x1c, 0xb7, 0x62, 0x7d, 0x68, 0x51,
-	0xdd, 0xe8, 0x4e, 0xdb, 0xd5, 0xee, 0x6b, 0x8e, 0xa4, 0x82, 0xf6, 0x4a, 0xd3, 0x6e, 0x5c, 0xa9,
-	0x69, 0x9f, 0xc2, 0x1b, 0x4b, 0xc2, 0xec, 0x63, 0xcc, 0x6c, 0xb6, 0x79, 0xd9, 0xdd, 0x49, 0x3c,
-	0x9e, 0x63, 0xd9, 0x0e, 0xf8, 0xb6, 0xce, 0xa5, 0xd6, 0x32, 0xeb, 0x60, 0x9f, 0x2f, 0x36, 0xba,
-	0x3f, 0xd7, 0xa1, 0x49, 0x94, 0xeb, 0x9e, 0x75, 0x74, 0xec, 0x52, 0x0f, 0x3c, 0x84, 0x8d, 0x50,
-	0x45, 0x73, 0xba, 0x57, 0x24, 0xcd, 0xbb, 0xed, 0x2a, 0x89, 0x7d, 0x87, 0xc3, 0x5b, 0xcd, 0xfd,
-	0x66, 0x89, 0x7b, 0x08, 0x5e, 0x94, 0x8a, 0x19, 0xdd, 0x69, 0x95, 0x27, 0x8f, 0x8e, 0xfa, 0x87,
-	0xdf, 0xa7, 0x59, 0x0d, 0xb6, 0xd0, 0x51, 0xcf, 0x6e, 0x70, 0x47, 0xeb, 0xfe, 0x83, 0x01, 0x19,
-	0x4e, 0xcf, 0xb4, 0x91, 0xea, 0xba, 0x03, 0x42, 0xc7, 0x2e, 0x05, 0x64, 0x08, 0x4d, 0x95, 0x24,
-	0x66, 0x1c, 0x88, 0xcb, 0x62, 0xc1, 0x11, 0x32, 0xec, 0x0f, 0xb6, 0x2c, 0xd1, 0xb6, 0x7c, 0xb6,
-	0xe6, 0x0d, 0x4b, 0x1d, 0x0a, 0x1c, 0xc7, 0xb7, 0xf2, 0x41, 0x79, 0x82, 0x3b, 0xda, 0x28, 0x91,
-	0x8e, 0x27, 0xf2, 0xdc, 0x5e, 0xfe, 0x6b, 0xab, 0xae, 0xed, 0x83, 0x38, 0x50, 0xe7, 0x2e, 0x50,
-	0xdf, 0xc8, 0x73, 0x7e, 0x93, 0x04, 0x06, 0x39, 0x1f, 0x37, 0x35, 0xfb, 0x12, 0x76, 0x64, 0x01,
-	0xb3, 0x8a, 0xe3, 0x29, 0xfe, 0x77, 0xc2, 0x2b, 0x60, 0x1c, 0x4c, 0x51, 0xd1, 0x4d, 0x21, 0x8f,
-	0xdf, 0x96, 0x65, 0xa9, 0x6f, 0x33, 0xc4, 0xd0, 0x02, 0x06, 0x3b, 0x17, 0x2f, 0x76, 0x6f, 0xfc,
-	0x89, 0xcf, 0xbf, 0x2f, 0x76, 0x6b, 0x3f, 0xbd, 0xdc, 0xad, 0x5d, 0xe0, 0xf3, 0x07, 0x3e, 0x7f,
-	0xe1, 0x73, 0xd2, 0x70, 0xff, 0x20, 0x3f, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x76, 0xa2, 0xea,
-	0x9b, 0xb1, 0x0a, 0x00, 0x00,
+	0xa3, 0x35, 0xd9, 0x9d, 0x86, 0xc5, 0xf6, 0xee, 0x6a, 0x66, 0xe2, 0x2a, 0x37, 0xc4, 0x07, 0x40,
+	0xe2, 0x0b, 0xf0, 0x55, 0xb8, 0x46, 0x9c, 0x38, 0x72, 0xaa, 0x68, 0x6f, 0x9c, 0xe0, 0x23, 0xf0,
+	0x66, 0xf6, 0xad, 0xbd, 0x95, 0xd7, 0x51, 0x2b, 0xa1, 0x1c, 0x56, 0x9e, 0x3f, 0xbf, 0xdf, 0x6f,
+	0xde, 0x7b, 0xf3, 0xde, 0x1b, 0xc3, 0x66, 0x7a, 0xf2, 0x83, 0x0c, 0x8d, 0xee, 0x65, 0x2a, 0x35,
+	0x29, 0x63, 0x51, 0x1a, 0x4e, 0xa4, 0xea, 0xe9, 0x27, 0x42, 0xcd, 0x26, 0xb1, 0xe9, 0xcd, 0xdf,
+	0xdf, 0xee, 0x98, 0xf3, 0x4c, 0x12, 0x60, 0xbb, 0xa3, 0x33, 0x19, 0x16, 0x93, 0x5b, 0x26, 0x9e,
+	0x49, 0x6d, 0xc4, 0x2c, 0xbb, 0xbb, 0x18, 0xd1, 0xd6, 0x8d, 0xd3, 0xf4, 0x34, 0x75, 0xc3, 0xbb,
+	0x76, 0x94, 0xaf, 0x76, 0x7f, 0xab, 0x81, 0x77, 0x28, 0x8d, 0x60, 0x9f, 0x42, 0x6b, 0x2e, 0x95,
+	0x8e, 0xd3, 0x24, 0xa8, 0xdd, 0xae, 0xbd, 0xdb, 0xb9, 0xf7, 0x46, 0x6f, 0xf5, 0xe4, 0xde, 0x71,
+	0x0e, 0x19, 0x78, 0x17, 0x4f, 0xf7, 0xae, 0xf1, 0x82, 0xc1, 0x3e, 0x03, 0x08, 0x95, 0x14, 0x46,
+	0x46, 0x63, 0x61, 0x82, 0xba, 0xe3, 0xbf, 0x59, 0xc5, 0x7f, 0x54, 0x18, 0xc5, 0x7d, 0x22, 0xf4,
+	0x8d, 0x65, 0x9f, 0x65, 0x51, 0xc1, 0x6e, 0xbc, 0x14, 0x9b, 0x08, 0x7d, 0xd3, 0xfd, 0xbb, 0x01,
+	0xde, 0x37, 0x69, 0x24, 0xd9, 0x4d, 0xa8, 0xc7, 0x91, 0x33, 0xde, 0x1f, 0x34, 0x9f, 0x3f, 0xdd,
+	0xab, 0x8f, 0xf6, 0x39, 0xae, 0xb0, 0x7b, 0xe0, 0xcd, 0xd0, 0x43, 0x32, 0x2b, 0xa8, 0x12, 0xb6,
+	0x11, 0x20, 0x9f, 0x1c, 0x96, 0x7d, 0x04, 0x9e, 0x0d, 0x2b, 0x19, 0xb3, 0x53, 0xc5, 0xb1, 0x67,
+	0x3e, 0x44, 0x4c, 0xc1, 0xb3, 0x78, 0x76, 0x00, 0x9d, 0x48, 0xea, 0x50, 0xc5, 0x99, 0xb1, 0x91,
+	0xf4, 0x1c, 0xfd, 0xce, 0x3a, 0xfa, 0xfe, 0x12, 0xca, 0xcb, 0x3c, 0x8c, 0x48, 0x13, 0xfd, 0x34,
+	0x67, 0x3a, 0xb8, 0xee, 0x14, 0x76, 0xd7, 0x1a, 0xe0, 0x50, 0x64, 0x02, 0x71, 0xd8, 0x97, 0xb0,
+	0x35, 0x13, 0x89, 0x38, 0x95, 0x6a, 0x4c, 0x2a, 0x4d, 0xa7, 0xf2, 0x56, 0xa5, 0xeb, 0x39, 0x32,
+	0x17, 0xe2, 0x9b, 0xb3, 0xf2, 0x14, 0xdd, 0x01, 0x61, 0x8c, 0x08, 0xbf, 0x9f, 0xc9, 0xc4, 0x04,
+	0x2d, 0xa7, 0xf2, 0x76, 0xa5, 0x2d, 0xd2, 0x3c, 0x49, 0xd5, 0xa4, 0xbf, 0x00, 0xf3, 0x12, 0x91,
+	0x7d, 0x01, 0x9d, 0x50, 0x2a, 0x13, 0x3f, 0x8e, 0x43, 0xbc, 0xb4, 0xa0, 0xed, 0x74, 0xf6, 0xaa,
+	0x74, 0x86, 0x4b, 0x18, 0x39, 0x55, 0x66, 0x76, 0x7f, 0xaf, 0x41, 0xeb, 0xa1, 0x54, 0xf3, 0x38,
+	0xfc, 0x7f, 0xaf, 0xfb, 0x93, 0x17, 0xae, 0xbb, 0xd2, 0x32, 0x3a, 0x76, 0xe5, 0xc6, 0x3f, 0x86,
+	0xb6, 0x4c, 0xa2, 0x2c, 0x8d, 0x31, 0x40, 0xde, 0xfa, 0x6c, 0x39, 0x20, 0x0c, 0x5f, 0xa0, 0xbb,
+	0xbf, 0xd6, 0xa1, 0x5d, 0x2c, 0xb3, 0xfb, 0x64, 0x41, 0x5e, 0x7b, 0xb7, 0x2f, 0x93, 0xb0, 0x26,
+	0xd0, 0xe1, 0xf7, 0xe1, 0x7a, 0x96, 0x2a, 0xa3, 0xd1, 0xd9, 0xc6, 0xba, 0x34, 0x39, 0x42, 0xc0,
+	0x30, 0x4d, 0x1e, 0xc7, 0xa7, 0x3c, 0x07, 0xb3, 0xef, 0xa0, 0x33, 0x8f, 0x95, 0x39, 0x13, 0xd3,
+	0x71, 0x9c, 0x69, 0x74, 0xda, 0x72, 0xdf, 0xb9, 0xec, 0xc8, 0xde, 0x71, 0x8e, 0x1f, 0x1d, 0x0d,
+	0xb6, 0x30, 0xd4, 0xb0, 0x98, 0x6a, 0x0e, 0x24, 0x35, 0xca, 0xf4, 0xf6, 0x21, 0xf8, 0x8b, 0x1d,
+	0xf6, 0x1e, 0x40, 0x92, 0x67, 0xc5, 0x78, 0x71, 0x4f, 0x9b, 0x48, 0xf6, 0x29, 0x57, 0xf0, 0xba,
+	0x7c, 0x02, 0x8c, 0x22, 0xc6, 0xc0, 0x13, 0x51, 0xa4, 0xdc, 0xad, 0xf9, 0xdc, 0x8d, 0xbb, 0xbf,
+	0x5c, 0x07, 0xef, 0x91, 0xd0, 0x93, 0xab, 0xae, 0x6c, 0x7b, 0xe6, 0xca, 0x3d, 0xa3, 0x3b, 0x3a,
+	0x4f, 0x01, 0xeb, 0x8e, 0xb7, 0x74, 0x87, 0x12, 0xc3, 0xba, 0x43, 0x80, 0xdc, 0x1d, 0x3d, 0x4d,
+	0x8d, 0x2b, 0x5f, 0x8f, 0xbb, 0x31, 0xbb, 0x03, 0xad, 0x04, 0x4b, 0xd6, 0xd2, 0x9b, 0x8e, 0x0e,
+	0x48, 0x6f, 0xda, 0x2a, 0x46, 0x6e, 0xd3, 0x6e, 0x21, 0x11, 0x4b, 0x45, 0x24, 0x49, 0x8a, 0xe5,
+	0x87, 0x7d, 0x40, 0x53, 0xc9, 0x55, 0x26, 0x64, 0x7f, 0x09, 0x2b, 0x4a, 0xa5, 0xc4, 0x64, 0xc7,
+	0xf0, 0x7a, 0x61, 0x6f, 0x59, 0xb0, 0xfd, 0x2a, 0x82, 0x8c, 0x14, 0x4a, 0x3b, 0xa5, 0xd6, 0xe4,
+	0xaf, 0x6f, 0x4d, 0x2e, 0x82, 0x55, 0xad, 0x69, 0x00, 0x9b, 0xd8, 0xe7, 0x62, 0x85, 0xad, 0xde,
+	0xae, 0xc8, 0x00, 0x50, 0x64, 0x6b, 0x4d, 0xb7, 0x27, 0x11, 0xc9, 0x37, 0x88, 0xe3, 0x66, 0xac,
+	0x0f, 0x6d, 0xca, 0x1b, 0x1d, 0x74, 0x5c, 0xee, 0xbe, 0x64, 0x4b, 0x5a, 0xd0, 0x5e, 0x28, 0xda,
+	0x8d, 0x57, 0x2a, 0xda, 0x9f, 0x6a, 0xf0, 0xda, 0x8a, 0x32, 0xfb, 0x10, 0xaf, 0x36, 0x5f, 0xbc,
+	0xec, 0xf1, 0x24, 0x1e, 0x2f, 0xb0, 0x6c, 0x07, 0x7c, 0x9b, 0xe8, 0x52, 0x6b, 0x99, 0x97, 0xb0,
+	0xcf, 0x97, 0x0b, 0x2c, 0x80, 0x96, 0x98, 0xc6, 0xc2, 0xee, 0x35, 0xdc, 0x5e, 0x31, 0xed, 0xfe,
+	0x5c, 0x87, 0x16, 0x89, 0x5d, 0x75, 0x1b, 0xa4, 0x63, 0x57, 0xca, 0xe3, 0x01, 0x6c, 0x44, 0x2a,
+	0x9e, 0xd3, 0x93, 0x23, 0xa9, 0x15, 0x6e, 0x57, 0x49, 0xec, 0x3b, 0x1c, 0x3e, 0x78, 0xee, 0x37,
+	0xbf, 0xd3, 0x07, 0xe0, 0xc5, 0x99, 0x98, 0xd1, 0x73, 0x57, 0x79, 0xf2, 0xe8, 0xa8, 0x7f, 0xf8,
+	0x6d, 0x96, 0xa7, 0x67, 0x1b, 0x1d, 0xf5, 0xec, 0x02, 0x77, 0xb4, 0xee, 0x3f, 0x18, 0x90, 0xe1,
+	0xf4, 0x4c, 0x1b, 0xa9, 0xae, 0x3a, 0x20, 0x74, 0xec, 0x4a, 0x40, 0x86, 0xd0, 0x52, 0x69, 0x6a,
+	0xc6, 0xa1, 0xb8, 0x2c, 0x16, 0x1c, 0x21, 0xc3, 0xfe, 0x60, 0xcb, 0x12, 0x6d, 0x37, 0xc8, 0xe7,
+	0xbc, 0x69, 0xa9, 0x43, 0x81, 0x9d, 0xfa, 0x66, 0xd1, 0x43, 0x4f, 0x70, 0x45, 0x1b, 0x25, 0xb2,
+	0xf1, 0x44, 0x9e, 0xdb, 0xff, 0x05, 0x8d, 0x75, 0x2f, 0xfa, 0x41, 0x12, 0xaa, 0x73, 0x17, 0xa8,
+	0xaf, 0xe4, 0x39, 0xbf, 0x41, 0x02, 0x83, 0x82, 0x8f, 0x8b, 0x9a, 0x7d, 0x0e, 0x3b, 0x72, 0x01,
+	0xb3, 0x8a, 0xe3, 0x29, 0xfe, 0xad, 0xc2, 0xd7, 0x61, 0x1c, 0x4e, 0x51, 0xd1, 0x35, 0x28, 0x8f,
+	0xdf, 0x92, 0x65, 0xa9, 0xaf, 0x73, 0xc4, 0xd0, 0x02, 0x06, 0x3b, 0x17, 0xcf, 0x76, 0xaf, 0xfd,
+	0x89, 0xdf, 0xbf, 0xcf, 0x76, 0x6b, 0x3f, 0x3e, 0xdf, 0xad, 0x5d, 0xe0, 0xf7, 0x07, 0x7e, 0x7f,
+	0xe1, 0x77, 0xd2, 0x74, 0x7f, 0x2e, 0x3f, 0xf8, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x55, 0xec,
+	0x86, 0xcc, 0x0a, 0x00, 0x00,
 }
diff --git a/vendor/src/github.com/docker/swarmkit/api/objects.proto b/vendor/src/github.com/docker/swarmkit/api/objects.proto
index 39a1fe9..56d61a4 100644
--- a/vendor/src/github.com/docker/swarmkit/api/objects.proto
+++ b/vendor/src/github.com/docker/swarmkit/api/objects.proto
@@ -168,6 +168,9 @@
 	// List of IPv4/IPv6 addresses that are assigned to the object
 	// as part of getting attached to this network.
 	repeated string addresses = 2;
+
+	// List of aliases by which a task is resolved in a network
+	repeated string aliases = 3;
 }
 
 message Network {
diff --git a/vendor/src/github.com/docker/swarmkit/ca/certificates.go b/vendor/src/github.com/docker/swarmkit/ca/certificates.go
index cdda5ed..d39813b 100644
--- a/vendor/src/github.com/docker/swarmkit/ca/certificates.go
+++ b/vendor/src/github.com/docker/swarmkit/ca/certificates.go
@@ -122,13 +122,12 @@
 		return nil, err
 	}
 
-	var signedCert []byte
 	if !rca.CanSign() {
 		return nil, ErrNoValidSigner
 	}
 
 	// Obtain a signed Certificate
-	signedCert, err = rca.ParseValidateAndSignCSR(csr, cn, ou, org)
+	certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org)
 	if err != nil {
 		log.Debugf("failed to sign node certificate: %v", err)
 		return nil, err
@@ -141,12 +140,12 @@
 	}
 
 	// Write the chain to disk
-	if err := ioutils.AtomicWriteFile(paths.Cert, signedCert, 0644); err != nil {
+	if err := ioutils.AtomicWriteFile(paths.Cert, certChain, 0644); err != nil {
 		return nil, err
 	}
 
 	// Create a valid TLSKeyPair out of the PEM encoded private key and certificate
-	tlsKeyPair, err := tls.X509KeyPair(signedCert, key)
+	tlsKeyPair, err := tls.X509KeyPair(certChain, key)
 	if err != nil {
 		return nil, err
 	}
@@ -157,7 +156,7 @@
 
 // RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is
 // available, or by requesting them from the remote server at remoteAddr.
-func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, role, secret string, picker *picker.Picker, transport credentials.TransportAuthenticator, nodeInfo chan<- string) (*tls.Certificate, error) {
+func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths CertPaths, role, secret string, picker *picker.Picker, transport credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) (*tls.Certificate, error) {
 	// Create a new key/pair and CSR for the new manager
 	// Write the new CSR and the new key to a temporary location so we can survive crashes on rotation
 	tempPaths := genTempPaths(paths)
@@ -245,31 +244,56 @@
 		return nil, err
 	}
 
-	return cert, nil
+	// Append the first root CA Cert to the certificate, to create a valid chain
+	// Get the first Root CA Cert on the bundle
+	firstRootCA, _, err := helpers.ParseOneCertificateFromPEM(rca.Cert)
+	if err != nil {
+		return nil, err
+	}
+	if len(firstRootCA) < 1 {
+		return nil, fmt.Errorf("no valid Root CA certificates found")
+	}
+	// Convert the first root CA back to PEM
+	firstRootCAPEM := helpers.EncodeCertificatePEM(firstRootCA[0])
+	if firstRootCAPEM == nil {
+		return nil, fmt.Errorf("error while encoding the Root CA certificate")
+	}
+	// Append this Root CA to the certificate to make [Cert PEM]\n[Root PEM][EOF]
+	certChain := append(cert, firstRootCAPEM...)
+
+	return certChain, nil
 }
 
-// NewRootCA creates a new RootCA object from unparsed cert and key byte
+// NewRootCA creates a new RootCA object from unparsed PEM cert bundle and key byte
 // slices. key may be nil, and in this case NewRootCA will return a RootCA
 // without a signer.
-func NewRootCA(cert, key []byte, certExpiry time.Duration) (RootCA, error) {
-	// Check to see if the Certificate file is a valid, self-signed Cert
-	parsedCA, err := helpers.ParseSelfSignedCertificatePEM(cert)
+func NewRootCA(certBytes, keyBytes []byte, certExpiry time.Duration) (RootCA, error) {
+	// Parse all the certificates in the cert bundle
+	parsedCerts, err := helpers.ParseCertificatesPEM(certBytes)
 	if err != nil {
 		return RootCA{}, err
 	}
-
-	// Calculate the digest for our RootCACertificate
-	digest := digest.FromBytes(cert)
-
-	// Create a Pool with our RootCACertificate
-	pool := x509.NewCertPool()
-	if !pool.AppendCertsFromPEM(cert) {
-		return RootCA{}, fmt.Errorf("error while adding root CA cert to Cert Pool")
+	// Check to see if we have at least one valid cert
+	if len(parsedCerts) < 1 {
+		return RootCA{}, fmt.Errorf("no valid Root CA certificates found")
 	}
 
-	if len(key) == 0 {
+	// Create a Pool with all of the certificates found
+	pool := x509.NewCertPool()
+	for _, cert := range parsedCerts {
+		// Check to see if all of the certificates are valid, self-signed root CA certs
+		if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+			return RootCA{}, fmt.Errorf("error while validating Root CA Certificate: %v", err)
+		}
+		pool.AddCert(cert)
+	}
+
+	// Calculate the digest for our Root CA bundle
+	digest := digest.FromBytes(certBytes)
+
+	if len(keyBytes) == 0 {
 		// This RootCA does not have a valid signer.
-		return RootCA{Cert: cert, Digest: digest, Pool: pool}, nil
+		return RootCA{Cert: certBytes, Digest: digest, Pool: pool}, nil
 	}
 
 	var (
@@ -288,39 +312,40 @@
 	}
 
 	// Attempt to decrypt the current private-key with the passphrases provided
-	priv, err = helpers.ParsePrivateKeyPEMWithPassword(key, passphrase)
+	priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrase)
 	if err != nil {
-		priv, err = helpers.ParsePrivateKeyPEMWithPassword(key, passphrasePrev)
+		priv, err = helpers.ParsePrivateKeyPEMWithPassword(keyBytes, passphrasePrev)
 		if err != nil {
 			log.Debug("Malformed private key %v", err)
 			return RootCA{}, err
 		}
 	}
 
-	if err := ensureCertKeyMatch(parsedCA, priv.Public()); err != nil {
+	// We will always use the first certificate inside of the root bundle as the active one
+	if err := ensureCertKeyMatch(parsedCerts[0], priv.Public()); err != nil {
 		return RootCA{}, err
 	}
 
-	signer, err := local.NewSigner(priv, parsedCA, cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
+	signer, err := local.NewSigner(priv, parsedCerts[0], cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry))
 	if err != nil {
 		return RootCA{}, err
 	}
 
 	// If the key was loaded from disk unencrypted, but there is a passphrase set,
 	// ensure it is encrypted, so it doesn't hit raft in plain-text
-	keyBlock, _ := pem.Decode(key)
+	keyBlock, _ := pem.Decode(keyBytes)
 	if keyBlock == nil {
 		// This RootCA does not have a valid signer.
-		return RootCA{Cert: cert, Digest: digest, Pool: pool}, nil
+		return RootCA{Cert: certBytes, Digest: digest, Pool: pool}, nil
 	}
 	if passphraseStr != "" && !x509.IsEncryptedPEMBlock(keyBlock) {
-		key, err = EncryptECPrivateKey(key, passphraseStr)
+		keyBytes, err = EncryptECPrivateKey(keyBytes, passphraseStr)
 		if err != nil {
 			return RootCA{}, err
 		}
 	}
 
-	return RootCA{Signer: signer, Key: key, Digest: digest, Cert: cert, Pool: pool}, nil
+	return RootCA{Signer: signer, Key: keyBytes, Digest: digest, Cert: certBytes, Pool: pool}, nil
 }
 
 func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error {
@@ -494,15 +519,12 @@
 	}
 
 	// Obtain a signed Certificate
-	cert, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
+	certChain, err := rootCA.ParseValidateAndSignCSR(csr, cn, ou, org)
 	if err != nil {
 		log.Debugf("failed to sign node certificate: %v", err)
 		return nil, err
 	}
 
-	// Append the root CA Key to the certificate, to create a valid chain
-	certChain := append(cert, rootCA.Cert...)
-
 	// Ensure directory exists
 	err = os.MkdirAll(filepath.Dir(paths.Cert), 0755)
 	if err != nil {
@@ -550,7 +572,7 @@
 
 // GetRemoteSignedCertificate submits a CSR together with the intended role to a remote CA server address
 // available through a picker, and that is part of a CA identified by a specific certificate pool.
-func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret string, rootCAPool *x509.CertPool, picker *picker.Picker, creds credentials.TransportAuthenticator, nodeInfo chan<- string) ([]byte, error) {
+func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret string, rootCAPool *x509.CertPool, picker *picker.Picker, creds credentials.TransportAuthenticator, nodeInfo chan<- api.IssueNodeCertificateResponse) ([]byte, error) {
 	if rootCAPool == nil {
 		return nil, fmt.Errorf("valid root CA pool required")
 	}
@@ -596,13 +618,12 @@
 		return nil, err
 	}
 
-	nodeID := issueResponse.NodeID
 	// Send back the NodeID on the nodeInfo, so the caller can know what ID was assigned by the CA
 	if nodeInfo != nil {
-		nodeInfo <- nodeID
+		nodeInfo <- *issueResponse
 	}
 
-	statusRequest := &api.NodeCertificateStatusRequest{NodeID: nodeID}
+	statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID}
 	expBackoff := events.NewExponentialBackoff(events.ExponentialBackoffConfig{
 		Base:   time.Second,
 		Factor: time.Second,
diff --git a/vendor/src/github.com/docker/swarmkit/ca/config.go b/vendor/src/github.com/docker/swarmkit/ca/config.go
index 876dd24..64eb6cf 100644
--- a/vendor/src/github.com/docker/swarmkit/ca/config.go
+++ b/vendor/src/github.com/docker/swarmkit/ca/config.go
@@ -139,7 +139,7 @@
 // LoadOrCreateSecurityConfig encapsulates the security logic behind joining a cluster.
 // Every node requires at least a set of TLS certificates with which to join the cluster with.
 // In the case of a manager, these certificates will be used both for client and server credentials.
-func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret, proposedRole string, picker *picker.Picker, nodeInfo chan<- string) (*SecurityConfig, error) {
+func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret, proposedRole string, picker *picker.Picker, nodeInfo chan<- api.IssueNodeCertificateResponse) (*SecurityConfig, error) {
 	paths := NewConfigPaths(baseCertDir)
 
 	var (
@@ -198,7 +198,10 @@
 			org := identity.NewID()
 
 			if nodeInfo != nil {
-				nodeInfo <- cn
+				nodeInfo <- api.IssueNodeCertificateResponse{
+					NodeID:         cn,
+					NodeMembership: api.NodeMembershipAccepted,
+				}
 			}
 			tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
 		} else {
@@ -225,7 +228,10 @@
 		log.Debugf("new TLS credentials generated: %s.", paths.Node.Cert)
 	} else {
 		if nodeInfo != nil {
-			nodeInfo <- clientTLSCreds.NodeID()
+			nodeInfo <- api.IssueNodeCertificateResponse{
+				NodeID:         clientTLSCreds.NodeID(),
+				NodeMembership: api.NodeMembershipAccepted,
+			}
 		}
 		log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
 	}
diff --git a/vendor/src/github.com/docker/swarmkit/ca/server.go b/vendor/src/github.com/docker/swarmkit/ca/server.go
index 0b2a30d..9d118cd 100644
--- a/vendor/src/github.com/docker/swarmkit/ca/server.go
+++ b/vendor/src/github.com/docker/swarmkit/ca/server.go
@@ -33,7 +33,7 @@
 
 	// Started is a channel which gets closed once the server is running
 	// and able to service RPCs.
-	Started chan struct{}
+	started chan struct{}
 }
 
 // DefaultAcceptancePolicy returns the default acceptance policy.
@@ -64,7 +64,7 @@
 	return &Server{
 		store:          store,
 		securityConfig: securityConfig,
-		Started:        make(chan struct{}),
+		started:        make(chan struct{}),
 	}
 }
 
@@ -249,7 +249,8 @@
 	}
 
 	return &api.IssueNodeCertificateResponse{
-		NodeID: nodeID,
+		NodeID:         nodeID,
+		NodeMembership: nodeMembership,
 	}, nil
 }
 
@@ -289,11 +290,14 @@
 // issueRenewCertificate receives a nodeID and a CSR and modifies the node's certificate entry with the new CSR
 // and changes the state to RENEW, so it can be picked up and signed by the signing reconciliation loop
 func (s *Server) issueRenewCertificate(ctx context.Context, nodeID string, csr []byte) (*api.IssueNodeCertificateResponse, error) {
-	var cert api.Certificate
+	var (
+		cert api.Certificate
+		node *api.Node
+	)
 	err := s.store.Update(func(tx store.Tx) error {
 
 		// Attempt to retrieve the node with nodeID
-		node := store.GetNode(tx, nodeID)
+		node = store.GetNode(tx, nodeID)
 		if node == nil {
 			log.G(ctx).WithFields(logrus.Fields{
 				"node.id": nodeID,
@@ -325,8 +329,10 @@
 		"cert.role": cert.Role,
 		"method":    "issueRenewCertificate",
 	}).Debugf("node certificate updated")
+
 	return &api.IssueNodeCertificateResponse{
-		NodeID: nodeID,
+		NodeID:         nodeID,
+		NodeMembership: node.Spec.Membership,
 	}, nil
 }
 
@@ -358,7 +364,14 @@
 	s.ctx, s.cancel = context.WithCancel(ctx)
 	s.mu.Unlock()
 
-	close(s.Started)
+	// Run() should never be called twice, but just in case, we're
+	// attempting to close the started channel in a safe way
+	select {
+	case <-s.started:
+		return fmt.Errorf("CA server cannot be started more than once")
+	default:
+		close(s.started)
+	}
 
 	// Retrieve the channels to keep track of changes in the cluster
 	// Retrieve all the currently registered nodes
@@ -439,6 +452,11 @@
 	return nil
 }
 
+// Ready waits on the ready channel and returns when the server is ready to serve.
+func (s *Server) Ready() <-chan struct{} {
+	return s.started
+}
+
 func (s *Server) addTask() error {
 	s.mu.Lock()
 	if !s.isRunning() {
@@ -600,8 +618,7 @@
 	// We were able to successfully sign the new CSR. Let's try to update the nodeStore
 	for {
 		err = s.store.Update(func(tx store.Tx) error {
-			// Remote nodes are expecting a full certificate chain, not just a signed certificate
-			node.Certificate.Certificate = append(cert, s.securityConfig.RootCA().Cert...)
+			node.Certificate.Certificate = cert
 			node.Certificate.Status = api.IssuanceStatus{
 				State: api.IssuanceStateIssued,
 			}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go b/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
index 962ab88..e9a55b0 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
@@ -442,7 +442,11 @@
 		for _, na := range s.Spec.Networks {
 			n := store.GetNetwork(tx, na.Target)
 			if n != nil {
-				networks = append(networks, &api.NetworkAttachment{Network: n})
+				var aliases []string
+				for _, a := range na.Aliases {
+					aliases = append(aliases, a)
+				}
+				networks = append(networks, &api.NetworkAttachment{Network: n, Aliases: aliases})
 			}
 		}
 	})
diff --git a/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
index e35ea92..9ad726b 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
@@ -95,7 +95,6 @@
 	cluster              Cluster
 	ctx                  context.Context
 	cancel               context.CancelFunc
-	wg                   sync.WaitGroup
 
 	taskUpdates     map[string]*api.TaskStatus // indexed by task ID
 	taskUpdatesLock sync.Mutex
@@ -152,8 +151,6 @@
 		d.mu.Unlock()
 		return fmt.Errorf("dispatcher is stopped")
 	}
-	d.wg.Add(1)
-	defer d.wg.Done()
 	logger := log.G(ctx).WithField("module", "dispatcher")
 	ctx = log.WithLogger(ctx, logger)
 	if err := d.markNodesUnknown(ctx); err != nil {
@@ -236,27 +233,19 @@
 	d.cancel()
 	d.mu.Unlock()
 	d.nodes.Clean()
-	// wait for all handlers to finish their raft deals, because manager will
-	// set raftNode to nil
-	d.wg.Wait()
 	return nil
 }
 
-func (d *Dispatcher) addTask() error {
+func (d *Dispatcher) isRunningLocked() error {
 	d.mu.Lock()
 	if !d.isRunning() {
 		d.mu.Unlock()
 		return grpc.Errorf(codes.Aborted, "dispatcher is stopped")
 	}
-	d.wg.Add(1)
 	d.mu.Unlock()
 	return nil
 }
 
-func (d *Dispatcher) doneTask() {
-	d.wg.Done()
-}
-
 func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
 	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
 	var nodes []*api.Node
@@ -325,10 +314,9 @@
 // register is used for registration of node with particular dispatcher.
 func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, string, error) {
 	// prevent register until we're ready to accept it
-	if err := d.addTask(); err != nil {
+	if err := d.isRunningLocked(); err != nil {
 		return "", "", err
 	}
-	defer d.doneTask()
 
 	// create or update node in store
 	// TODO(stevvooe): Validate node specification.
@@ -390,10 +378,9 @@
 	}
 	log := log.G(ctx).WithFields(fields)
 
-	if err := d.addTask(); err != nil {
+	if err := d.isRunningLocked(); err != nil {
 		return nil, err
 	}
-	defer d.doneTask()
 
 	if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
 		return nil, err
@@ -505,10 +492,9 @@
 	}
 	nodeID := nodeInfo.NodeID
 
-	if err := d.addTask(); err != nil {
+	if err := d.isRunningLocked(); err != nil {
 		return err
 	}
-	defer d.doneTask()
 
 	fields := logrus.Fields{
 		"node.id":      nodeID,
@@ -585,10 +571,9 @@
 }
 
 func (d *Dispatcher) nodeRemove(id string, status api.NodeStatus) error {
-	if err := d.addTask(); err != nil {
+	if err := d.isRunningLocked(); err != nil {
 		return err
 	}
-	defer d.doneTask()
 	// TODO(aaronl): Is it worth batching node removals?
 	err := d.store.Update(func(tx store.Tx) error {
 		node := store.GetNode(tx, id)
@@ -640,10 +625,9 @@
 	}
 	nodeID := nodeInfo.NodeID
 
-	if err := d.addTask(); err != nil {
+	if err := d.isRunningLocked(); err != nil {
 		return err
 	}
-	defer d.doneTask()
 
 	// register the node.
 	nodeID, sessionID, err := d.register(stream.Context(), nodeID, r.Description)
diff --git a/vendor/src/github.com/docker/swarmkit/manager/manager.go b/vendor/src/github.com/docker/swarmkit/manager/manager.go
index 1a89aa6..c22ec6c 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/manager.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/manager.go
@@ -3,6 +3,7 @@
 import (
 	"crypto/x509"
 	"encoding/pem"
+	"errors"
 	"fmt"
 	"net"
 	"os"
@@ -101,6 +102,10 @@
 
 	tcpAddr := config.ProtoAddr["tcp"]
 
+	if tcpAddr == "" {
+		return nil, errors.New("no tcp listen address or listener provided")
+	}
+
 	listenHost, listenPort, err := net.SplitHostPort(tcpAddr)
 	if err == nil {
 		ip := net.ParseIP(listenHost)
@@ -664,7 +669,7 @@
 	return s.Update(func(tx store.Tx) error {
 		cluster = store.GetCluster(tx, clusterID)
 		if cluster == nil {
-			return fmt.Errorf("cluster not found")
+			return fmt.Errorf("cluster not found: %s", clusterID)
 		}
 		cluster.RootCA.CAKey = finalKey
 		return store.UpdateCluster(tx, cluster)
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
index efa6dcd..c0d23d3 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
@@ -53,9 +53,6 @@
 
 // Check returns true if the task can be scheduled into the given node.
 func (f *ResourceFilter) Check(n *NodeInfo) bool {
-	if n.AvailableResources == nil {
-		return false
-	}
 	if f.reservations.NanoCPUs > n.AvailableResources.NanoCPUs {
 		return false
 	}
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
index a8be3b5..a6e92fa 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
@@ -6,10 +6,10 @@
 type NodeInfo struct {
 	*api.Node
 	Tasks              map[string]*api.Task
-	AvailableResources *api.Resources
+	AvailableResources api.Resources
 }
 
-func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources *api.Resources) NodeInfo {
+func newNodeInfo(n *api.Node, tasks map[string]*api.Task, availableResources api.Resources) NodeInfo {
 	nodeInfo := NodeInfo{
 		Node:               n,
 		Tasks:              make(map[string]*api.Task),
@@ -31,11 +31,9 @@
 	}
 
 	delete(nodeInfo.Tasks, t.ID)
-	if nodeInfo.AvailableResources != nil {
-		reservations := taskReservations(t.Spec)
-		nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
-		nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
-	}
+	reservations := taskReservations(t.Spec)
+	nodeInfo.AvailableResources.MemoryBytes += reservations.MemoryBytes
+	nodeInfo.AvailableResources.NanoCPUs += reservations.NanoCPUs
 
 	return true
 }
@@ -49,11 +47,9 @@
 	}
 	if _, ok := nodeInfo.Tasks[t.ID]; !ok {
 		nodeInfo.Tasks[t.ID] = t
-		if nodeInfo.AvailableResources != nil {
-			reservations := taskReservations(t.Spec)
-			nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
-			nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
-		}
+		reservations := taskReservations(t.Spec)
+		nodeInfo.AvailableResources.MemoryBytes -= reservations.MemoryBytes
+		nodeInfo.AvailableResources.NanoCPUs -= reservations.NanoCPUs
 		return true
 	}
 
diff --git a/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go b/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
index b26ba3a..6a57d48 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
@@ -243,16 +243,18 @@
 
 func (s *Scheduler) createOrUpdateNode(n *api.Node) {
 	nodeInfo := s.nodeHeap.nodeInfo(n.ID)
+	var resources api.Resources
 	if n.Description != nil && n.Description.Resources != nil {
-		if nodeInfo.AvailableResources == nil {
-			// if nodeInfo.AvailableResources hasn't been initialized
-			// we copy resources information from node description and
-			// pass it to nodeInfo
-			resources := *n.Description.Resources
-			nodeInfo.AvailableResources = &resources
+		resources = *n.Description.Resources
+		// reconcile resources by looping over all tasks in this node
+		for _, task := range nodeInfo.Tasks {
+			reservations := taskReservations(task.Spec)
+			resources.MemoryBytes -= reservations.MemoryBytes
+			resources.NanoCPUs -= reservations.NanoCPUs
 		}
 	}
 	nodeInfo.Node = n
+	nodeInfo.AvailableResources = resources
 	s.nodeHeap.addOrUpdateNode(nodeInfo)
 }
 
@@ -422,10 +424,9 @@
 
 	i := 0
 	for _, n := range nodes {
-		var resources *api.Resources
+		var resources api.Resources
 		if n.Description != nil && n.Description.Resources != nil {
-			resources = &api.Resources{NanoCPUs: n.Description.Resources.NanoCPUs,
-				MemoryBytes: n.Description.Resources.MemoryBytes}
+			resources = *n.Description.Resources
 		}
 		s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(n, tasksByNode[n.ID], resources))
 		s.nodeHeap.index[n.ID] = i
diff --git a/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go b/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
index 8b49bfe..dbefac5 100644
--- a/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
+++ b/vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
@@ -91,6 +91,10 @@
 	removed     uint32
 	joinAddr    string
 
+	// waitProp waits for all the proposals to be terminated before
+	// shutting down the node.
+	waitProp sync.WaitGroup
+
 	// forceNewCluster is a special flag used to recover from disaster
 	// scenario by pointing to an existing or backed up data directory.
 	forceNewCluster bool
@@ -420,6 +424,7 @@
 	defer n.stopMu.Unlock()
 
 	n.cancel()
+	n.waitProp.Wait()
 	n.asyncTasks.Wait()
 
 	members := n.cluster.Members()
@@ -762,6 +767,17 @@
 	return atomic.LoadUint32(&n.removed) == 1
 }
 
+// canSubmitProposal defines if any more proposals
+// could be submitted and processed.
+func (n *Node) canSubmitProposal() bool {
+	select {
+	case <-n.Ctx.Done():
+		return false
+	default:
+		return true
+	}
+}
+
 // Saves a log entry to our Store
 func (n *Node) saveToStorage(raftConfig *api.RaftConfig, hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot) (err error) {
 	if !raft.IsEmptySnap(snapshot) {
@@ -842,7 +858,7 @@
 			}
 		}
 
-		if queryMember == nil {
+		if queryMember == nil || queryMember.RaftID == n.Config.ID {
 			n.Config.Logger.Error("could not find cluster member to query for leader address")
 			return
 		}
@@ -885,10 +901,19 @@
 	err  error
 }
 
-// processInternalRaftRequest sends a message through consensus
-// and then waits for it to be applies to the server. It will
-// block until the change is performed or there is an error
+// processInternalRaftRequest sends a message to nodes participating
+// in the raft to apply a log entry and then waits for it to be applied
+// on the server. It will block until the update is performed, there is
+// an error or until the raft node finalizes all the proposals on node
+// shutdown.
 func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRaftRequest, cb func()) (proto.Message, error) {
+	n.waitProp.Add(1)
+	defer n.waitProp.Done()
+
+	if !n.canSubmitProposal() {
+		return nil, ErrStopped
+	}
+
 	r.ID = n.reqIDGen.Next()
 
 	ch := n.wait.register(r.ID, cb)
@@ -923,7 +948,7 @@
 			return res.resp, res.err
 		}
 		return nil, ErrLostLeadership
-	case <-n.stopCh:
+	case <-n.Ctx.Done():
 		n.wait.cancel(r.ID)
 		return nil, ErrStopped
 	case <-ctx.Done():
@@ -956,7 +981,7 @@
 	case <-ctx.Done():
 		n.wait.trigger(cc.ID, nil)
 		return ctx.Err()
-	case <-n.stopCh:
+	case <-n.Ctx.Done():
 		return ErrStopped
 	}
 }
diff --git a/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
index 491faf2..1a7c4e1 100644
--- a/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
+++ b/vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
@@ -262,6 +262,8 @@
 	return out, nil
 }
 
+// ParseCgroupFile parses the given cgroup file, typically from
+// /proc/<pid>/cgroup, into a map of subgroups to cgroup names.
 func ParseCgroupFile(path string) (map[string]string, error) {
 	f, err := os.Open(path)
 	if err != nil {
@@ -269,7 +271,12 @@
 	}
 	defer f.Close()
 
-	s := bufio.NewScanner(f)
+	return parseCgroupFromReader(f)
+}
+
+// helper function for ParseCgroupFile to make testing easier
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+	s := bufio.NewScanner(r)
 	cgroups := make(map[string]string)
 
 	for s.Scan() {
@@ -278,7 +285,16 @@
 		}
 
 		text := s.Text()
-		parts := strings.Split(text, ":")
+		// from cgroups(7):
+		// /proc/[pid]/cgroup
+		// ...
+		// For each cgroup hierarchy ... there is one entry
+		// containing three colon-separated fields of the form:
+		//     hierarchy-ID:subsystem-list:cgroup-path
+		parts := strings.SplitN(text, ":", 3)
+		if len(parts) < 3 {
+			return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text)
+		}
 
 		for _, subs := range strings.Split(parts[1], ",") {
 			cgroups[subs] = parts[2]
diff --git a/vendor/src/github.com/pkg/errors/.gitignore b/vendor/src/github.com/pkg/errors/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/src/github.com/pkg/errors/.travis.yml b/vendor/src/github.com/pkg/errors/.travis.yml
new file mode 100644
index 0000000..024e284
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+  - 1.4.3
+  - 1.5.4
+  - 1.6.2
+  - tip
+
+script:
+  - go test -v ./...
diff --git a/vendor/src/github.com/pkg/errors/LICENSE b/vendor/src/github.com/pkg/errors/LICENSE
new file mode 100644
index 0000000..fafcaaf
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/src/github.com/pkg/errors/README.md b/vendor/src/github.com/pkg/errors/README.md
new file mode 100644
index 0000000..6ea6422
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/README.md
@@ -0,0 +1,50 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
+
+Package errors provides simple error handling primitives.
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+        return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+        return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+        Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+        // handle specifically
+default:
+        // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## Licence
+
+BSD-2-Clause
diff --git a/vendor/src/github.com/pkg/errors/appveyor.yml b/vendor/src/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+  # some helpful output for debugging builds
+  - go version
+  - go env
+  # pre-installed MinGW at C:\MinGW is 32bit only
+  # but MSYS2 at C:\msys64 has mingw64
+  - set PATH=C:\msys64\mingw64\bin;%PATH%
+  - gcc --version
+  - g++ --version
+
+build_script:
+  - go install -v ./...
+
+test_script:
+  - set PATH=C:\gopath\bin;%PATH%
+  - go test -v ./...
+
+#artifacts:
+#  - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/src/github.com/pkg/errors/errors.go b/vendor/src/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..65bf7a0
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/errors.go
@@ -0,0 +1,211 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+//     if err != nil {
+//             return err
+//     }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error. For example
+//
+//     _, err := ioutil.ReadAll(r)
+//     if err != nil {
+//             return errors.Wrap(err, "read failed")
+//     }
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+//     type Causer interface {
+//             Cause() error
+//     }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+//     switch err := errors.Cause(err).(type) {
+//     case *MyError:
+//             // handle specifically
+//     default:
+//             // unknown error
+//     }
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+//     %s    print the error. If the error has a Cause it will be
+//           printed recursively
+//     %v    see %s
+//     %+v   extended format. Each Frame of the error's StackTrace will
+//           be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface.
+//
+//     type StackTrace interface {
+//             StackTrace() errors.StackTrace
+//     }
+//
+// Where errors.StackTrace is defined as
+//
+//     type StackTrace []Frame
+//
+// The Frame type represents a call site in the stacktrace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stacktrace of this error. For example:
+//
+//     if err, ok := err.(StackTrace); ok {
+//             for _, f := range err.StackTrace() {
+//                     fmt.Printf("%+s:%d", f)
+//             }
+//     }
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+	"fmt"
+	"io"
+)
+
+// _error is an error implementation returned by New and Errorf
+// that implements its own fmt.Formatter.
+type _error struct {
+	msg string
+	*stack
+}
+
+func (e _error) Error() string { return e.msg }
+
+func (e _error) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			io.WriteString(s, e.msg)
+			fmt.Fprintf(s, "%+v", e.StackTrace())
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, e.msg)
+	}
+}
+
+// New returns an error with the supplied message.
+func New(message string) error {
+	return _error{
+		message,
+		callers(),
+	}
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+func Errorf(format string, args ...interface{}) error {
+	return _error{
+		fmt.Sprintf(format, args...),
+		callers(),
+	}
+}
+
+type cause struct {
+	cause error
+	msg   string
+}
+
+func (c cause) Error() string { return fmt.Sprintf("%s: %v", c.msg, c.Cause()) }
+func (c cause) Cause() error  { return c.cause }
+
+// wrapper is an error implementation returned by Wrap and Wrapf
+// that implements its own fmt.Formatter.
+type wrapper struct {
+	cause
+	*stack
+}
+
+func (w wrapper) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v\n", w.Cause())
+			fmt.Fprintf(s, "%+v: %s", w.StackTrace()[0], w.msg)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, w.Error())
+	}
+}
+
+// Wrap returns an error annotating err with message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	return wrapper{
+		cause: cause{
+			cause: err,
+			msg:   message,
+		},
+		stack: callers(),
+	}
+}
+
+// Wrapf returns an error annotating err with the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	return wrapper{
+		cause: cause{
+			cause: err,
+			msg:   fmt.Sprintf(format, args...),
+		},
+		stack: callers(),
+	}
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//     type Causer interface {
+//            Cause() error
+//     }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+	type causer interface {
+		Cause() error
+	}
+
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
diff --git a/vendor/src/github.com/pkg/errors/stack.go b/vendor/src/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..243a64a
--- /dev/null
+++ b/vendor/src/github.com/pkg/errors/stack.go
@@ -0,0 +1,165 @@
+package errors
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"runtime"
+	"strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	file, _ := fn.FileLine(f.pc())
+	return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return 0
+	}
+	_, line := fn.FileLine(f.pc())
+	return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+//    %s    source file
+//    %d    source line
+//    %n    function name
+//    %v    equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+s   path of source file relative to the compile time GOPATH
+//    %+v   equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 's':
+		switch {
+		case s.Flag('+'):
+			pc := f.pc()
+			fn := runtime.FuncForPC(pc)
+			if fn == nil {
+				io.WriteString(s, "unknown")
+			} else {
+				file, _ := fn.FileLine(pc)
+				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+			}
+		default:
+			io.WriteString(s, path.Base(f.file()))
+		}
+	case 'd':
+		fmt.Fprintf(s, "%d", f.line())
+	case 'n':
+		name := runtime.FuncForPC(f.pc()).Name()
+		io.WriteString(s, funcname(name))
+	case 'v':
+		f.Format(s, 's')
+		io.WriteString(s, ":")
+		f.Format(s, 'd')
+	}
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+func (st StackTrace) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case s.Flag('+'):
+			for _, f := range st {
+				fmt.Fprintf(s, "\n%+v", f)
+			}
+		case s.Flag('#'):
+			fmt.Fprintf(s, "%#v", []Frame(st))
+		default:
+			fmt.Fprintf(s, "%v", []Frame(st))
+		}
+	case 's':
+		fmt.Fprintf(s, "%s", []Frame(st))
+	}
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) StackTrace() StackTrace {
+	f := make([]Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = Frame((*s)[i])
+	}
+	return f
+}
+
+func callers() *stack {
+	const depth = 32
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+	i := strings.LastIndex(name, "/")
+	name = name[i+1:]
+	i = strings.Index(name, ".")
+	return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+	// Here we want to get the source file path relative to the compile time
+	// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+	// GOPATH at runtime, but we can infer the number of path segments in the
+	// GOPATH. We note that fn.Name() returns the function name qualified by
+	// the import path, which does not include the GOPATH. Thus we can trim
+	// segments from the beginning of the file path until the number of path
+	// separators remaining is one more than the number of path separators in
+	// the function name. For example, given:
+	//
+	//    GOPATH     /home/user
+	//    file       /home/user/src/pkg/sub/file.go
+	//    fn.Name()  pkg/sub.Type.Method
+	//
+	// We want to produce:
+	//
+	//    pkg/sub/file.go
+	//
+	// From this we can easily see that fn.Name() has one less path separator
+	// than our desired output. We count separators from the end of the file
+	// path until it finds two more than in the function name and then move
+	// one character forward to preserve the initial path segment without a
+	// leading separator.
+	const sep = "/"
+	goal := strings.Count(name, sep) + 2
+	i := len(file)
+	for n := 0; n < goal; n++ {
+		i = strings.LastIndex(file[:i], sep)
+		if i == -1 {
+			// not enough separators found, set i so that the slice expression
+			// below leaves file unmodified
+			i = -len(sep)
+			break
+		}
+	}
+	// get back to 0 or trim the leading separator
+	file = file[i+len(sep):]
+	return file
+}