Merge pull request #286 from thaJeztah/18.09_backport_cp_slash_fix

[18.09 backport] Fix docker cp when container source path is /
diff --git a/Dockerfile b/Dockerfile
index b76ae60..a2dcbb8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -77,7 +77,7 @@
 
 FROM base AS docker-py
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f
+ENV DOCKER_PY_COMMIT ac922192959870774ad8428344d9faa0555f7ba6
 RUN git clone https://github.com/docker/docker-py.git /build \
 	&& cd /build \
 	&& git checkout -q $DOCKER_PY_COMMIT
@@ -187,6 +187,9 @@
 	jq \
 	libcap2-bin \
 	libdevmapper-dev \
+# libffi-dev and libssl-dev appear to be required for compiling paramiko on s390x/ppc64le
+	libffi-dev \
+	libssl-dev \
 	libudev-dev \
 	libsystemd-dev \
 	binutils-mingw-w64 \
@@ -195,6 +198,8 @@
 	pigz \
 	python-backports.ssl-match-hostname \
 	python-dev \
+# python-cffi appears to be required for compiling paramiko on s390x/ppc64le
+	python-cffi \
 	python-mock \
 	python-pip \
 	python-requests \
@@ -227,7 +232,8 @@
 # split out into a separate image, including all the `python-*` deps installed
 # above.
 RUN cd /docker-py \
-	&& pip install docker-pycreds==0.2.1 \
+	&& pip install docker-pycreds==0.4.0 \
+	&& pip install paramiko==2.4.2 \
 	&& pip install yamllint==1.5.0 \
 	&& pip install -r test-requirements.txt
 
@@ -239,5 +245,7 @@
 VOLUME /var/lib/docker
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
 ENTRYPOINT ["hack/dind"]
+
+FROM dev AS final
 # Upload docker source
 COPY . /go/src/github.com/docker/docker
diff --git a/Makefile b/Makefile
index 7767409..e253968 100644
--- a/Makefile
+++ b/Makefile
@@ -1,10 +1,8 @@
-.PHONY: all binary dynbinary build cross help init-go-pkg-cache install manpages run shell test test-docker-py test-integration test-unit validate win
+.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate win
 
 # set the graph driver as the current graphdriver if not set
 DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
 export DOCKER_GRAPHDRIVER
-DOCKER_INCREMENTAL_BINARY := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_INCREMENTAL_BINARY),1)
-export DOCKER_INCREMENTAL_BINARY
 
 # get OS/Arch of docker engine
 DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH}')
@@ -36,6 +34,7 @@
 	-e KEEPBUNDLE \
 	-e DOCKER_BUILD_ARGS \
 	-e DOCKER_BUILD_GOGC \
+	-e DOCKER_BUILD_OPTS \
 	-e DOCKER_BUILD_PKGS \
 	-e DOCKER_BUILDKIT \
 	-e DOCKER_BASH_COMPLETION_PATH \
@@ -44,7 +43,6 @@
 	-e DOCKER_EXPERIMENTAL \
 	-e DOCKER_GITCOMMIT \
 	-e DOCKER_GRAPHDRIVER \
-	-e DOCKER_INCREMENTAL_BINARY \
 	-e DOCKER_LDFLAGS \
 	-e DOCKER_PORT \
 	-e DOCKER_REMAP_ROOT \
@@ -74,6 +72,9 @@
 # (default to no bind mount if DOCKER_HOST is set)
 # note: BINDDIR is supported for backwards-compatibility here
 BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles))
+
+# DOCKER_MOUNT can be overridden, but use at your own risk!
+ifndef DOCKER_MOUNT
 DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)")
 
 # This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs.
@@ -81,17 +82,14 @@
 # Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set.
 DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) -v "$(CURDIR)/.git:/go/src/github.com/docker/docker/.git"
 
-# This allows to set the docker-dev container name
-DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
-
-# enable package cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set
-PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64:/usr/local/go/pkg/linux_amd64 goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo
-PKGCACHE_VOLROOT := dockerdev-go-pkg-cache
-PKGCACHE_VOL := $(if $(PKGCACHE_DIR),$(CURDIR)/$(PKGCACHE_DIR)/,$(PKGCACHE_VOLROOT)-)
-DOCKER_MOUNT_PKGCACHE := $(if $(DOCKER_INCREMENTAL_BINARY),$(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(PKGCACHE_VOL)\1"@g'),)
+DOCKER_MOUNT_CACHE := -v docker-dev-cache:/root/.cache
 DOCKER_MOUNT_CLI := $(if $(DOCKER_CLI_PATH),-v $(shell dirname $(DOCKER_CLI_PATH)):/usr/local/cli,)
 DOCKER_MOUNT_BASH_COMPLETION := $(if $(DOCKER_BASH_COMPLETION_PATH),-v $(shell dirname $(DOCKER_BASH_COMPLETION_PATH)):/usr/local/completion/bash,)
-DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
+DOCKER_MOUNT := $(DOCKER_MOUNT) $(DOCKER_MOUNT_CACHE) $(DOCKER_MOUNT_CLI) $(DOCKER_MOUNT_BASH_COMPLETION)
+endif # ifndef DOCKER_MOUNT
+
+# This allows setting the docker-dev container name
+DOCKER_CONTAINER_NAME := $(if $(CONTAINER_NAME),--name $(CONTAINER_NAME),)
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
 GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g")
@@ -119,6 +117,9 @@
 ifeq ($(INTERACTIVE), 1)
 	DOCKER_FLAGS += -t
 endif
+ifeq ($(BIND_DIR), .)
+	DOCKER_BUILD_OPTS += --target=dev
+endif
 
 DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
 
@@ -133,28 +134,26 @@
 dynbinary: build ## build the linux dynbinaries
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary
 
-build: bundles init-go-pkg-cache
+build: DOCKER_BUILDKIT ?= 1
+build: bundles
 	$(warning The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:${\n} DOCKER_CLI_PATH=/host/path/to/cli/binary make shell ${\n} then change the cli and compile into a binary at the same location.${\n})
-	docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
+	DOCKER_BUILDKIT="${DOCKER_BUILDKIT}" docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" .
 
 bundles:
 	mkdir bundles
 
-clean: clean-pkg-cache-vol ## clean up cached resources
+.PHONY: clean
+clean: clean-cache
 
-clean-pkg-cache-vol:
-	@- $(foreach mapping,$(PKGCACHE_MAP), \
-		$(shell docker volume rm $(PKGCACHE_VOLROOT)-$(shell echo $(mapping) | awk -F':/' '{ print $$1 }') > /dev/null 2>&1) \
-	)
+.PHONY: clean-cache
+clean-cache:
+	docker volume rm -f docker-dev-cache
 
 cross: build ## cross build the binaries for darwin, freebsd and\nwindows
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross
 
 help: ## this help
-	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
-
-init-go-pkg-cache:
-	$(if $(PKGCACHE_DIR), mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g'))
+	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z0-9_-]+:.*?## / {gsub("\\\\n",sprintf("\n%22c",""), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
 
 install: ## install the linux binaries
 	KEEPBUNDLE=1 hack/make.sh install-binary
@@ -176,6 +175,9 @@
 test-integration: build ## run the integration tests
 	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration
 
+test-integration-flaky: build ## run the stress test for all new integration tests
+	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-flaky
+
 test-unit: build ## run the unit tests
 	$(DOCKER_RUN_DOCKER) hack/test/unit
 
@@ -206,12 +208,11 @@
 	go build -buildmode=pie -o ./hack/integration-cli-on-swarm/integration-cli-on-swarm ./hack/integration-cli-on-swarm/host
 	@echo "Building $(INTEGRATION_CLI_MASTER_IMAGE)"
 	docker build -t $(INTEGRATION_CLI_MASTER_IMAGE) hack/integration-cli-on-swarm/agent
-# For worker, we don't use `docker build` so as to enable DOCKER_INCREMENTAL_BINARY and so on
 	@echo "Building $(INTEGRATION_CLI_WORKER_IMAGE) from $(DOCKER_IMAGE)"
 	$(eval tmp := integration-cli-worker-tmp)
 # We mount pkgcache, but not bundle (bundle needs to be baked into the image)
 # For avoiding bakings DOCKER_GRAPHDRIVER and so on to image, we cannot use $(DOCKER_ENVS) here
-	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS -e DOCKER_INCREMENTAL_BINARY --privileged $(DOCKER_MOUNT_PKGCACHE) $(DOCKER_IMAGE) top
+	docker run -t -d --name $(tmp) -e DOCKER_GITCOMMIT -e BUILDFLAGS --privileged $(DOCKER_IMAGE) top
 	docker exec $(tmp) hack/make.sh build-integration-test-binary dynbinary
 	docker exec $(tmp) go build -buildmode=pie -o /worker github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker
 	docker commit -c 'ENTRYPOINT ["/worker"]' $(tmp) $(INTEGRATION_CLI_WORKER_IMAGE)
diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
index bb01b29..dfd5434 100644
--- a/builder/builder-next/adapters/containerimage/pull.go
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -8,6 +8,7 @@
 	"io/ioutil"
 	"runtime"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/containerd/containerd/content"
@@ -57,13 +58,15 @@
 
 type imageSource struct {
 	SourceOpt
-	g flightcontrol.Group
+	g             flightcontrol.Group
+	resolverCache *resolverCache
 }
 
 // NewSource creates a new image source
 func NewSource(opt SourceOpt) (source.Source, error) {
 	is := &imageSource{
-		SourceOpt: opt,
+		SourceOpt:     opt,
+		resolverCache: newResolverCache(),
 	}
 
 	return is, nil
@@ -74,6 +77,9 @@
 }
 
 func (is *imageSource) getResolver(ctx context.Context, rfn resolver.ResolveOptionsFunc, ref string) remotes.Resolver {
+	if res := is.resolverCache.Get(ctx, ref); res != nil {
+		return res
+	}
 	opt := docker.ResolverOptions{
 		Client: tracing.DefaultClient,
 	}
@@ -82,6 +88,7 @@
 	}
 	opt.Credentials = is.getCredentialsFromSession(ctx)
 	r := docker.NewResolver(opt)
+	r = is.resolverCache.Add(ctx, ref, r)
 	return r
 }
 
@@ -380,6 +387,11 @@
 		return nil, err
 	}
 
+	// Workaround for a GCR bug that requires a request to the manifest endpoint for authentication to work.
+	// If the current resolver has not requested any manifests yet, do a dummy request.
+	// In most cases the resolver is cached and the extra request is not needed.
+	ensureManifestRequested(ctx, p.resolver, p.ref)
+
 	var (
 		schema1Converter *schema1.Converter
 		handlers         []images.Handler
@@ -791,3 +803,90 @@
 	}
 	return ""
 }
+
+type resolverCache struct {
+	mu sync.Mutex
+	m  map[string]cachedResolver
+}
+
+type cachedResolver struct {
+	timeout time.Time
+	remotes.Resolver
+	counter int64
+}
+
+func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
+	atomic.AddInt64(&cr.counter, 1)
+	return cr.Resolver.Resolve(ctx, ref)
+}
+
+func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	ref = r.repo(ref) + "-" + session.FromContext(ctx)
+
+	cr, ok := r.m[ref]
+	cr.timeout = time.Now().Add(time.Minute)
+	if ok {
+		return &cr
+	}
+
+	cr.Resolver = resolver
+	r.m[ref] = cr
+	return &cr
+}
+
+func (r *resolverCache) repo(refStr string) string {
+	ref, err := distreference.ParseNormalizedNamed(refStr)
+	if err != nil {
+		return refStr
+	}
+	return ref.Name()
+}
+
+func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	ref = r.repo(ref) + "-" + session.FromContext(ctx)
+
+	cr, ok := r.m[ref]
+	if !ok {
+		return nil
+	}
+	return &cr
+}
+
+func (r *resolverCache) clean(now time.Time) {
+	r.mu.Lock()
+	for k, cr := range r.m {
+		if now.After(cr.timeout) {
+			delete(r.m, k)
+		}
+	}
+	r.mu.Unlock()
+}
+
+func newResolverCache() *resolverCache {
+	rc := &resolverCache{
+		m: map[string]cachedResolver{},
+	}
+	t := time.NewTicker(time.Minute)
+	go func() {
+		for {
+			rc.clean(<-t.C)
+		}
+	}()
+	return rc
+}
+
+func ensureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) {
+	cr, ok := res.(*cachedResolver)
+	if !ok {
+		return
+	}
+	if atomic.LoadInt64(&cr.counter) == 0 {
+		res.Resolve(ctx, ref)
+	}
+}
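
The resolverCache added above keys resolvers by normalized repository name plus the build session ID, refreshes a one-minute expiry on every Add, and prunes stale entries from a background ticker; the wrapped Resolve also counts calls so ensureManifestRequested can issue a dummy manifest request for registries (such as GCR) that need one before authentication succeeds. A minimal standalone sketch of the caching pattern, using illustrative names (entry, cache) that are not part of this tree:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // entry stands in for the cachedResolver above: a value plus an expiry time.
    type entry struct {
        value   string
        timeout time.Time
    }

    type cache struct {
        mu sync.Mutex
        m  map[string]entry
    }

    // add refreshes the expiry on every call and only stores the value the
    // first time a key is seen, mirroring resolverCache.Add.
    func (c *cache) add(repo, session, value string) {
        c.mu.Lock()
        defer c.mu.Unlock()
        key := repo + "-" + session
        e := c.m[key]
        e.timeout = time.Now().Add(time.Minute)
        if e.value == "" {
            e.value = value
        }
        c.m[key] = e
    }

    func (c *cache) get(repo, session string) (string, bool) {
        c.mu.Lock()
        defer c.mu.Unlock()
        e, ok := c.m[repo+"-"+session]
        return e.value, ok
    }

    func main() {
        c := &cache{m: map[string]entry{}}
        c.add("docker.io/library/busybox", "session-1", "resolver-A")
        if v, ok := c.get("docker.io/library/busybox", "session-1"); ok {
            fmt.Println(v) // resolver-A
        }
    }
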
diff --git a/builder/remotecontext/detect.go b/builder/remotecontext/detect.go
index 144eb57..1becd0f 100644
--- a/builder/remotecontext/detect.go
+++ b/builder/remotecontext/detect.go
@@ -12,6 +12,7 @@
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/dockerignore"
+	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/moby/buildkit/frontend/dockerfile/parser"
@@ -34,8 +35,9 @@
 	case remoteURL == ClientSessionRemote:
 		res, err := parser.Parse(config.Source)
 		if err != nil {
-			return nil, nil, err
+			return nil, nil, errdefs.InvalidParameter(err)
 		}
+
 		return nil, res, nil
 	case urlutil.IsGitURL(remoteURL):
 		remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
@@ -106,7 +108,7 @@
 	switch contentType {
 	case mimeTypes.TextPlain:
 		res, err := parser.Parse(progressReader(content))
-		return nil, res, err
+		return nil, res, errdefs.InvalidParameter(err)
 	default:
 		source, err := FromArchive(progressReader(content))
 		if err != nil {
@@ -146,11 +148,17 @@
 	br := bufio.NewReader(rc)
 	if _, err := br.Peek(1); err != nil {
 		if err == io.EOF {
-			return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
+			return nil, errdefs.InvalidParameter(errors.Errorf("the Dockerfile (%s) cannot be empty", name))
 		}
 		return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
 	}
-	return parser.Parse(br)
+
+	dockerfile, err := parser.Parse(br)
+	if err != nil {
+		return nil, errdefs.InvalidParameter(errors.Wrapf(err, "failed to parse %s", name))
+	}
+
+	return dockerfile, nil
 }
 
 func openAt(remote builder.Source, path string) (driver.File, error) {
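
Wrapping the Dockerfile parser failures in errdefs.InvalidParameter classifies them as client errors, so the API surfaces a 400 instead of a 500 for an empty or unparseable Dockerfile. A small sketch of that classification, assuming only the errdefs helpers used above:

    package main

    import (
        "errors"
        "fmt"

        "github.com/docker/docker/errdefs"
    )

    func main() {
        err := errors.New("the Dockerfile (Dockerfile) cannot be empty")
        wrapped := errdefs.InvalidParameter(err)

        fmt.Println(errdefs.IsInvalidParameter(wrapped)) // true: routed as a client (400-class) error
        fmt.Println(wrapped)                             // the original message is preserved
    }
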
diff --git a/container/container_unix.go b/container/container_unix.go
index 6d402be..8419bd5 100644
--- a/container/container_unix.go
+++ b/container/container_unix.go
@@ -136,7 +136,7 @@
 		return err
 	}
 
-	id := stringid.GenerateNonCryptoID()
+	id := stringid.GenerateRandomID()
 	path, err := v.Mount(id)
 	if err != nil {
 		return err
diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go
index 77d21d2..6311fc8 100644
--- a/daemon/cluster/executor/container/container.go
+++ b/daemon/cluster/executor/container/container.go
@@ -6,7 +6,6 @@
 	"net"
 	"strconv"
 	"strings"
-	"time"
 
 	"github.com/sirupsen/logrus"
 
@@ -31,10 +30,6 @@
 )
 
 const (
-	// Explicitly use the kernel's default setting for CPU quota of 100ms.
-	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
-	cpuQuotaPeriod = 100 * time.Millisecond
-
 	// systemLabelPrefix represents the reserved namespace for system labels.
 	systemLabelPrefix = "com.docker.swarm"
 )
@@ -448,9 +443,7 @@
 	}
 
 	if r.Limits.NanoCPUs > 0 {
-		// CPU Period must be set in microseconds.
-		resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
-		resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
+		resources.NanoCPUs = r.Limits.NanoCPUs
 	}
 
 	return resources
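
The executor now passes r.Limits.NanoCPUs through unchanged instead of pre-converting it into a CPUPeriod/CPUQuota pair, leaving that conversion to the daemon's regular NanoCPUs handling. For comparison, a worked example (illustrative Go, not part of this tree) of what the removed conversion computed:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const nanoCPUs = int64(2_500_000_000) // a limit of 2.5 CPUs

        // What the deleted code derived from NanoCPUs:
        cpuPeriod := int64(100 * time.Millisecond / time.Microsecond) // 100000 µs, the kernel default CFS period
        cpuQuota := nanoCPUs * cpuPeriod / 1e9                        // 250000 µs of CPU time per period

        fmt.Println(cpuPeriod, cpuQuota)
    }
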
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
index 8d07079..466bca2 100644
--- a/daemon/cluster/executor/container/controller.go
+++ b/daemon/cluster/executor/container/controller.go
@@ -369,11 +369,17 @@
 	}
 
 	if err := r.adapter.shutdown(ctx); err != nil {
-		if isUnknownContainer(err) || isStoppedContainer(err) {
-			return nil
+		if !(isUnknownContainer(err) || isStoppedContainer(err)) {
+			return err
 		}
+	}
 
-		return err
+	// Try removing networks referenced in this task in case this
+	// task is the last one referencing it
+	if err := r.adapter.removeNetworks(ctx); err != nil {
+		if !isUnknownContainer(err) {
+			return err
+		}
 	}
 
 	return nil
@@ -419,15 +425,6 @@
 		log.G(ctx).WithError(err).Debug("shutdown failed on removal")
 	}
 
-	// Try removing networks referenced in this task in case this
-	// task is the last one referencing it
-	if err := r.adapter.removeNetworks(ctx); err != nil {
-		if isUnknownContainer(err) {
-			return nil
-		}
-		return err
-	}
-
 	if err := r.adapter.remove(ctx); err != nil {
 		if isUnknownContainer(err) {
 			return nil
diff --git a/daemon/create_unix.go b/daemon/create_unix.go
index 13857ba..e78415a 100644
--- a/daemon/create_unix.go
+++ b/daemon/create_unix.go
@@ -41,7 +41,7 @@
 	}
 
 	for spec := range config.Volumes {
-		name := stringid.GenerateNonCryptoID()
+		name := stringid.GenerateRandomID()
 		destination := filepath.Clean(spec)
 
 		// Skip volumes for which we already have something mounted on that
diff --git a/daemon/create_windows.go b/daemon/create_windows.go
index 37e425a..3bce278 100644
--- a/daemon/create_windows.go
+++ b/daemon/create_windows.go
@@ -38,7 +38,7 @@
 
 		// If the mountpoint doesn't have a name, generate one.
 		if len(mp.Name) == 0 {
-			mp.Name = stringid.GenerateNonCryptoID()
+			mp.Name = stringid.GenerateRandomID()
 		}
 
 		// Skip volumes for which we already have something mounted on that
diff --git a/daemon/daemon.go b/daemon/daemon.go
index a307863..b2c02a9 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -11,6 +11,7 @@
 	"io/ioutil"
 	"math/rand"
 	"net"
+	"net/url"
 	"os"
 	"path"
 	"path/filepath"
@@ -157,15 +158,18 @@
 		)
 		// must trim "https://" or "http://" prefix
 		for i, v := range daemon.configStore.Mirrors {
-			v = strings.TrimPrefix(v, "https://")
-			v = strings.TrimPrefix(v, "http://")
+			if uri, err := url.Parse(v); err == nil {
+				v = uri.Host
+			}
 			mirrors[i] = v
 		}
 		// set "registry-mirrors"
 		m[registryKey] = resolver.RegistryConf{Mirrors: mirrors}
 		// set "insecure-registries"
 		for _, v := range daemon.configStore.InsecureRegistries {
-			v = strings.TrimPrefix(v, "http://")
+			if uri, err := url.Parse(v); err == nil {
+				v = uri.Host
+			}
 			m[v] = resolver.RegistryConf{
 				PlainHTTP: true,
 			}
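
Using url.Parse instead of TrimPrefix means a mirror or insecure-registry entry written with a scheme is reduced to its host (and port) for the BuildKit resolver configuration. A small illustration, with hypothetical registry hostnames:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Entries carrying a scheme: uri.Host drops the "https://" / "http://"
        // prefix but keeps the port, which is what the resolver config expects.
        for _, v := range []string{"https://mirror.example.com:5000", "http://mirror.example.com"} {
            if uri, err := url.Parse(v); err == nil {
                v = uri.Host
            }
            fmt.Println(v) // mirror.example.com:5000, then mirror.example.com
        }
    }
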
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
index c036c46..cf2a955 100644
--- a/daemon/exec/exec.go
+++ b/daemon/exec/exec.go
@@ -39,7 +39,7 @@
 // NewConfig initializes the a new exec configuration
 func NewConfig() *Config {
 	return &Config{
-		ID:           stringid.GenerateNonCryptoID(),
+		ID:           stringid.GenerateRandomID(),
 		StreamConfig: stream.NewConfig(),
 		Started:      make(chan struct{}),
 	}
diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go
index fdc502b..0752c84 100644
--- a/daemon/graphdriver/aufs/aufs_test.go
+++ b/daemon/graphdriver/aufs/aufs_test.go
@@ -731,7 +731,7 @@
 	// create a bunch of ids
 	var ids []string
 	for i := 0; i < numConcurrent; i++ {
-		ids = append(ids, stringid.GenerateNonCryptoID())
+		ids = append(ids, stringid.GenerateRandomID())
 	}
 
 	if err := d.Create(ids[0], "", nil); err != nil {
diff --git a/daemon/logger/adapter.go b/daemon/logger/adapter.go
index d937035..97d59be 100644
--- a/daemon/logger/adapter.go
+++ b/daemon/logger/adapter.go
@@ -39,6 +39,13 @@
 	a.buf.TimeNano = msg.Timestamp.UnixNano()
 	a.buf.Partial = msg.PLogMetaData != nil
 	a.buf.Source = msg.Source
+	if msg.PLogMetaData != nil {
+		a.buf.PartialLogMetadata = &logdriver.PartialLogEntryMetadata{
+			Id:      msg.PLogMetaData.ID,
+			Last:    msg.PLogMetaData.Last,
+			Ordinal: int32(msg.PLogMetaData.Ordinal),
+		}
+	}
 
 	err := a.enc.Encode(&a.buf)
 	a.buf.Reset()
diff --git a/daemon/logger/plugin.go b/daemon/logger/plugin.go
index c66540c..8c155b0 100644
--- a/daemon/logger/plugin.go
+++ b/daemon/logger/plugin.go
@@ -81,7 +81,7 @@
 			return nil, err
 		}
 
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		a := &pluginAdapter{
 			driverName: name,
 			id:         id,
diff --git a/daemon/names.go b/daemon/names.go
index 6c31949..4fa39af 100644
--- a/daemon/names.go
+++ b/daemon/names.go
@@ -38,7 +38,7 @@
 func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
 	var (
 		err error
-		id  = stringid.GenerateNonCryptoID()
+		id  = stringid.GenerateRandomID()
 	)
 
 	if name == "" {
diff --git a/docs/contributing/set-up-dev-env.md b/docs/contributing/set-up-dev-env.md
index 3d56c0b..f0619cb 100644
--- a/docs/contributing/set-up-dev-env.md
+++ b/docs/contributing/set-up-dev-env.md
@@ -130,7 +130,7 @@
    ```none
    Successfully built 3d872560918e
    Successfully tagged docker-dev:dry-run-test
-   docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash
+   docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash
    #
    ```
 
diff --git a/hack/ci/janky b/hack/ci/janky
index f2bdfbf..88cb9d9 100755
--- a/hack/ci/janky
+++ b/hack/ci/janky
@@ -13,5 +13,6 @@
 	binary-daemon \
 	dynbinary \
 	test-docker-py \
+	test-integration-flaky \
 	test-integration \
 	cross
diff --git a/hack/dockerfile/install/containerd.installer b/hack/dockerfile/install/containerd.installer
index 3b36925..8b15eb8 100755
--- a/hack/dockerfile/install/containerd.installer
+++ b/hack/dockerfile/install/containerd.installer
@@ -4,7 +4,7 @@
 # containerd is also pinned in vendor.conf. When updating the binary
 # version you may also need to update the vendor version to pick up bug
 # fixes or new APIs.
-CONTAINERD_COMMIT=bb71b10fd8f58240ca47fbb579b9d1028eea7c84 # v1.2.5
+CONTAINERD_COMMIT=894b81a4b802e4eb2a91d1ce216b8817763c29fb # v1.2.6
 
 install_containerd() {
 	echo "Install containerd version $CONTAINERD_COMMIT"
diff --git a/hack/dockerfile/install/runc.installer b/hack/dockerfile/install/runc.installer
index 532a7f7..a8156db 100755
--- a/hack/dockerfile/install/runc.installer
+++ b/hack/dockerfile/install/runc.installer
@@ -4,7 +4,7 @@
 # The version of runc should match the version that is used by the containerd
 # version that is used. If you need to update runc, open a pull request in
 # the containerd project first, and update both after that is merged.
-RUNC_COMMIT=2b18fe1d885ee5083ef9f0838fee39b62d653e30
+RUNC_COMMIT=425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8
 
 install_runc() {
 	# If using RHEL7 kernels (3.10.0 el7), disable kmem accounting/limiting
diff --git a/hack/integration-cli-on-swarm/README.md b/hack/integration-cli-on-swarm/README.md
index 4f4f67d..852b36c 100644
--- a/hack/integration-cli-on-swarm/README.md
+++ b/hack/integration-cli-on-swarm/README.md
@@ -36,7 +36,6 @@
 Following environment variables are known to work in this step:
 
  - `BUILDFLAGS`
- - `DOCKER_INCREMENTAL_BINARY`
 
 Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. 
 
diff --git a/hack/make.sh b/hack/make.sh
index 2f4ece3..62c72a0 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -148,14 +148,6 @@
 ORIG_BUILDFLAGS=( -tags "autogen netgo osusergo static_build $DOCKER_BUILDTAGS" -installsuffix netgo )
 # see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here
 
-# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental
-# builds by installing dependent packages to the GOPATH.
-REBUILD_FLAG="-a"
-if [ "$DOCKER_INCREMENTAL_BINARY" == "1" ] || [ "$DOCKER_INCREMENTAL_BINARY" == "true" ]; then
-	REBUILD_FLAG="-i"
-fi
-ORIG_BUILDFLAGS+=( $REBUILD_FLAG )
-
 BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" )
 
 # Test timeout.
diff --git a/hack/make/test-integration-flaky b/hack/make/test-integration-flaky
new file mode 100644
index 0000000..14fb034
--- /dev/null
+++ b/hack/make/test-integration-flaky
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+set -e -o pipefail
+
+source hack/validate/.validate
+new_tests=$(
+    validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
+    grep -E '^(\+func )(.*)(\*testing)' || true
+)
+
+if [ -z "$new_tests" ]; then
+    echo 'No new tests added to integration.'
+    return
+fi
+
+echo
+echo "Found new integration tests:"
+echo "$new_tests"
+echo "Running stress test for them."
+
+(
+    TESTARRAY=$(echo "$new_tests" | sed 's/+func //' | awk -F'\\(' '{print $1}' | tr '\n' '|')
+    # Note: TEST_REPEAT will make the test suite run 5 times, restarting the daemon
+    # whereas testcount will make each test run 5 times in a row under the same daemon.
+    # This will make a total of 25 runs for each test in TESTARRAY.
+    export TEST_REPEAT=5
+    local testcount=5
+    # However, TIMEOUT needs to take testcount into account, or a premature time out may happen.
+    # The following ugliness will:
+    # - remove last character (usually 'm' from '10m')
+    # - multiply by testcount
+    # - add last character back
+    export TIMEOUT=$((${TIMEOUT::-1} * $testcount))${TIMEOUT:$((${#TIMEOUT}-1)):1}
+
+    export TESTFLAGS="-test.count $testcount -test.run ${TESTARRAY%?}"
+    echo "Using test flags: $TESTFLAGS"
+    source hack/make/test-integration
+)
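
The TIMEOUT adjustment above strips the unit suffix, multiplies the numeric part by testcount, and re-appends the suffix, so TIMEOUT=10m with testcount=5 becomes 50m. The same arithmetic written out in Go, purely as an illustration (scaleTimeout is not part of this tree):

    package main

    import (
        "fmt"
        "strconv"
    )

    // scaleTimeout mirrors the shell arithmetic: strip the unit suffix,
    // multiply the numeric part by testcount, then re-append the suffix.
    func scaleTimeout(timeout string, testcount int) string {
        unit := timeout[len(timeout)-1:]
        value, err := strconv.Atoi(timeout[:len(timeout)-1])
        if err != nil {
            return timeout // leave unrecognized values alone in this sketch
        }
        return strconv.Itoa(value*testcount) + unit
    }

    func main() {
        fmt.Println(scaleTimeout("10m", 5)) // 50m
    }
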
diff --git a/hack/test/unit b/hack/test/unit
index d0e85f1..ac27f68 100755
--- a/hack/test/unit
+++ b/hack/test/unit
@@ -19,10 +19,6 @@
 exclude_paths="/vendor/|/integration"
 pkg_list=$(go list $TESTDIRS | grep -vE "($exclude_paths)")
 
-# install test dependencies once before running tests for each package. This
-# significantly reduces the runtime.
-go test -i "${BUILDFLAGS[@]}" $pkg_list
-
 for pkg in $pkg_list; do
     go test "${BUILDFLAGS[@]}" \
         -cover \
diff --git a/integration-cli/docker_cli_external_volume_driver_unix_test.go b/integration-cli/docker_cli_external_volume_driver_unix_test.go
index b876d9f..e9e92d4 100644
--- a/integration-cli/docker_cli_external_volume_driver_unix_test.go
+++ b/integration-cli/docker_cli_external_volume_driver_unix_test.go
@@ -558,7 +558,7 @@
 }
 
 func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) {
-	driverName := stringid.GenerateNonCryptoID()
+	driverName := stringid.GenerateRandomID()
 	p := newVolumePlugin(c, driverName)
 	defer p.Close()
 
diff --git a/integration-cli/docker_cli_service_update_test.go b/integration-cli/docker_cli_service_update_test.go
deleted file mode 100644
index c729860..0000000
--- a/integration-cli/docker_cli_service_update_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// +build !windows
-
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-
-	"github.com/docker/docker/api/types/swarm"
-	"github.com/docker/docker/integration-cli/checker"
-	"github.com/go-check/check"
-)
-
-func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) {
-	d := s.AddDaemon(c, true, true)
-	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name=test", "busybox", "top")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-	service := d.GetService(c, "test")
-	c.Assert(service.Spec.Labels, checker.HasLen, 0)
-
-	// add label to empty set
-	out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-	service = d.GetService(c, "test")
-	c.Assert(service.Spec.Labels, checker.HasLen, 1)
-	c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
-
-	// add label to non-empty set
-	out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo2=bar")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-	service = d.GetService(c, "test")
-	c.Assert(service.Spec.Labels, checker.HasLen, 2)
-	c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar")
-
-	out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo2")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-	service = d.GetService(c, "test")
-	c.Assert(service.Spec.Labels, checker.HasLen, 1)
-	c.Assert(service.Spec.Labels["foo2"], checker.Equals, "")
-
-	out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-	service = d.GetService(c, "test")
-	c.Assert(service.Spec.Labels, checker.HasLen, 0)
-	c.Assert(service.Spec.Labels["foo"], checker.Equals, "")
-
-	// now make sure we can add again
-	out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-	service = d.GetService(c, "test")
-	c.Assert(service.Spec.Labels, checker.HasLen, 1)
-	c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
-}
-
-func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) {
-	d := s.AddDaemon(c, true, true)
-	testName := "test_secret"
-	id := d.CreateSecret(c, swarm.SecretSpec{
-		Annotations: swarm.Annotations{
-			Name: testName,
-		},
-		Data: []byte("TESTINGDATA"),
-	})
-	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
-	testTarget := "testing"
-	serviceName := "test"
-
-	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
-	// add secret
-	out, err = d.Cmd("service", "update", "--detach", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
-	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
-	c.Assert(err, checker.IsNil)
-
-	var refs []swarm.SecretReference
-	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
-	c.Assert(refs, checker.HasLen, 1)
-
-	c.Assert(refs[0].SecretName, checker.Equals, testName)
-	c.Assert(refs[0].File, checker.Not(checker.IsNil))
-	c.Assert(refs[0].File.Name, checker.Equals, testTarget)
-
-	// remove
-	out, err = d.Cmd("service", "update", "--detach", "test", "--secret-rm", testName)
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
-	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
-	c.Assert(err, checker.IsNil)
-
-	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
-	c.Assert(refs, checker.HasLen, 0)
-}
-
-func (s *DockerSwarmSuite) TestServiceUpdateConfigs(c *check.C) {
-	d := s.AddDaemon(c, true, true)
-	testName := "test_config"
-	id := d.CreateConfig(c, swarm.ConfigSpec{
-		Annotations: swarm.Annotations{
-			Name: testName,
-		},
-		Data: []byte("TESTINGDATA"),
-	})
-	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id))
-	testTarget := "/testing"
-	serviceName := "test"
-
-	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top")
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
-	// add config
-	out, err = d.Cmd("service", "update", "--detach", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
-	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
-	c.Assert(err, checker.IsNil)
-
-	var refs []swarm.ConfigReference
-	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
-	c.Assert(refs, checker.HasLen, 1)
-
-	c.Assert(refs[0].ConfigName, checker.Equals, testName)
-	c.Assert(refs[0].File, checker.Not(checker.IsNil))
-	c.Assert(refs[0].File.Name, checker.Equals, testTarget)
-
-	// remove
-	out, err = d.Cmd("service", "update", "--detach", "test", "--config-rm", testName)
-	c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
-	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
-	c.Assert(err, checker.IsNil)
-
-	c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
-	c.Assert(refs, checker.HasLen, 0)
-}
diff --git a/integration/build/build_test.go b/integration/build/build_test.go
index 6a2b1fd..6fe18fc 100644
--- a/integration/build/build_test.go
+++ b/integration/build/build_test.go
@@ -474,6 +474,61 @@
 	assert.Check(t, is.Contains(out.String(), "Successfully built"))
 }
 
+func TestBuildWithEmptyDockerfile(t *testing.T) {
+	ctx := context.TODO()
+	defer setupTest(t)()
+
+	tests := []struct {
+		name        string
+		dockerfile  string
+		expectedErr string
+	}{
+		{
+			name:        "empty-dockerfile",
+			dockerfile:  "",
+			expectedErr: "cannot be empty",
+		},
+		{
+			name: "empty-lines-dockerfile",
+			dockerfile: `
+			
+			
+			
+			`,
+			expectedErr: "file with no instructions",
+		},
+		{
+			name:        "comment-only-dockerfile",
+			dockerfile:  `# this is a comment`,
+			expectedErr: "file with no instructions",
+		},
+	}
+
+	apiclient := testEnv.APIClient()
+
+	for _, tc := range tests {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			buf := bytes.NewBuffer(nil)
+			w := tar.NewWriter(buf)
+			writeTarRecord(t, w, "Dockerfile", tc.dockerfile)
+			err := w.Close()
+			assert.NilError(t, err)
+
+			_, err = apiclient.ImageBuild(ctx,
+				buf,
+				types.ImageBuildOptions{
+					Remove:      true,
+					ForceRemove: true,
+				})
+
+			assert.Check(t, is.Contains(err.Error(), tc.expectedErr))
+		})
+	}
+}
+
 func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) {
 	err := w.WriteHeader(&tar.Header{
 		Name:     fn,
diff --git a/integration/container/rename_test.go b/integration/container/rename_test.go
index 25474a7..24bbe98 100644
--- a/integration/container/rename_test.go
+++ b/integration/container/rename_test.go
@@ -61,7 +61,7 @@
 	assert.NilError(t, err)
 	assert.Check(t, is.Equal("/"+oldName, inspect.Name))
 
-	newName := "new_name" + stringid.GenerateNonCryptoID()
+	newName := "new_name" + stringid.GenerateRandomID()
 	err = client.ContainerRename(ctx, oldName, newName)
 	assert.NilError(t, err)
 
@@ -79,7 +79,7 @@
 	cID := container.Run(t, ctx, client, container.WithName(oldName))
 	poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
 
-	newName := "new_name" + stringid.GenerateNonCryptoID()
+	newName := "new_name" + stringid.GenerateRandomID()
 	err := client.ContainerRename(ctx, oldName, newName)
 	assert.NilError(t, err)
 
diff --git a/integration/internal/swarm/states.go b/integration/internal/swarm/states.go
index 51d6200..c51e1ee 100644
--- a/integration/internal/swarm/states.go
+++ b/integration/internal/swarm/states.go
@@ -5,6 +5,7 @@
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
+	swarmtypes "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"gotest.tools/poll"
 )
@@ -45,3 +46,27 @@
 		}
 	}
 }
+
+// RunningTasksCount verifies there are `instances` tasks running for `serviceID`
+func RunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		filter := filters.NewArgs()
+		filter.Add("service", serviceID)
+		tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
+			Filters: filter,
+		})
+		switch {
+		case err != nil:
+			return poll.Error(err)
+		case len(tasks) == int(instances):
+			for _, task := range tasks {
+				if task.Status.State != swarmtypes.TaskStateRunning {
+					return poll.Continue("waiting for tasks to enter run state")
+				}
+			}
+			return poll.Success()
+		default:
+			return poll.Continue("task count at %d waiting for %d", len(tasks), instances)
+		}
+	}
+}
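
RunningTasksCount replaces the duplicated serviceRunningTasksCount helpers removed from the individual test files below. A sketch of how a test in the integration/service package consumes it, assuming that package's usual testEnv/setupTest harness; the test name here is illustrative:

    package service

    import (
        "testing"

        "github.com/docker/docker/integration/internal/swarm"
        "gotest.tools/poll"
    )

    func TestServiceReachesReplicas(t *testing.T) {
        defer setupTest(t)()
        d := swarm.NewSwarm(t, testEnv)
        defer d.Stop(t)
        cli := d.NewClientT(t)
        defer cli.Close()

        var instances uint64 = 2
        serviceID := swarm.CreateService(t, d, swarm.ServiceWithReplicas(instances))

        // Poll until exactly `instances` tasks of the service report the running state.
        poll.WaitOn(t, swarm.RunningTasksCount(cli, serviceID, instances), swarm.ServicePoll)
    }
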
diff --git a/integration/network/inspect_test.go b/integration/network/inspect_test.go
index d12ad67..02d2b75 100644
--- a/integration/network/inspect_test.go
+++ b/integration/network/inspect_test.go
@@ -5,9 +5,6 @@
 	"testing"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	swarmtypes "github.com/docker/docker/api/types/swarm"
-	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/network"
 	"github.com/docker/docker/integration/internal/swarm"
 	"gotest.tools/assert"
@@ -38,7 +35,7 @@
 		swarm.ServiceWithNetwork(networkName),
 	)
 
-	poll.WaitOn(t, serviceRunningTasksCount(c, serviceID, instances), swarm.ServicePoll)
+	poll.WaitOn(t, swarm.RunningTasksCount(c, serviceID, instances), swarm.ServicePoll)
 
 	tests := []struct {
 		name    string
@@ -103,30 +100,3 @@
 	assert.NilError(t, err)
 	poll.WaitOn(t, network.IsRemoved(ctx, c, overlayID), swarm.NetworkPoll)
 }
-
-func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
-	return func(log poll.LogT) poll.Result {
-		tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
-			Filters: filters.NewArgs(
-				filters.Arg("service", serviceID),
-				filters.Arg("desired-state", string(swarmtypes.TaskStateRunning)),
-			),
-		})
-		switch {
-		case err != nil:
-			return poll.Error(err)
-		case len(tasks) == int(instances):
-			for _, task := range tasks {
-				if task.Status.Err != "" {
-					log.Log("task error:", task.Status.Err)
-				}
-				if task.Status.State != swarmtypes.TaskStateRunning {
-					return poll.Continue("waiting for tasks to enter run state (current status: %s)", task.Status.State)
-				}
-			}
-			return poll.Success()
-		default:
-			return poll.Continue("task count for service %s at %d waiting for %d", serviceID, len(tasks), instances)
-		}
-	}
-}
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
index 6e79bec..91e3274 100644
--- a/integration/service/create_test.go
+++ b/integration/service/create_test.go
@@ -42,18 +42,18 @@
 		booleanFalse := false
 
 		serviceID := swarm.CreateService(t, d)
-		poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
+		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
 		i := inspectServiceContainer(t, client, serviceID)
 		// HostConfig.Init == nil means that it delegates to daemon configuration
 		assert.Check(t, i.HostConfig.Init == nil)
 
 		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue))
-		poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
+		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
 		i = inspectServiceContainer(t, client, serviceID)
 		assert.Check(t, is.Equal(true, *i.HostConfig.Init))
 
 		serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse))
-		poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
+		poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
 		i = inspectServiceContainer(t, client, serviceID)
 		assert.Check(t, is.Equal(false, *i.HostConfig.Init))
 	}
@@ -97,7 +97,7 @@
 	}
 
 	serviceID := swarm.CreateService(t, d, serviceSpec...)
-	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
+	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
 
 	_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
 	assert.NilError(t, err)
@@ -108,7 +108,7 @@
 	poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)
 
 	serviceID2 := swarm.CreateService(t, d, serviceSpec...)
-	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)
+	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)
 
 	err = client.ServiceRemove(context.Background(), serviceID2)
 	assert.NilError(t, err)
@@ -147,7 +147,7 @@
 		swarm.ServiceWithNetwork(name),
 	)
 
-	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
+	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
 
 	resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
 	assert.NilError(t, err)
@@ -210,7 +210,7 @@
 		}),
 	)
 
-	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
+	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
 
 	filter := filters.NewArgs()
 	filter.Add("service", serviceID)
@@ -274,7 +274,7 @@
 		}),
 	)
 
-	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances))
+	poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))
 
 	filter := filters.NewArgs()
 	filter.Add("service", serviceID)
@@ -301,26 +301,3 @@
 	err = client.ConfigRemove(ctx, configName)
 	assert.NilError(t, err)
 }
-
-func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
-	return func(log poll.LogT) poll.Result {
-		filter := filters.NewArgs()
-		filter.Add("service", serviceID)
-		tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
-			Filters: filter,
-		})
-		switch {
-		case err != nil:
-			return poll.Error(err)
-		case len(tasks) == int(instances):
-			for _, task := range tasks {
-				if task.Status.State != swarmtypes.TaskStateRunning {
-					return poll.Continue("waiting for tasks to enter run state")
-				}
-			}
-			return poll.Success()
-		default:
-			return poll.Continue("task count at %d waiting for %d", len(tasks), instances)
-		}
-	}
-}
diff --git a/integration/service/update_test.go b/integration/service/update_test.go
new file mode 100644
index 0000000..8575e56
--- /dev/null
+++ b/integration/service/update_test.go
@@ -0,0 +1,287 @@
+package service // import "github.com/docker/docker/integration/service"
+
+import (
+	"context"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	swarmtypes "github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration/internal/network"
+	"github.com/docker/docker/integration/internal/swarm"
+	"gotest.tools/assert"
+	is "gotest.tools/assert/cmp"
+	"gotest.tools/poll"
+	"gotest.tools/skip"
+)
+
+func TestServiceUpdateLabel(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+	defer setupTest(t)()
+	d := swarm.NewSwarm(t, testEnv)
+	defer d.Stop(t)
+	cli := d.NewClientT(t)
+	defer cli.Close()
+
+	ctx := context.Background()
+	serviceName := "TestService_" + t.Name()
+	serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName))
+	service := getService(t, cli, serviceID)
+	assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{}))
+
+	// add label to empty set
+	service.Spec.Labels["foo"] = "bar"
+	_, err := cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
+
+	// add label to non-empty set
+	service.Spec.Labels["foo2"] = "bar"
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar", "foo2": "bar"}))
+
+	delete(service.Spec.Labels, "foo2")
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
+
+	delete(service.Spec.Labels, "foo")
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{}))
+
+	// now make sure we can add again
+	service.Spec.Labels["foo"] = "bar"
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
+
+	err = cli.ServiceRemove(context.Background(), serviceID)
+	assert.NilError(t, err)
+}
+
+func TestServiceUpdateSecrets(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+	defer setupTest(t)()
+	d := swarm.NewSwarm(t, testEnv)
+	defer d.Stop(t)
+	cli := d.NewClientT(t)
+	defer cli.Close()
+
+	ctx := context.Background()
+	secretName := "TestSecret_" + t.Name()
+	secretTarget := "targetName"
+	resp, err := cli.SecretCreate(ctx, swarmtypes.SecretSpec{
+		Annotations: swarmtypes.Annotations{
+			Name: secretName,
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	assert.NilError(t, err)
+	assert.Check(t, resp.ID != "")
+
+	serviceName := "TestService_" + t.Name()
+	serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName))
+	service := getService(t, cli, serviceID)
+
+	// add secret
+	service.Spec.TaskTemplate.ContainerSpec.Secrets = append(service.Spec.TaskTemplate.ContainerSpec.Secrets,
+		&swarmtypes.SecretReference{
+			File: &swarmtypes.SecretReferenceFileTarget{
+				Name: secretTarget,
+				UID:  "0",
+				GID:  "0",
+				Mode: 0600,
+			},
+			SecretID:   resp.ID,
+			SecretName: secretName,
+		},
+	)
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+
+	service = getService(t, cli, serviceID)
+	secrets := service.Spec.TaskTemplate.ContainerSpec.Secrets
+	assert.Assert(t, is.Equal(1, len(secrets)))
+
+	secret := *secrets[0]
+	assert.Check(t, is.Equal(secretName, secret.SecretName))
+	assert.Check(t, nil != secret.File)
+	assert.Check(t, is.Equal(secretTarget, secret.File.Name))
+
+	// remove
+	service.Spec.TaskTemplate.ContainerSpec.Secrets = []*swarmtypes.SecretReference{}
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.Equal(0, len(service.Spec.TaskTemplate.ContainerSpec.Secrets)))
+
+	err = cli.ServiceRemove(context.Background(), serviceID)
+	assert.NilError(t, err)
+}
+
+func TestServiceUpdateConfigs(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+	defer setupTest(t)()
+	d := swarm.NewSwarm(t, testEnv)
+	defer d.Stop(t)
+	cli := d.NewClientT(t)
+	defer cli.Close()
+
+	ctx := context.Background()
+	configName := "TestConfig_" + t.Name()
+	configTarget := "targetName"
+	resp, err := cli.ConfigCreate(ctx, swarmtypes.ConfigSpec{
+		Annotations: swarmtypes.Annotations{
+			Name: configName,
+		},
+		Data: []byte("TESTINGDATA"),
+	})
+	assert.NilError(t, err)
+	assert.Check(t, resp.ID != "")
+
+	serviceName := "TestService_" + t.Name()
+	serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName))
+	service := getService(t, cli, serviceID)
+
+	// add config
+	service.Spec.TaskTemplate.ContainerSpec.Configs = append(service.Spec.TaskTemplate.ContainerSpec.Configs,
+		&swarmtypes.ConfigReference{
+			File: &swarmtypes.ConfigReferenceFileTarget{
+				Name: configTarget,
+				UID:  "0",
+				GID:  "0",
+				Mode: 0600,
+			},
+			ConfigID:   resp.ID,
+			ConfigName: configName,
+		},
+	)
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+
+	service = getService(t, cli, serviceID)
+	configs := service.Spec.TaskTemplate.ContainerSpec.Configs
+	assert.Assert(t, is.Equal(1, len(configs)))
+
+	config := *configs[0]
+	assert.Check(t, is.Equal(configName, config.ConfigName))
+	assert.Check(t, nil != config.File)
+	assert.Check(t, is.Equal(configTarget, config.File.Name))
+
+	// remove
+	service.Spec.TaskTemplate.ContainerSpec.Configs = []*swarmtypes.ConfigReference{}
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+	service = getService(t, cli, serviceID)
+	assert.Check(t, is.Equal(0, len(service.Spec.TaskTemplate.ContainerSpec.Configs)))
+
+	err = cli.ServiceRemove(context.Background(), serviceID)
+	assert.NilError(t, err)
+}
+
+func TestServiceUpdateNetwork(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+	defer setupTest(t)()
+	d := swarm.NewSwarm(t, testEnv)
+	defer d.Stop(t)
+	cli := d.NewClientT(t)
+	defer cli.Close()
+
+	ctx := context.Background()
+
+	// Create an overlay network
+	testNet := "testNet" + t.Name()
+	overlayID := network.CreateNoError(t, ctx, cli, testNet,
+		network.WithDriver("overlay"))
+
+	var instances uint64 = 1
+	// Create service with the overlay network
+	serviceName := "TestServiceUpdateNetworkRM_" + t.Name()
+	serviceID := swarm.CreateService(t, d,
+		swarm.ServiceWithReplicas(instances),
+		swarm.ServiceWithName(serviceName),
+		swarm.ServiceWithNetwork(testNet))
+
+	poll.WaitOn(t, swarm.RunningTasksCount(cli, serviceID, instances), swarm.ServicePoll)
+	service := getService(t, cli, serviceID)
+	netInfo, err := cli.NetworkInspect(ctx, testNet, types.NetworkInspectOptions{
+		Verbose: true,
+		Scope:   "swarm",
+	})
+	assert.NilError(t, err)
+	assert.Assert(t, len(netInfo.Containers) == 2, "Expected 2 endpoints, one for container and one for LB Sandbox")
+
+	// Remove network from service
+	service.Spec.TaskTemplate.Networks = []swarmtypes.NetworkAttachmentConfig{}
+	_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+	assert.NilError(t, err)
+	poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+
+	netInfo, err = cli.NetworkInspect(ctx, testNet, types.NetworkInspectOptions{
+		Verbose: true,
+		Scope:   "swarm",
+	})
+
+	assert.NilError(t, err)
+	assert.Assert(t, len(netInfo.Containers) == 0, "Load balancing endpoint still exists in network")
+
+	err = cli.NetworkRemove(ctx, overlayID)
+	assert.NilError(t, err)
+
+	err = cli.ServiceRemove(ctx, serviceID)
+	assert.NilError(t, err)
+}
+
+func getService(t *testing.T, cli client.ServiceAPIClient, serviceID string) swarmtypes.Service {
+	t.Helper()
+	service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+	assert.NilError(t, err)
+	return service
+}
+
+func serviceIsUpdated(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		service, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+		switch {
+		case err != nil:
+			return poll.Error(err)
+		case service.UpdateStatus != nil && service.UpdateStatus.State == swarmtypes.UpdateStateCompleted:
+			return poll.Success()
+		default:
+			if service.UpdateStatus != nil {
+				return poll.Continue("waiting for service %s to be updated, state: %s, message: %s", serviceID, service.UpdateStatus.State, service.UpdateStatus.Message)
+			}
+			return poll.Continue("waiting for service %s to be updated", serviceID)
+		}
+	}
+}
+
+func serviceSpecIsUpdated(client client.ServiceAPIClient, serviceID string, serviceOldVersion uint64) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		service, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+		switch {
+		case err != nil:
+			return poll.Error(err)
+		case service.Version.Index > serviceOldVersion:
+			return poll.Success()
+		default:
+			return poll.Continue("waiting for service %s to be updated", serviceID)
+		}
+	}
+}
diff --git a/internal/test/environment/environment.go b/internal/test/environment/environment.go
index 5538d20..76f94a5 100644
--- a/internal/test/environment/environment.go
+++ b/internal/test/environment/environment.go
@@ -75,10 +75,13 @@
 		}
 	case "windows":
 		baseImage := "microsoft/windowsservercore"
-		if override := os.Getenv("WINDOWS_BASE_IMAGE"); override != "" {
-			baseImage = override
-			fmt.Println("INFO: Windows Base image is ", baseImage)
+		if overrideBaseImage := os.Getenv("WINDOWS_BASE_IMAGE"); overrideBaseImage != "" {
+			baseImage = overrideBaseImage
+			if overrideBaseImageTag := os.Getenv("WINDOWS_BASE_IMAGE_TAG"); overrideBaseImageTag != "" {
+				baseImage = baseImage + ":" + overrideBaseImageTag
+			}
 		}
+		fmt.Println("INFO: Windows Base image is ", baseImage)
 		return PlatformDefaults{
 			BaseImage:            baseImage,
 			VolumesConfigPath:    filepath.FromSlash(volumesPath),
diff --git a/pkg/idtools/idtools.go b/pkg/idtools/idtools.go
index 230422e..b3af7a4 100644
--- a/pkg/idtools/idtools.go
+++ b/pkg/idtools/idtools.go
@@ -4,7 +4,6 @@
 	"bufio"
 	"fmt"
 	"os"
-	"sort"
 	"strconv"
 	"strings"
 )
@@ -203,8 +202,6 @@
 func createIDMap(subidRanges ranges) []IDMap {
 	idMap := []IDMap{}
 
-	// sort the ranges by lowest ID first
-	sort.Sort(subidRanges)
 	containerID := 0
 	for _, idrange := range subidRanges {
 		idMap = append(idMap, IDMap{
diff --git a/pkg/idtools/idtools_test.go b/pkg/idtools/idtools_test.go
new file mode 100644
index 0000000..7627d19
--- /dev/null
+++ b/pkg/idtools/idtools_test.go
@@ -0,0 +1,28 @@
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+	"testing"
+
+	"gotest.tools/assert"
+)
+
+func TestCreateIDMapOrder(t *testing.T) {
+	subidRanges := ranges{
+		{100000, 1000},
+		{1000, 1},
+	}
+
+	idMap := createIDMap(subidRanges)
+	assert.DeepEqual(t, idMap, []IDMap{
+		{
+			ContainerID: 0,
+			HostID:      100000,
+			Size:        1000,
+		},
+		{
+			ContainerID: 1000,
+			HostID:      1000,
+			Size:        1,
+		},
+	})
+}
diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go
index fa7d916..5fe071d 100644
--- a/pkg/stringid/stringid.go
+++ b/pkg/stringid/stringid.go
@@ -2,17 +2,12 @@
 package stringid // import "github.com/docker/docker/pkg/stringid"
 
 import (
-	cryptorand "crypto/rand"
+	"crypto/rand"
 	"encoding/hex"
 	"fmt"
-	"io"
-	"math"
-	"math/big"
-	"math/rand"
 	"regexp"
 	"strconv"
 	"strings"
-	"time"
 )
 
 const shortLen = 12
@@ -41,10 +36,11 @@
 	return id
 }
 
-func generateID(r io.Reader) string {
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
 	b := make([]byte, 32)
 	for {
-		if _, err := io.ReadFull(r, b); err != nil {
+		if _, err := rand.Read(b); err != nil {
 			panic(err) // This shouldn't happen
 		}
 		id := hex.EncodeToString(b)
@@ -58,18 +54,6 @@
 	}
 }
 
-// GenerateRandomID returns a unique id.
-func GenerateRandomID() string {
-	return generateID(cryptorand.Reader)
-}
-
-// GenerateNonCryptoID generates unique id without using cryptographically
-// secure sources of random.
-// It helps you to save entropy.
-func GenerateNonCryptoID() string {
-	return generateID(readerFunc(rand.Read))
-}
-
 // ValidateID checks whether an ID string is a valid image ID.
 func ValidateID(id string) error {
 	if ok := validHex.MatchString(id); !ok {
@@ -77,23 +61,3 @@
 	}
 	return nil
 }
-
-func init() {
-	// safely set the seed globally so we generate random ids. Tries to use a
-	// crypto seed before falling back to time.
-	var seed int64
-	if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
-		// This should not happen, but worst-case fallback to time-based seed.
-		seed = time.Now().UnixNano()
-	} else {
-		seed = cryptoseed.Int64()
-	}
-
-	rand.Seed(seed)
-}
-
-type readerFunc func(p []byte) (int, error)
-
-func (fn readerFunc) Read(p []byte) (int, error) {
-	return fn(p)
-}
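
GenerateNonCryptoID (and the package's math/rand seeding) is gone; every ID now comes from crypto/rand via GenerateRandomID. A minimal standalone sketch of the consolidated behavior, assuming the unchanged tail of the loop still retries when the 12-character truncated form parses as a plain integer:

    package main

    import (
        "crypto/rand"
        "encoding/hex"
        "fmt"
        "strconv"
    )

    func generateRandomID() string {
        b := make([]byte, 32)
        for {
            if _, err := rand.Read(b); err != nil {
                panic(err) // reads from crypto/rand are not expected to fail
            }
            id := hex.EncodeToString(b)
            // retry if the truncated ID would parse as a plain integer, since
            // such IDs are ambiguous in contexts that also accept numbers
            if _, err := strconv.ParseInt(id[:12], 10, 64); err == nil {
                continue
            }
            return id
        }
    }

    func main() {
        fmt.Println(generateRandomID()) // 64 hex characters
    }
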
diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go
index a7ccd5f..2660d2e 100644
--- a/pkg/stringid/stringid_test.go
+++ b/pkg/stringid/stringid_test.go
@@ -13,14 +13,6 @@
 	}
 }
 
-func TestGenerateNonCryptoID(t *testing.T) {
-	id := GenerateNonCryptoID()
-
-	if len(id) != 64 {
-		t.Fatalf("Id returned is incorrect: %s", id)
-	}
-}
-
 func TestShortenId(t *testing.T) {
 	id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2"
 	truncID := TruncateID(id)
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
index e259017..6d00a24 100644
--- a/pkg/truncindex/truncindex_test.go
+++ b/pkg/truncindex/truncindex_test.go
@@ -158,7 +158,7 @@
 func BenchmarkTruncIndexAdd100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -174,7 +174,7 @@
 func BenchmarkTruncIndexAdd250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -190,7 +190,7 @@
 func BenchmarkTruncIndexAdd500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -207,7 +207,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -231,7 +231,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -255,7 +255,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	index := NewTruncIndex([]string{})
 	for _, id := range testSet {
@@ -278,7 +278,7 @@
 func BenchmarkTruncIndexDelete100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -301,7 +301,7 @@
 func BenchmarkTruncIndexDelete250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -324,7 +324,7 @@
 func BenchmarkTruncIndexDelete500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -347,7 +347,7 @@
 func BenchmarkTruncIndexNew100(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 100; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -358,7 +358,7 @@
 func BenchmarkTruncIndexNew250(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 250; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -369,7 +369,7 @@
 func BenchmarkTruncIndexNew500(b *testing.B) {
 	var testSet []string
 	for i := 0; i < 500; i++ {
-		testSet = append(testSet, stringid.GenerateNonCryptoID())
+		testSet = append(testSet, stringid.GenerateRandomID())
 	}
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
@@ -381,7 +381,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -406,7 +406,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
@@ -431,7 +431,7 @@
 	var testSet []string
 	var testKeys []string
 	for i := 0; i < 500; i++ {
-		id := stringid.GenerateNonCryptoID()
+		id := stringid.GenerateRandomID()
 		testSet = append(testSet, id)
 		l := rand.Intn(12) + 12
 		testKeys = append(testKeys, id[:l])
diff --git a/plugin/manager_linux_test.go b/plugin/manager_linux_test.go
index fd8fa85..1b6a3bf 100644
--- a/plugin/manager_linux_test.go
+++ b/plugin/manager_linux_test.go
@@ -70,7 +70,7 @@
 }
 
 func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
-	id := stringid.GenerateNonCryptoID()
+	id := stringid.GenerateRandomID()
 	rootfs := filepath.Join(root, id)
 	if err := os.MkdirAll(rootfs, 0755); err != nil {
 		t.Fatal(err)
diff --git a/vendor.conf b/vendor.conf
index fac0ede..c508c5f 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -26,7 +26,7 @@
 golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
 
 # buildkit
-github.com/moby/buildkit ed4da8b4a9661f278ae8433056ca37d0727c408b # docker-18.09 branch
+github.com/moby/buildkit 05766c5c21a1e528eeb1c3522b2f05493fe9ac47 # docker-18.09 branch
 github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca
 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -121,7 +121,7 @@
 github.com/containerd/containerd 9754871865f7fe2f4e74d43e2fc7ccd237edcbce # v1.2.2
 github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
 github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
-github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
+github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
 github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
 github.com/containerd/cri 0d5cabd006cb5319dc965046067b8432d9fa5ef8 # release/1.2 branch
 github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
index 9fbea82..e3ef076 100644
--- a/vendor/github.com/containerd/cgroups/cgroup.go
+++ b/vendor/github.com/containerd/cgroups/cgroup.go
@@ -105,6 +105,10 @@
 		}
 		activeSubsystems = append(activeSubsystems, s)
 	}
+	// if we do not have any active subsystems, then the cgroup is deleted
+	if len(activeSubsystems) == 0 {
+		return nil, ErrCgroupDeleted
+	}
 	return &cgroup{
 		path:       path,
 		subsystems: activeSubsystems,
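
The vendored bump picks up a containerd/cgroups change: loading a cgroup whose subsystem directories have all been removed now returns ErrCgroupDeleted instead of an empty handle. A rough caller-side sketch, assuming the containerd/cgroups v1 API (Load, V1, StaticPath):

    package main

    import (
        "fmt"

        "github.com/containerd/cgroups"
    )

    func main() {
        control, err := cgroups.Load(cgroups.V1, cgroups.StaticPath("/mycontainer"))
        if err == cgroups.ErrCgroupDeleted {
            // the cgroup has no active subsystems left; treat it as gone
            fmt.Println("cgroup deleted")
            return
        }
        if err != nil {
            fmt.Println("load error:", err)
            return
        }
        fmt.Println("loaded cgroup:", control)
    }
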
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
index 0453f3a..262a768 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
@@ -275,6 +275,11 @@
 	if len(warnings) > 0 {
 		warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
 	}
+
+	if root.StartLine < 0 {
+		return nil, errors.New("file with no instructions.")
+	}
+
 	return &Result{
 		AST:         root,
 		Warnings:    warnings,
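
The buildkit bump also changes the Dockerfile parser: a file with no instructions (only comments or blank lines) now produces an error rather than an AST with a negative start line. A small sketch, assuming parser.Parse takes an io.Reader and returns (*parser.Result, error):

    package main

    import (
        "fmt"
        "strings"

        "github.com/moby/buildkit/frontend/dockerfile/parser"
    )

    func main() {
        // a Dockerfile containing only a comment has no instructions
        _, err := parser.Parse(strings.NewReader("# just a comment\n"))
        fmt.Println(err) // expected: an error instead of a nil error with an empty AST
    }
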
diff --git a/volume/mounts/linux_parser.go b/volume/mounts/linux_parser.go
index 8e436ae..22ba747 100644
--- a/volume/mounts/linux_parser.go
+++ b/volume/mounts/linux_parser.go
@@ -82,7 +82,10 @@
 		}
 
 		if validateBindSourceExists {
-			exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source)
+			exists, _, err := currentFileInfoProvider.fileInfo(mnt.Source)
+			if err != nil {
+				return &errMountConfig{mnt, err}
+			}
 			if !exists {
 				return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)}
 			}
@@ -292,7 +295,7 @@
 	switch cfg.Type {
 	case mount.TypeVolume:
 		if cfg.Source == "" {
-			mp.Name = stringid.GenerateNonCryptoID()
+			mp.Name = stringid.GenerateRandomID()
 		} else {
 			mp.Name = cfg.Source
 		}
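
The first hunk in this file makes bind-source validation propagate the error from the fileInfo probe instead of collapsing every failure into "bind source path does not exist". An illustrative sketch (not the parser's actual helper) of why that distinction matters:

    package main

    import (
        "fmt"
        "os"
    )

    // fileInfo distinguishes "path does not exist" from other stat failures,
    // such as permission errors, which the parser now reports as-is.
    func fileInfo(path string) (exists bool, isDir bool, err error) {
        fi, err := os.Stat(path)
        if os.IsNotExist(err) {
            return false, false, nil
        }
        if err != nil {
            return false, false, err // e.g. permission denied
        }
        return true, fi.IsDir(), nil
    }

    func main() {
        exists, isDir, err := fileInfo("/root/secret/bind-source")
        fmt.Println(exists, isDir, err)
    }
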
diff --git a/volume/mounts/mounts.go b/volume/mounts/mounts.go
index 63a1406..5bf169f 100644
--- a/volume/mounts/mounts.go
+++ b/volume/mounts/mounts.go
@@ -125,7 +125,7 @@
 	if m.Volume != nil {
 		id := m.ID
 		if id == "" {
-			id = stringid.GenerateNonCryptoID()
+			id = stringid.GenerateRandomID()
 		}
 		path, err := m.Volume.Mount(id)
 		if err != nil {
diff --git a/volume/mounts/parser_test.go b/volume/mounts/parser_test.go
index 27257d6..f9b32e5 100644
--- a/volume/mounts/parser_test.go
+++ b/volume/mounts/parser_test.go
@@ -1,6 +1,7 @@
 package mounts // import "github.com/docker/docker/volume/mounts"
 
 import (
+	"errors"
 	"io/ioutil"
 	"os"
 	"runtime"
@@ -8,6 +9,8 @@
 	"testing"
 
 	"github.com/docker/docker/api/types/mount"
+	"gotest.tools/assert"
+	"gotest.tools/assert/cmp"
 )
 
 type parseMountRawTestSet struct {
@@ -477,4 +480,51 @@
 			t.Errorf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData)
 		}
 	}
+
+}
+
+// always returns the configured error
+// this is used to test error handling
+type mockFiProviderWithError struct{ err error }
+
+func (m mockFiProviderWithError) fileInfo(path string) (bool, bool, error) {
+	return false, false, m.err
+}
+
+// TestParseMountSpecBindWithFileinfoError makes sure that the parser returns
+// the error produced by the fileinfo provider.
+//
+// Some extra context for the future, in case this changes and someone wonders
+// why we are testing this at all:
+//
+// Currently this "fileInfoProvider" returns (bool, bool, error)
+// The 1st bool is "does this path exist"
+// The 2nd bool is "is this path a dir"
+// Then of course the error is an error.
+//
+// The issue is the parser was ignoring the error and only looking at the
+// "does this path exist" boolean, which is always false if there is an error.
+// The error returned to the caller was then a (slightly, maybe) friendlier
+// error string than what comes from `os.Stat`.
+// As a result, the caller always got an error saying the path doesn't exist,
+// even when the path does exist but produced some other error (such as a
+// permission error). This is confusing to users.
+func TestParseMountSpecBindWithFileinfoError(t *testing.T) {
+	previousProvider := currentFileInfoProvider
+	defer func() { currentFileInfoProvider = previousProvider }()
+
+	testErr := errors.New("some crazy error")
+	currentFileInfoProvider = &mockFiProviderWithError{err: testErr}
+
+	p := "/bananas"
+	if runtime.GOOS == "windows" {
+		p = `c:\bananas`
+	}
+	m := mount.Mount{Type: mount.TypeBind, Source: p, Target: p}
+
+	parser := NewParser(runtime.GOOS)
+
+	_, err := parser.ParseMountSpec(m)
+	assert.Assert(t, err != nil)
+	assert.Assert(t, cmp.Contains(err.Error(), "some crazy error"))
 }
diff --git a/volume/mounts/windows_parser.go b/volume/mounts/windows_parser.go
index ac61044..8f427d8 100644
--- a/volume/mounts/windows_parser.go
+++ b/volume/mounts/windows_parser.go
@@ -385,7 +385,7 @@
 	switch cfg.Type {
 	case mount.TypeVolume:
 		if cfg.Source == "" {
-			mp.Name = stringid.GenerateNonCryptoID()
+			mp.Name = stringid.GenerateRandomID()
 		} else {
 			mp.Name = cfg.Source
 		}
diff --git a/volume/service/service.go b/volume/service/service.go
index ebb5e20..f1fe5e7 100644
--- a/volume/service/service.go
+++ b/volume/service/service.go
@@ -56,7 +56,7 @@
 // Create creates a volume
 func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) {
 	if name == "" {
-		name = stringid.GenerateNonCryptoID()
+		name = stringid.GenerateRandomID()
 	}
 	v, err := s.vs.Create(ctx, name, driverName, opts...)
 	if err != nil {
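
One user-visible effect of switching to GenerateRandomID here: anonymous volumes (created without a name) now get names drawn from crypto/rand. A rough sketch of observing this through the Go client, assuming the client API of this release line (NewClientWithOpts, FromEnv, VolumeCreate with volume.VolumeCreateBody):

    package main

    import (
        "context"
        "fmt"

        volumetypes "github.com/docker/docker/api/types/volume"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }
        // an empty body means no name, so the daemon generates a random one
        vol, err := cli.VolumeCreate(context.Background(), volumetypes.VolumeCreateBody{})
        if err != nil {
            panic(err)
        }
        fmt.Println("anonymous volume name:", vol.Name) // 64 hex characters
    }
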