Merge pull request #203 from thaJeztah/18.09_backport_gcr_workaround
[18.09 backport] builder: add workaround for gcr auth issue
diff --git a/Dockerfile b/Dockerfile
index b76ae60..15ccf0f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -77,7 +77,7 @@
FROM base AS docker-py
# Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 8b246db271a85d6541dc458838627e89c683e42f
+ENV DOCKER_PY_COMMIT ac922192959870774ad8428344d9faa0555f7ba6
RUN git clone https://github.com/docker/docker-py.git /build \
&& cd /build \
&& git checkout -q $DOCKER_PY_COMMIT
@@ -187,6 +187,9 @@
jq \
libcap2-bin \
libdevmapper-dev \
+# libffi-dev and libssl-dev appear to be required for compiling paramiko on s390x/ppc64le
+ libffi-dev \
+ libssl-dev \
libudev-dev \
libsystemd-dev \
binutils-mingw-w64 \
@@ -195,6 +198,8 @@
pigz \
python-backports.ssl-match-hostname \
python-dev \
+# python-cffi appears to be required for compiling paramiko on s390x/ppc64le
+ python-cffi \
python-mock \
python-pip \
python-requests \
@@ -227,7 +232,8 @@
# split out into a separate image, including all the `python-*` deps installed
# above.
RUN cd /docker-py \
- && pip install docker-pycreds==0.2.1 \
+ && pip install docker-pycreds==0.4.0 \
+ && pip install paramiko==2.4.2 \
&& pip install yamllint==1.5.0 \
&& pip install -r test-requirements.txt
diff --git a/api/swagger.yaml b/api/swagger.yaml
index ca9d29e..1cf310d 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -1082,6 +1082,7 @@
type: "object"
additionalProperties:
type: "array"
+ x-nullable: true
items:
$ref: "#/definitions/PortBinding"
example:
@@ -1106,7 +1107,6 @@
PortBinding represents a binding between a host IP address and a host
port.
type: "object"
- x-nullable: true
properties:
HostIp:
description: "Host IP address that the container's port is mapped to."
@@ -5351,7 +5351,7 @@
/containers/{id}/resize:
post:
summary: "Resize a container TTY"
- description: "Resize the TTY for a container. You must restart the container for the resize to take effect."
+ description: "Resize the TTY for a container."
operationId: "ContainerResize"
consumes:
- "application/octet-stream"
@@ -6105,12 +6105,17 @@
in: "query"
description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa."
type: "string"
+ - name: "copyUIDGID"
+ in: "query"
+ description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir"
+ type: "string"
- name: "inputStream"
in: "body"
required: true
description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
schema:
type: "string"
+ format: "binary"
tags: ["Container"]
/containers/prune:
post:
@@ -8932,7 +8937,9 @@
type: "string"
RemoteAddrs:
description: "Addresses of manager nodes already participating in the swarm."
- type: "string"
+ type: "array"
+ items:
+ type: "string"
JoinToken:
description: "Secret token for joining this swarm."
type: "string"
diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
index c3fa916..dfd5434 100644
--- a/builder/builder-next/adapters/containerimage/pull.go
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -527,10 +527,10 @@
r := image.NewRootFS()
rootFS, release, err := p.is.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, pkgprogress.ChanOutput(pchan))
+ stopProgress()
if err != nil {
return nil, err
}
- stopProgress()
ref, err := p.is.CacheAccessor.GetFromSnapshotter(ctx, string(rootFS.ChainID()), cache.WithDescription(fmt.Sprintf("pulled from %s", p.ref)))
release()
diff --git a/builder/remotecontext/detect.go b/builder/remotecontext/detect.go
index 144eb57..1becd0f 100644
--- a/builder/remotecontext/detect.go
+++ b/builder/remotecontext/detect.go
@@ -12,6 +12,7 @@
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerignore"
+ "github.com/docker/docker/errdefs"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/urlutil"
"github.com/moby/buildkit/frontend/dockerfile/parser"
@@ -34,8 +35,9 @@
case remoteURL == ClientSessionRemote:
res, err := parser.Parse(config.Source)
if err != nil {
- return nil, nil, err
+ return nil, nil, errdefs.InvalidParameter(err)
}
+
return nil, res, nil
case urlutil.IsGitURL(remoteURL):
remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
@@ -106,7 +108,7 @@
switch contentType {
case mimeTypes.TextPlain:
res, err := parser.Parse(progressReader(content))
- return nil, res, err
+ return nil, res, errdefs.InvalidParameter(err)
default:
source, err := FromArchive(progressReader(content))
if err != nil {
@@ -146,11 +148,17 @@
br := bufio.NewReader(rc)
if _, err := br.Peek(1); err != nil {
if err == io.EOF {
- return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
+ return nil, errdefs.InvalidParameter(errors.Errorf("the Dockerfile (%s) cannot be empty", name))
}
return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
}
- return parser.Parse(br)
+
+ dockerfile, err := parser.Parse(br)
+ if err != nil {
+ return nil, errdefs.InvalidParameter(errors.Wrapf(err, "failed to parse %s", name))
+ }
+
+ return dockerfile, nil
}
func openAt(remote builder.Source, path string) (driver.File, error) {
diff --git a/container/container_unix.go b/container/container_unix.go
index 1a18455..8419bd5 100644
--- a/container/container_unix.go
+++ b/container/container_unix.go
@@ -136,7 +136,7 @@
return err
}
- id := stringid.GenerateNonCryptoID()
+ id := stringid.GenerateRandomID()
path, err := v.Mount(id)
if err != nil {
return err
@@ -175,8 +175,8 @@
return false
}
-// UnmountIpcMount uses the provided unmount function to unmount shm if it was mounted
-func (container *Container) UnmountIpcMount(unmount func(pth string) error) error {
+// UnmountIpcMount unmounts shm if it was mounted
+func (container *Container) UnmountIpcMount() error {
if container.HasMountFor("/dev/shm") {
return nil
}
@@ -190,10 +190,8 @@
if shmPath == "" {
return nil
}
- if err = unmount(shmPath); err != nil && !os.IsNotExist(err) {
- if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil {
- return errors.Wrapf(err, "umount %s", shmPath)
- }
+ if err = mount.Unmount(shmPath); err != nil && !os.IsNotExist(err) {
+ return err
}
return nil
}
@@ -383,7 +381,8 @@
for _, mountPath := range mountPaths {
if err := mount.Unmount(mountPath); err != nil {
- logrus.Warnf("%s unmountVolumes: Failed to do lazy umount fo volume '%s': %v", container.ID, mountPath, err)
+ logrus.WithError(err).WithField("container", container.ID).
+ Warn("Unable to unmount")
}
}
return container.UnmountVolumes(volumeEventLog)
diff --git a/container/container_windows.go b/container/container_windows.go
index b5bdb5b..090db12 100644
--- a/container/container_windows.go
+++ b/container/container_windows.go
@@ -22,7 +22,7 @@
// UnmountIpcMount unmounts Ipc related mounts.
// This is a NOOP on windows.
-func (container *Container) UnmountIpcMount(unmount func(pth string) error) error {
+func (container *Container) UnmountIpcMount() error {
return nil
}
diff --git a/daemon/archive.go b/daemon/archive.go
index 9c7971b..109376b 100644
--- a/daemon/archive.go
+++ b/daemon/archive.go
@@ -31,18 +31,19 @@
}
// helper functions to extract or archive
-func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
+func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions, root string) error {
if ea, ok := i.(extractor); ok {
return ea.ExtractArchive(src, dst, opts)
}
- return chrootarchive.Untar(src, dst, opts)
+
+ return chrootarchive.UntarWithRoot(src, dst, opts, root)
}
-func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath(src, opts)
}
- return archive.TarWithOptions(src, opts)
+ return chrootarchive.Tar(src, opts, root)
}
// ContainerCopy performs a deprecated operation of archiving the resource at
@@ -238,7 +239,7 @@
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
- data, err := archivePath(driver, sourceDir, opts)
+ data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
if err != nil {
return nil, nil, err
}
@@ -367,7 +368,7 @@
}
}
- if err := extractArchive(driver, content, resolvedPath, options); err != nil {
+ if err := extractArchive(driver, content, resolvedPath, options, container.BaseFS.Path()); err != nil {
return err
}
@@ -432,7 +433,7 @@
archive, err := archivePath(driver, basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
- })
+ }, container.BaseFS.Path())
if err != nil {
return nil, err
}
diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go
index 77d21d2..6311fc8 100644
--- a/daemon/cluster/executor/container/container.go
+++ b/daemon/cluster/executor/container/container.go
@@ -6,7 +6,6 @@
"net"
"strconv"
"strings"
- "time"
"github.com/sirupsen/logrus"
@@ -31,10 +30,6 @@
)
const (
- // Explicitly use the kernel's default setting for CPU quota of 100ms.
- // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
- cpuQuotaPeriod = 100 * time.Millisecond
-
// systemLabelPrefix represents the reserved namespace for system labels.
systemLabelPrefix = "com.docker.swarm"
)
@@ -448,9 +443,7 @@
}
if r.Limits.NanoCPUs > 0 {
- // CPU Period must be set in microseconds.
- resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond)
- resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9
+ resources.NanoCPUs = r.Limits.NanoCPUs
}
return resources
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
index 8d07079..466bca2 100644
--- a/daemon/cluster/executor/container/controller.go
+++ b/daemon/cluster/executor/container/controller.go
@@ -369,11 +369,17 @@
}
if err := r.adapter.shutdown(ctx); err != nil {
- if isUnknownContainer(err) || isStoppedContainer(err) {
- return nil
+ if !(isUnknownContainer(err) || isStoppedContainer(err)) {
+ return err
}
+ }
- return err
+ // Try removing networks referenced in this task in case this
+ // task is the last one referencing it
+ if err := r.adapter.removeNetworks(ctx); err != nil {
+ if !isUnknownContainer(err) {
+ return err
+ }
}
return nil
@@ -419,15 +425,6 @@
log.G(ctx).WithError(err).Debug("shutdown failed on removal")
}
- // Try removing networks referenced in this task in case this
- // task is the last one referencing it
- if err := r.adapter.removeNetworks(ctx); err != nil {
- if isUnknownContainer(err) {
- return nil
- }
- return err
- }
-
if err := r.adapter.remove(ctx); err != nil {
if isUnknownContainer(err) {
return nil
diff --git a/daemon/cluster/nodes.go b/daemon/cluster/nodes.go
index 3c073b0..dffd755 100644
--- a/daemon/cluster/nodes.go
+++ b/daemon/cluster/nodes.go
@@ -8,6 +8,7 @@
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
swarmapi "github.com/docker/swarmkit/api"
+ "google.golang.org/grpc"
)
// GetNodes returns a list of all nodes known to a cluster.
@@ -30,7 +31,9 @@
r, err := state.controlClient.ListNodes(
ctx,
- &swarmapi.ListNodesRequest{Filters: filters})
+ &swarmapi.ListNodesRequest{Filters: filters},
+ grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
+ )
if err != nil {
return nil, err
}
diff --git a/daemon/cluster/secrets.go b/daemon/cluster/secrets.go
index c6fd842..6f652eb 100644
--- a/daemon/cluster/secrets.go
+++ b/daemon/cluster/secrets.go
@@ -7,6 +7,7 @@
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
swarmapi "github.com/docker/swarmkit/api"
+ "google.golang.org/grpc"
)
// GetSecret returns a secret from a managed swarm cluster
@@ -44,7 +45,9 @@
defer cancel()
r, err := state.controlClient.ListSecrets(ctx,
- &swarmapi.ListSecretsRequest{Filters: filters})
+ &swarmapi.ListSecretsRequest{Filters: filters},
+ grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
+ )
if err != nil {
return nil, err
}
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index c0aab72..03b01b8 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -351,10 +351,6 @@
return nil
}
-func detachMounted(path string) error {
- return unix.Unmount(path, unix.MNT_DETACH)
-}
-
func isLinkable(child *container.Container) bool {
// A container is linkable only if it belongs to the default network
_, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()]
diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go
index 349d3a1..10bfd53 100644
--- a/daemon/container_operations_windows.go
+++ b/daemon/container_operations_windows.go
@@ -78,10 +78,6 @@
return nil
}
-func detachMounted(path string) error {
- return nil
-}
-
func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) {
if len(c.SecretReferences) == 0 {
return nil
diff --git a/daemon/create_unix.go b/daemon/create_unix.go
index 13857ba..e78415a 100644
--- a/daemon/create_unix.go
+++ b/daemon/create_unix.go
@@ -41,7 +41,7 @@
}
for spec := range config.Volumes {
- name := stringid.GenerateNonCryptoID()
+ name := stringid.GenerateRandomID()
destination := filepath.Clean(spec)
// Skip volumes for which we already have something mounted on that
diff --git a/daemon/create_windows.go b/daemon/create_windows.go
index 37e425a..3bce278 100644
--- a/daemon/create_windows.go
+++ b/daemon/create_windows.go
@@ -38,7 +38,7 @@
// If the mountpoint doesn't have a name, generate one.
if len(mp.Name) == 0 {
- mp.Name = stringid.GenerateNonCryptoID()
+ mp.Name = stringid.GenerateRandomID()
}
// Skip volumes for which we already have something mounted on that
diff --git a/daemon/daemon.go b/daemon/daemon.go
index a307863..b2c02a9 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -11,6 +11,7 @@
"io/ioutil"
"math/rand"
"net"
+ "net/url"
"os"
"path"
"path/filepath"
@@ -157,15 +158,18 @@
)
// must trim "https://" or "http://" prefix
for i, v := range daemon.configStore.Mirrors {
- v = strings.TrimPrefix(v, "https://")
- v = strings.TrimPrefix(v, "http://")
+ if uri, err := url.Parse(v); err == nil {
+ v = uri.Host
+ }
mirrors[i] = v
}
// set "registry-mirrors"
m[registryKey] = resolver.RegistryConf{Mirrors: mirrors}
// set "insecure-registries"
for _, v := range daemon.configStore.InsecureRegistries {
- v = strings.TrimPrefix(v, "http://")
+ if uri, err := url.Parse(v); err == nil {
+ v = uri.Host
+ }
m[v] = resolver.RegistryConf{
PlainHTTP: true,
}
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index 5234201..8b59b52 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -174,8 +174,8 @@
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
- d.Major = int64(stat.Rdev / 256)
- d.Minor = int64(stat.Rdev % 256)
+ d.Major = int64(unix.Major(stat.Rdev))
+ d.Minor = int64(unix.Minor(stat.Rdev))
blkioWeightDevices = append(blkioWeightDevices, d)
}
@@ -245,8 +245,8 @@
return nil, err
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
- d.Major = int64(stat.Rdev / 256)
- d.Minor = int64(stat.Rdev % 256)
+ d.Major = int64(unix.Major(stat.Rdev))
+ d.Minor = int64(unix.Minor(stat.Rdev))
throttleDevices = append(throttleDevices, d)
}
diff --git a/daemon/daemon_unix_test.go b/daemon/daemon_unix_test.go
index 36c6030..9ea2560 100644
--- a/daemon/daemon_unix_test.go
+++ b/daemon/daemon_unix_test.go
@@ -6,11 +6,16 @@
"errors"
"io/ioutil"
"os"
+ "path/filepath"
"testing"
+ "github.com/docker/docker/api/types/blkiodev"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
+ "golang.org/x/sys/unix"
+ "gotest.tools/assert"
+ is "gotest.tools/assert/cmp"
)
type fakeContainerGetter struct {
@@ -266,3 +271,61 @@
t.Fatal("Expected networkOptions error, got nil")
}
}
+
+const (
+ // prepare major 0x1FD(509 in decimal) and minor 0x130(304)
+ DEVNO = 0x11FD30
+ MAJOR = 509
+ MINOR = 304
+ WEIGHT = 1024
+)
+
+func deviceTypeMock(t *testing.T, testAndCheck func(string)) {
+ if os.Getuid() != 0 {
+ t.Skip("root required") // for mknod
+ }
+
+ t.Parallel()
+
+ tempDir, err := ioutil.TempDir("", "tempDevDir"+t.Name())
+ assert.NilError(t, err, "create temp file")
+ tempFile := filepath.Join(tempDir, "dev")
+
+ defer os.RemoveAll(tempDir)
+
+ if err = unix.Mknod(tempFile, unix.S_IFCHR, DEVNO); err != nil {
+ t.Fatalf("mknod error %s(%x): %v", tempFile, DEVNO, err)
+ }
+
+ testAndCheck(tempFile)
+}
+
+func TestGetBlkioWeightDevices(t *testing.T) {
+ deviceTypeMock(t, func(tempFile string) {
+ mockResource := containertypes.Resources{
+ BlkioWeightDevice: []*blkiodev.WeightDevice{{Path: tempFile, Weight: WEIGHT}},
+ }
+
+ weightDevs, err := getBlkioWeightDevices(mockResource)
+
+ assert.NilError(t, err, "getBlkioWeightDevices")
+ assert.Check(t, is.Len(weightDevs, 1), "getBlkioWeightDevices")
+ assert.Check(t, weightDevs[0].Major == MAJOR, "get major device type")
+ assert.Check(t, weightDevs[0].Minor == MINOR, "get minor device type")
+ assert.Check(t, *weightDevs[0].Weight == WEIGHT, "get device weight")
+ })
+}
+
+func TestGetBlkioThrottleDevices(t *testing.T) {
+ deviceTypeMock(t, func(tempFile string) {
+ mockDevs := []*blkiodev.ThrottleDevice{{Path: tempFile, Rate: WEIGHT}}
+
+ retDevs, err := getBlkioThrottleDevices(mockDevs)
+
+ assert.NilError(t, err, "getBlkioThrottleDevices")
+ assert.Check(t, is.Len(retDevs, 1), "getBlkioThrottleDevices")
+ assert.Check(t, retDevs[0].Major == MAJOR, "get major device type")
+ assert.Check(t, retDevs[0].Minor == MINOR, "get minor device type")
+ assert.Check(t, retDevs[0].Rate == WEIGHT, "get device rate")
+ })
+}
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
index c036c46..cf2a955 100644
--- a/daemon/exec/exec.go
+++ b/daemon/exec/exec.go
@@ -39,7 +39,7 @@
// NewConfig initializes the a new exec configuration
func NewConfig() *Config {
return &Config{
- ID: stringid.GenerateNonCryptoID(),
+ ID: stringid.GenerateRandomID(),
StreamConfig: stream.NewConfig(),
Started: make(chan struct{}),
}
diff --git a/daemon/export.go b/daemon/export.go
index 27bc359..01593f4 100644
--- a/daemon/export.go
+++ b/daemon/export.go
@@ -70,7 +70,7 @@
Compression: archive.Uncompressed,
UIDMaps: daemon.idMapping.UIDs(),
GIDMaps: daemon.idMapping.GIDs(),
- })
+ }, basefs.Path())
if err != nil {
rwlayer.Unmount()
return nil, err
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index 114aa9a..bbd19a8 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -43,7 +43,7 @@
"github.com/docker/docker/pkg/directory"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/locker"
- mountpk "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -72,7 +72,6 @@
// Driver contains information about the filesystem mounted.
type Driver struct {
- sync.Mutex
root string
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
@@ -81,6 +80,7 @@
pathCache map[string]string
naiveDiff graphdriver.DiffDriver
locker *locker.Locker
+ mntL sync.Mutex
}
// Init returns a new AUFS driver.
@@ -327,11 +327,11 @@
break
}
- if err != unix.EBUSY {
- return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+ if errors.Cause(err) != unix.EBUSY {
+ return errors.Wrap(err, "aufs: unmount error")
}
if retries >= 5 {
- return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+ return errors.Wrap(err, "aufs: unmount error after retries")
}
// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
retries++
@@ -437,7 +437,7 @@
err := a.unmount(m)
if err != nil {
- logger.Debugf("Failed to unmount %s aufs: %v", id, err)
+ logger.WithError(err).WithField("method", "Put()").Warn()
}
return err
}
@@ -547,9 +547,6 @@
}
func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
- a.Lock()
- defer a.Unlock()
-
// If the id is mounted or we get an error return
if mounted, err := a.mounted(target); err != nil || mounted {
return err
@@ -564,9 +561,6 @@
}
func (a *Driver) unmount(mountPath string) error {
- a.Lock()
- defer a.Unlock()
-
if mounted, err := a.mounted(mountPath); err != nil || !mounted {
return err
}
@@ -579,32 +573,29 @@
// Cleanup aufs and unmount all mountpoints
func (a *Driver) Cleanup() error {
- var dirs []string
- if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if !info.IsDir() {
- return nil
- }
- dirs = append(dirs, path)
- return nil
- }); err != nil {
- return err
+ dir := a.mntPath()
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return errors.Wrap(err, "aufs readdir error")
}
+ for _, f := range files {
+ if !f.IsDir() {
+ continue
+ }
- for _, m := range dirs {
+ m := path.Join(dir, f.Name())
+
if err := a.unmount(m); err != nil {
- logger.Debugf("error unmounting %s: %s", m, err)
+ logger.WithError(err).WithField("method", "Cleanup()").Warn()
}
}
- return mountpk.RecursiveUnmount(a.root)
+ return mount.RecursiveUnmount(a.root)
}
func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
defer func() {
if err != nil {
- Unmount(target)
+ mount.Unmount(target)
}
}()
@@ -632,14 +623,29 @@
opts += ",dirperm1"
}
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
- if err = mount("none", target, "aufs", 0, data); err != nil {
+ a.mntL.Lock()
+ err = unix.Mount("none", target, "aufs", 0, data)
+ a.mntL.Unlock()
+ if err != nil {
+ err = errors.Wrap(err, "mount target="+target+" data="+data)
return
}
- for ; index < len(ro); index++ {
- layer := fmt.Sprintf(":%s=ro+wh", ro[index])
- data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
- if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
+ for index < len(ro) {
+ bp = 0
+ for ; index < len(ro); index++ {
+ layer := fmt.Sprintf("append:%s=ro+wh,", ro[index])
+ if bp+len(layer) > len(b) {
+ break
+ }
+ bp += copy(b[bp:], layer)
+ }
+ data := label.FormatMountLabel(string(b[:bp]), mountLabel)
+ a.mntL.Lock()
+ err = unix.Mount("none", target, "aufs", unix.MS_REMOUNT, data)
+ a.mntL.Unlock()
+ if err != nil {
+ err = errors.Wrap(err, "mount target="+target+" flags=MS_REMOUNT data="+data)
return
}
}
@@ -666,7 +672,7 @@
defer os.RemoveAll(union)
opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base)
- if err := mount("none", union, "aufs", 0, opts); err != nil {
+ if err := unix.Mount("none", union, "aufs", 0, opts); err != nil {
return
}
enableDirperm = true
diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go
index fdc502b..0752c84 100644
--- a/daemon/graphdriver/aufs/aufs_test.go
+++ b/daemon/graphdriver/aufs/aufs_test.go
@@ -731,7 +731,7 @@
// create a bunch of ids
var ids []string
for i := 0; i < numConcurrent; i++ {
- ids = append(ids, stringid.GenerateNonCryptoID())
+ ids = append(ids, stringid.GenerateRandomID())
}
if err := d.Create(ids[0], "", nil); err != nil {
diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go
index 9f55103..fc20a5e 100644
--- a/daemon/graphdriver/aufs/mount.go
+++ b/daemon/graphdriver/aufs/mount.go
@@ -4,14 +4,38 @@
import (
"os/exec"
+ "syscall"
- "golang.org/x/sys/unix"
+ "github.com/docker/docker/pkg/mount"
)
// Unmount the target specified.
func Unmount(target string) error {
- if err := exec.Command("auplink", target, "flush").Run(); err != nil {
- logger.WithError(err).Warnf("Couldn't run auplink before unmount %s", target)
+ const (
+ EINVAL = 22 // if auplink returns this,
+ retries = 3 // retry a few times
+ )
+
+ for i := 0; ; i++ {
+ out, err := exec.Command("auplink", target, "flush").CombinedOutput()
+ if err == nil {
+ break
+ }
+ rc := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ rc = status.ExitStatus()
+ }
+ }
+ if i >= retries || rc != EINVAL {
+ logger.WithError(err).WithField("method", "Unmount").Warnf("auplink flush failed: %s", out)
+ break
+ }
+ // auplink failed to find target in /proc/self/mounts because
+ // kernel can't guarantee continuity while reading from it
+ // while mounts table is being changed
+ logger.Debugf("auplink flush error (retrying %d/%d): %s", i+1, retries, out)
}
- return unix.Unmount(target, 0)
+
+ return mount.Unmount(target)
}
diff --git a/daemon/graphdriver/aufs/mount_linux.go b/daemon/graphdriver/aufs/mount_linux.go
deleted file mode 100644
index 8d5ad8f..0000000
--- a/daemon/graphdriver/aufs/mount_linux.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs"
-
-import "golang.org/x/sys/unix"
-
-func mount(source string, target string, fstype string, flags uintptr, data string) error {
- return unix.Mount(source, target, fstype, flags, data)
-}
diff --git a/daemon/graphdriver/aufs/mount_unsupported.go b/daemon/graphdriver/aufs/mount_unsupported.go
deleted file mode 100644
index cf7f58c..0000000
--- a/daemon/graphdriver/aufs/mount_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs"
-
-import "errors"
-
-// MsRemount declared to specify a non-linux system mount.
-const MsRemount = 0
-
-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
- return errors.New("mount is not implemented on this platform")
-}
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index 7ce7ede..fcaedc6 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -178,7 +178,7 @@
}
if umountErr != nil {
- return errors.Wrapf(umountErr, "error unmounting %s", d.home)
+ return umountErr
}
return nil
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 5dc01d7..c41b50c 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -1200,7 +1200,7 @@
options = joinMountOptions(options, devices.mountOptions)
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
- return fmt.Errorf("Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options, err, string(dmesg.Dmesg(256)))
+ return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
}
defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
@@ -2381,7 +2381,7 @@
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
- return fmt.Errorf("devmapper: Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), path, fstype, options, err, string(dmesg.Dmesg(256)))
+ return errors.Wrapf(err, "Failed to mount; dmesg: %s", string(dmesg.Dmesg(256)))
}
if fstype == "xfs" && devices.xfsNospaceRetries != "" {
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index 899b1f8..a42a03b 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -16,7 +16,6 @@
"github.com/docker/docker/pkg/locker"
"github.com/docker/docker/pkg/mount"
"github.com/docker/go-units"
- "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -129,11 +128,7 @@
return err
}
- if umountErr != nil {
- return errors.Wrapf(umountErr, "error unmounting %s", d.home)
- }
-
- return nil
+ return umountErr
}
// CreateReadWrite creates a layer that is writable for use as a container
diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go
index 52b0c6d..699e434 100644
--- a/daemon/graphdriver/windows/windows.go
+++ b/daemon/graphdriver/windows/windows.go
@@ -338,11 +338,14 @@
// If permission denied, it's possible that the scratch is still mounted, an
// artifact after a hard daemon crash for example. Worth a shot to try detaching it
// before retrying the rename.
- if detachErr := vhd.DetachVhd(filepath.Join(layerPath, "sandbox.vhdx")); detachErr != nil {
- return errors.Wrapf(err, "failed to detach VHD: %s", detachErr)
- }
- if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) {
- return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr)
+ sandbox := filepath.Join(layerPath, "sandbox.vhdx")
+ if _, statErr := os.Stat(sandbox); statErr == nil {
+ if detachErr := vhd.DetachVhd(sandbox); detachErr != nil {
+ return errors.Wrapf(err, "failed to detach VHD: %s", detachErr)
+ }
+ if renameErr := os.Rename(layerPath, tmpLayerPath); renameErr != nil && !os.IsNotExist(renameErr) {
+ return errors.Wrapf(err, "second rename attempt following detach failed: %s", renameErr)
+ }
}
}
if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil {
diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
index 8a79877..c83446c 100644
--- a/daemon/graphdriver/zfs/zfs.go
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -19,6 +19,7 @@
"github.com/docker/docker/pkg/parsers"
"github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -390,7 +391,7 @@
}
if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
- return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
+ return nil, errors.Wrap(err, "error creating zfs mount")
}
// this could be our first mount after creation of the filesystem, and the root dir may still have root
diff --git a/daemon/logger/plugin.go b/daemon/logger/plugin.go
index c66540c..8c155b0 100644
--- a/daemon/logger/plugin.go
+++ b/daemon/logger/plugin.go
@@ -81,7 +81,7 @@
return nil, err
}
- id := stringid.GenerateNonCryptoID()
+ id := stringid.GenerateRandomID()
a := &pluginAdapter{
driverName: name,
id: id,
diff --git a/daemon/names.go b/daemon/names.go
index 6c31949..4fa39af 100644
--- a/daemon/names.go
+++ b/daemon/names.go
@@ -38,7 +38,7 @@
func (daemon *Daemon) generateIDAndName(name string) (string, string, error) {
var (
err error
- id = stringid.GenerateNonCryptoID()
+ id = stringid.GenerateRandomID()
)
if name == "" {
diff --git a/daemon/start.go b/daemon/start.go
index e2265a4..e2416ef 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -229,7 +229,7 @@
func (daemon *Daemon) Cleanup(container *container.Container) {
daemon.releaseNetwork(container)
- if err := container.UnmountIpcMount(detachMounted); err != nil {
+ if err := container.UnmountIpcMount(); err != nil {
logrus.Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err)
}
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
index 6f0083e..d98c169 100644
--- a/docs/api/version-history.md
+++ b/docs/api/version-history.md
@@ -159,6 +159,7 @@
* `GET /events` now supports service, node and secret events which are emitted when users create, update and remove service, node and secret
* `GET /events` now supports network remove event which is emitted when users remove a swarm scoped network
* `GET /events` now supports a filter type `scope` in which supported value could be swarm and local
+* `PUT /containers/(name)/archive` now accepts a `copyUIDGID` parameter to allow copying the UID/GID maps to the destination file or directory.
## v1.29 API changes
diff --git a/hack/ci/windows.ps1 b/hack/ci/windows.ps1
index c2c937f..6d87f32 100644
--- a/hack/ci/windows.ps1
+++ b/hack/ci/windows.ps1
@@ -119,6 +119,7 @@
#$env:INTEGRATION_IN_CONTAINER="yes"
#$env:WINDOWS_BASE_IMAGE=""
#$env:SKIP_COPY_GO="yes"
+#$env:INTEGRATION_TESTFLAGS="-test.v"
Function Nuke-Everything {
$ErrorActionPreference = 'SilentlyContinue'
@@ -409,7 +410,7 @@
# Redirect to a temporary location.
$TEMPORIG=$env:TEMP
$env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\CI-$COMMITHASH"
- $env:LOCALAPPDATA="$TEMP\localappdata"
+ $env:LOCALAPPDATA="$env:TEMP\localappdata"
$errorActionPreference='Stop'
New-Item -ItemType Directory "$env:TEMP" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\userprofile" -ErrorAction SilentlyContinue | Out-Null
@@ -825,18 +826,32 @@
docker `
"`$env`:PATH`='c`:\target;'+`$env:PATH`; `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
} else {
- Write-Host -ForegroundColor Green "INFO: Integration tests being run from the host:"
- Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
$env:DOCKER_HOST=$DASHH_CUT
$env:PATH="$env:TEMP\binary;$env:PATH;" # Force to use the test binaries, not the host ones.
- Write-Host -ForegroundColor Green "INFO: $c"
Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
+
+ $ErrorActionPreference = "SilentlyContinue"
+ Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
+ if (!($env:INTEGRATION_TESTFLAGS)) {
+ $env:INTEGRATION_TESTFLAGS = "-test.v"
+ }
+ Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker"
+ $start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
+ $ErrorActionPreference = "Stop"
+ if (-not($LastExitCode -eq 0)) {
+ Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
+ }
+
+ $ErrorActionPreference = "SilentlyContinue"
+ Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
+ Write-Host -ForegroundColor Green "INFO: $c"
+ Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
# Explicit to not use measure-command otherwise don't get output as it goes
$start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
}
$ErrorActionPreference = "Stop"
if (-not($LastExitCode -eq 0)) {
- Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
+ Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
}
Write-Host -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
} else {
diff --git a/hack/dockerfile/install/containerd.installer b/hack/dockerfile/install/containerd.installer
index 3b36925..8b15eb8 100755
--- a/hack/dockerfile/install/containerd.installer
+++ b/hack/dockerfile/install/containerd.installer
@@ -4,7 +4,7 @@
# containerd is also pinned in vendor.conf. When updating the binary
# version you may also need to update the vendor version to pick up bug
# fixes or new APIs.
-CONTAINERD_COMMIT=bb71b10fd8f58240ca47fbb579b9d1028eea7c84 # v1.2.5
+CONTAINERD_COMMIT=894b81a4b802e4eb2a91d1ce216b8817763c29fb # v1.2.6
install_containerd() {
echo "Install containerd version $CONTAINERD_COMMIT"
diff --git a/hack/dockerfile/install/proxy.installer b/hack/dockerfile/install/proxy.installer
index 82a90f6..36b4ca0 100755
--- a/hack/dockerfile/install/proxy.installer
+++ b/hack/dockerfile/install/proxy.installer
@@ -3,7 +3,7 @@
# LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When
# updating the binary version, consider updating github.com/docker/libnetwork
# in vendor.conf accordingly
-LIBNETWORK_COMMIT=872f0a83c98add6cae255c8859e29532febc0039 # bump_18.09 branch
+LIBNETWORK_COMMIT=e7933d41e7b206756115aa9df5e0599fc5169742 # bump_18.09 branch
install_proxy() {
case "$1" in
diff --git a/hack/dockerfile/install/runc.installer b/hack/dockerfile/install/runc.installer
index 532a7f7..a8156db 100755
--- a/hack/dockerfile/install/runc.installer
+++ b/hack/dockerfile/install/runc.installer
@@ -4,7 +4,7 @@
# The version of runc should match the version that is used by the containerd
# version that is used. If you need to update runc, open a pull request in
# the containerd project first, and update both after that is merged.
-RUNC_COMMIT=2b18fe1d885ee5083ef9f0838fee39b62d653e30
+RUNC_COMMIT=425e105d5a03fabd737a126ad93d62a9eeede87f # v1.0.0-rc8
install_runc() {
# If using RHEL7 kernels (3.10.0 el7), disable kmem accounting/limiting
diff --git a/hack/make.ps1 b/hack/make.ps1
index 6221e36..ab2a978 100644
--- a/hack/make.ps1
+++ b/hack/make.ps1
@@ -60,6 +60,9 @@
.PARAMETER TestUnit
Runs unit tests.
+.PARAMETER TestIntegration
+ Runs integration tests.
+
.PARAMETER All
Runs everything this script knows about that can run in a container.
@@ -84,6 +87,7 @@
[Parameter(Mandatory=$False)][switch]$PkgImports,
[Parameter(Mandatory=$False)][switch]$GoFormat,
[Parameter(Mandatory=$False)][switch]$TestUnit,
+ [Parameter(Mandatory=$False)][switch]$TestIntegration,
[Parameter(Mandatory=$False)][switch]$All
)
@@ -320,6 +324,39 @@
if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" }
}
+# Run the integration tests
+Function Run-IntegrationTests() {
+ $env:DOCKER_INTEGRATION_DAEMON_DEST = $root + "\bundles\tmp"
+ $dirs = Get-ChildItem -Path integration -Directory -Recurse
+ $integration_api_dirs = @()
+ ForEach($dir in $dirs) {
+ $RelativePath = "." + $dir.FullName -replace "$($PWD.Path -replace "\\","\\")",""
+ If ($RelativePath -notmatch '(^.\\integration($|\\internal)|\\testdata)') {
+ $integration_api_dirs += $dir
+ Write-Host "Building test suite binary $RelativePath"
+ go test -c -o "$RelativePath\test.exe" $RelativePath
+ }
+ }
+
+ ForEach($dir in $integration_api_dirs) {
+ Set-Location $dir.FullName
+ Write-Host "Running $($PWD.Path)"
+ $pinfo = New-Object System.Diagnostics.ProcessStartInfo
+ $pinfo.FileName = "$($PWD.Path)\test.exe"
+ $pinfo.RedirectStandardError = $true
+ $pinfo.UseShellExecute = $false
+ $pinfo.Arguments = $env:INTEGRATION_TESTFLAGS
+ $p = New-Object System.Diagnostics.Process
+ $p.StartInfo = $pinfo
+ $p.Start() | Out-Null
+ $p.WaitForExit()
+ $err = $p.StandardError.ReadToEnd()
+ if (($LASTEXITCODE -ne 0) -and ($err -notlike "*warning: no tests to run*")) {
+ Throw "Integration tests failed: $err"
+ }
+ }
+}
+
# Start of main code.
Try {
Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)"
@@ -331,13 +368,13 @@
# Handle the "-All" shortcut to turn on all things we can handle.
# Note we expressly only include the items which can run in a container - the validations tests cannot
# as they require the .git directory which is excluded from the image by .dockerignore
- if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True }
+ if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True; }
# Handle the "-Binary" shortcut to build both client and daemon.
if ($Binary) { $Client = $True; $Daemon = $True }
# Default to building the daemon if not asked for anything explicitly.
- if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True }
+ if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit) -and -not($TestIntegration)) { $Daemon=$True }
# Verify git is installed
if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" }
@@ -351,6 +388,8 @@
# Get the version of docker (eg 17.04.0-dev)
$dockerVersion="0.0.0-dev"
+ # Overwrite dockerVersion if VERSION Environment variable is available
+ if (Test-Path Env:\VERSION) { $dockerVersion=$env:VERSION }
# Give a warning if we are not running in a container and are building binaries or running unit tests.
# Not relevant for validation tests as these are fine to run outside of a container.
@@ -423,6 +462,9 @@
# Run unit tests
if ($TestUnit) { Run-UnitTests }
+ # Run integration tests
+ if ($TestIntegration) { Run-IntegrationTests }
+
# Gratuitous ASCII art.
if ($Daemon -or $Client) {
Write-Host
diff --git a/image/rootfs.go b/image/rootfs.go
index 84843e1..f73a066 100644
--- a/image/rootfs.go
+++ b/image/rootfs.go
@@ -38,7 +38,8 @@
func (r *RootFS) Clone() *RootFS {
newRoot := NewRootFS()
newRoot.Type = r.Type
- newRoot.DiffIDs = append(r.DiffIDs)
+ newRoot.DiffIDs = make([]layer.DiffID, len(r.DiffIDs))
+ copy(newRoot.DiffIDs, r.DiffIDs)
return newRoot
}
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index d1a709b..96cd2dc 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -1777,7 +1777,7 @@
dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0")
icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success)
- dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", "mkdir -p /test/test-mount/vfs && mount -n /test/testfs.img /test/test-mount/vfs")
+ dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", "mkdir -p /test/test-mount/vfs && mount -n -t ext4 /test/testfs.img /test/test-mount/vfs")
defer mount.Unmount(filepath.Join(testDir, "test-mount"))
s.d.Start(c, "--storage-driver", "vfs", "--data-root", filepath.Join(testDir, "test-mount"))
diff --git a/integration-cli/docker_cli_external_volume_driver_unix_test.go b/integration-cli/docker_cli_external_volume_driver_unix_test.go
index b876d9f..e9e92d4 100644
--- a/integration-cli/docker_cli_external_volume_driver_unix_test.go
+++ b/integration-cli/docker_cli_external_volume_driver_unix_test.go
@@ -558,7 +558,7 @@
}
func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverOutOfBandDelete(c *check.C) {
- driverName := stringid.GenerateNonCryptoID()
+ driverName := stringid.GenerateRandomID()
p := newVolumePlugin(c, driverName)
defer p.Close()
diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go
index f34cd0f..cab75f4 100644
--- a/integration-cli/docker_cli_ps_test.go
+++ b/integration-cli/docker_cli_ps_test.go
@@ -439,15 +439,11 @@
func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) {
runSleepingContainer(c, "--name=sleep")
- dockerCmd(c, "run", "--name", "zero1", "busybox", "true")
- firstZero := getIDByName(c, "zero1")
-
- dockerCmd(c, "run", "--name", "zero2", "busybox", "true")
- secondZero := getIDByName(c, "zero2")
+ firstZero, _ := dockerCmd(c, "run", "-d", "busybox", "true")
+ secondZero, _ := dockerCmd(c, "run", "-d", "busybox", "true")
out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false")
c.Assert(err, checker.NotNil, check.Commentf("Should fail.", out, err))
-
firstNonZero := getIDByName(c, "nonzero1")
out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false")
@@ -456,17 +452,16 @@
// filter containers by exited=0
out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0")
- ids := strings.Split(strings.TrimSpace(out), "\n")
- c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d: %s", len(ids), out))
- c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0]))
- c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1]))
+ c.Assert(out, checker.Contains, strings.TrimSpace(firstZero))
+ c.Assert(out, checker.Contains, strings.TrimSpace(secondZero))
+ c.Assert(out, checker.Not(checker.Contains), strings.TrimSpace(firstNonZero))
+ c.Assert(out, checker.Not(checker.Contains), strings.TrimSpace(secondNonZero))
out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1")
- ids = strings.Split(strings.TrimSpace(out), "\n")
- c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers got %d", len(ids)))
- c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0]))
- c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1]))
-
+ c.Assert(out, checker.Contains, strings.TrimSpace(firstNonZero))
+ c.Assert(out, checker.Contains, strings.TrimSpace(secondNonZero))
+ c.Assert(out, checker.Not(checker.Contains), strings.TrimSpace(firstZero))
+ c.Assert(out, checker.Not(checker.Contains), strings.TrimSpace(secondZero))
}
func (s *DockerSuite) TestPsRightTagName(c *check.C) {
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index cd3f5e3..46dff49 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -1544,6 +1544,10 @@
{
"name": "chmod",
"action": "SCMP_ACT_ERRNO"
+ },
+ {
+ "name": "fchmodat",
+ "action": "SCMP_ACT_ERRNO"
}
]
}`
diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go
index 2f811d4..95ad9ce 100644
--- a/integration-cli/docker_cli_search_test.go
+++ b/integration-cli/docker_cli_search_test.go
@@ -44,54 +44,6 @@
assert.Assert(c, strings.Contains(out, "invalid syntax"), "couldn't find the invalid value warning")
}
-func (s *DockerSuite) TestSearchCmdOptions(c *check.C) {
- testRequires(c, Network, DaemonIsLinux)
-
- out, _ := dockerCmd(c, "search", "--help")
- assert.Assert(c, strings.Contains(out, "Usage:\tdocker search [OPTIONS] TERM"))
-
- outSearchCmd, _ := dockerCmd(c, "search", "busybox")
- outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox")
-
- assert.Assert(c, len(outSearchCmd) <= len(outSearchCmdNotrunc), "The no-trunc option can't take effect.")
-
- outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
- outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n")
- for i := range outSearchCmdautomatedSlice {
- assert.Assert(c, !strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), "The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)
- }
-
- outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image.
- outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n")
- for i := range outSearchCmdNotOfficialSlice {
- assert.Assert(c, !strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), "The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)
- }
-
- outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image.
- outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n")
- assert.Equal(c, len(outSearchCmdOfficialSlice), 3) // 1 header, 1 line, 1 carriage return
- assert.Assert(c, strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), "The busybox is an OFFICIAL image: %s", outSearchCmdOfficial)
-
- outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox")
- assert.Assert(c, strings.Count(outSearchCmdStars, "[OK]") <= strings.Count(outSearchCmd, "[OK]"), "The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)
-
- dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox")
-
- // --automated deprecated since Docker 1.13
- outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image.
- outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n")
- for i := range outSearchCmdautomatedSlice1 {
- assert.Assert(c, !strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), "The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)
- }
-
- // -s --stars deprecated since Docker 1.13
- outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox")
- assert.Assert(c, strings.Count(outSearchCmdStars1, "[OK]") <= strings.Count(outSearchCmd, "[OK]"), "The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)
-
- // -s --stars deprecated since Docker 1.13
- dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox")
-}
-
// search for repos which start with "ubuntu-" on the central registry
func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) {
testRequires(c, Network, DaemonIsLinux)
diff --git a/integration-cli/docker_cli_service_update_test.go b/integration-cli/docker_cli_service_update_test.go
deleted file mode 100644
index c729860..0000000
--- a/integration-cli/docker_cli_service_update_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// +build !windows
-
-package main
-
-import (
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types/swarm"
- "github.com/docker/docker/integration-cli/checker"
- "github.com/go-check/check"
-)
-
-func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) {
- d := s.AddDaemon(c, true, true)
- out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name=test", "busybox", "top")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
- service := d.GetService(c, "test")
- c.Assert(service.Spec.Labels, checker.HasLen, 0)
-
- // add label to empty set
- out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
- service = d.GetService(c, "test")
- c.Assert(service.Spec.Labels, checker.HasLen, 1)
- c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
-
- // add label to non-empty set
- out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo2=bar")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
- service = d.GetService(c, "test")
- c.Assert(service.Spec.Labels, checker.HasLen, 2)
- c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar")
-
- out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo2")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
- service = d.GetService(c, "test")
- c.Assert(service.Spec.Labels, checker.HasLen, 1)
- c.Assert(service.Spec.Labels["foo2"], checker.Equals, "")
-
- out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
- service = d.GetService(c, "test")
- c.Assert(service.Spec.Labels, checker.HasLen, 0)
- c.Assert(service.Spec.Labels["foo"], checker.Equals, "")
-
- // now make sure we can add again
- out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
- service = d.GetService(c, "test")
- c.Assert(service.Spec.Labels, checker.HasLen, 1)
- c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
-}
-
-func (s *DockerSwarmSuite) TestServiceUpdateSecrets(c *check.C) {
- d := s.AddDaemon(c, true, true)
- testName := "test_secret"
- id := d.CreateSecret(c, swarm.SecretSpec{
- Annotations: swarm.Annotations{
- Name: testName,
- },
- Data: []byte("TESTINGDATA"),
- })
- c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
- testTarget := "testing"
- serviceName := "test"
-
- out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
- // add secret
- out, err = d.Cmd("service", "update", "--detach", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
- out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
- c.Assert(err, checker.IsNil)
-
- var refs []swarm.SecretReference
- c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
- c.Assert(refs, checker.HasLen, 1)
-
- c.Assert(refs[0].SecretName, checker.Equals, testName)
- c.Assert(refs[0].File, checker.Not(checker.IsNil))
- c.Assert(refs[0].File.Name, checker.Equals, testTarget)
-
- // remove
- out, err = d.Cmd("service", "update", "--detach", "test", "--secret-rm", testName)
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
- out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
- c.Assert(err, checker.IsNil)
-
- c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
- c.Assert(refs, checker.HasLen, 0)
-}
-
-func (s *DockerSwarmSuite) TestServiceUpdateConfigs(c *check.C) {
- d := s.AddDaemon(c, true, true)
- testName := "test_config"
- id := d.CreateConfig(c, swarm.ConfigSpec{
- Annotations: swarm.Annotations{
- Name: testName,
- },
- Data: []byte("TESTINGDATA"),
- })
- c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id))
- testTarget := "/testing"
- serviceName := "test"
-
- out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top")
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
- // add config
- out, err = d.Cmd("service", "update", "--detach", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
- out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
- c.Assert(err, checker.IsNil)
-
- var refs []swarm.ConfigReference
- c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
- c.Assert(refs, checker.HasLen, 1)
-
- c.Assert(refs[0].ConfigName, checker.Equals, testName)
- c.Assert(refs[0].File, checker.Not(checker.IsNil))
- c.Assert(refs[0].File.Name, checker.Equals, testTarget)
-
- // remove
- out, err = d.Cmd("service", "update", "--detach", "test", "--config-rm", testName)
- c.Assert(err, checker.IsNil, check.Commentf("%s", out))
-
- out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
- c.Assert(err, checker.IsNil)
-
- c.Assert(json.Unmarshal([]byte(out), &refs), checker.IsNil)
- c.Assert(refs, checker.HasLen, 0)
-}
diff --git a/integration/build/build_test.go b/integration/build/build_test.go
index 6a2b1fd..6fe18fc 100644
--- a/integration/build/build_test.go
+++ b/integration/build/build_test.go
@@ -474,6 +474,61 @@
assert.Check(t, is.Contains(out.String(), "Successfully built"))
}
+func TestBuildWithEmptyDockerfile(t *testing.T) {
+ ctx := context.TODO()
+ defer setupTest(t)()
+
+ tests := []struct {
+ name string
+ dockerfile string
+ expectedErr string
+ }{
+ {
+ name: "empty-dockerfile",
+ dockerfile: "",
+ expectedErr: "cannot be empty",
+ },
+ {
+ name: "empty-lines-dockerfile",
+ dockerfile: `
+
+
+
+ `,
+ expectedErr: "file with no instructions",
+ },
+ {
+ name: "comment-only-dockerfile",
+ dockerfile: `# this is a comment`,
+ expectedErr: "file with no instructions",
+ },
+ }
+
+ apiclient := testEnv.APIClient()
+
+ for _, tc := range tests {
+ tc := tc
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ buf := bytes.NewBuffer(nil)
+ w := tar.NewWriter(buf)
+ writeTarRecord(t, w, "Dockerfile", tc.dockerfile)
+ err := w.Close()
+ assert.NilError(t, err)
+
+ _, err = apiclient.ImageBuild(ctx,
+ buf,
+ types.ImageBuildOptions{
+ Remove: true,
+ ForceRemove: true,
+ })
+
+ assert.Check(t, is.Contains(err.Error(), tc.expectedErr))
+ })
+ }
+}
+
func writeTarRecord(t *testing.T, w *tar.Writer, fn, contents string) {
err := w.WriteHeader(&tar.Header{
Name: fn,
diff --git a/integration/container/rename_test.go b/integration/container/rename_test.go
index 25474a7..24bbe98 100644
--- a/integration/container/rename_test.go
+++ b/integration/container/rename_test.go
@@ -61,7 +61,7 @@
assert.NilError(t, err)
assert.Check(t, is.Equal("/"+oldName, inspect.Name))
- newName := "new_name" + stringid.GenerateNonCryptoID()
+ newName := "new_name" + stringid.GenerateRandomID()
err = client.ContainerRename(ctx, oldName, newName)
assert.NilError(t, err)
@@ -79,7 +79,7 @@
cID := container.Run(t, ctx, client, container.WithName(oldName))
poll.WaitOn(t, container.IsInState(ctx, client, cID, "running"), poll.WithDelay(100*time.Millisecond))
- newName := "new_name" + stringid.GenerateNonCryptoID()
+ newName := "new_name" + stringid.GenerateRandomID()
err := client.ContainerRename(ctx, oldName, newName)
assert.NilError(t, err)
diff --git a/integration/internal/container/container.go b/integration/internal/container/container.go
index 20ad774..85e6a24 100644
--- a/integration/internal/container/container.go
+++ b/integration/internal/container/container.go
@@ -2,6 +2,7 @@
import (
"context"
+ "runtime"
"testing"
"github.com/docker/docker/api/types"
@@ -24,10 +25,14 @@
// nolint: golint
func Create(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint
t.Helper()
+ cmd := []string{"top"}
+ if runtime.GOOS == "windows" {
+ cmd = []string{"sleep", "240"}
+ }
config := &TestContainerConfig{
Config: &container.Config{
Image: "busybox",
- Cmd: []string{"top"},
+ Cmd: cmd,
},
HostConfig: &container.HostConfig{},
NetworkingConfig: &network.NetworkingConfig{},
diff --git a/integration/internal/swarm/states.go b/integration/internal/swarm/states.go
index 51d6200..c51e1ee 100644
--- a/integration/internal/swarm/states.go
+++ b/integration/internal/swarm/states.go
@@ -5,6 +5,7 @@
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
+ swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/client"
"gotest.tools/poll"
)
@@ -45,3 +46,27 @@
}
}
}
+
+// RunningTasksCount verifies there are `instances` tasks running for `serviceID`
+func RunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ filter := filters.NewArgs()
+ filter.Add("service", serviceID)
+ tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
+ Filters: filter,
+ })
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case len(tasks) == int(instances):
+ for _, task := range tasks {
+ if task.Status.State != swarmtypes.TaskStateRunning {
+ return poll.Continue("waiting for tasks to enter run state")
+ }
+ }
+ return poll.Success()
+ default:
+ return poll.Continue("task count at %d waiting for %d", len(tasks), instances)
+ }
+ }
+}
diff --git a/integration/network/inspect_test.go b/integration/network/inspect_test.go
index d12ad67..02d2b75 100644
--- a/integration/network/inspect_test.go
+++ b/integration/network/inspect_test.go
@@ -5,9 +5,6 @@
"testing"
"github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- swarmtypes "github.com/docker/docker/api/types/swarm"
- "github.com/docker/docker/client"
"github.com/docker/docker/integration/internal/network"
"github.com/docker/docker/integration/internal/swarm"
"gotest.tools/assert"
@@ -38,7 +35,7 @@
swarm.ServiceWithNetwork(networkName),
)
- poll.WaitOn(t, serviceRunningTasksCount(c, serviceID, instances), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(c, serviceID, instances), swarm.ServicePoll)
tests := []struct {
name string
@@ -103,30 +100,3 @@
assert.NilError(t, err)
poll.WaitOn(t, network.IsRemoved(ctx, c, overlayID), swarm.NetworkPoll)
}
-
-func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
- return func(log poll.LogT) poll.Result {
- tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
- Filters: filters.NewArgs(
- filters.Arg("service", serviceID),
- filters.Arg("desired-state", string(swarmtypes.TaskStateRunning)),
- ),
- })
- switch {
- case err != nil:
- return poll.Error(err)
- case len(tasks) == int(instances):
- for _, task := range tasks {
- if task.Status.Err != "" {
- log.Log("task error:", task.Status.Err)
- }
- if task.Status.State != swarmtypes.TaskStateRunning {
- return poll.Continue("waiting for tasks to enter run state (current status: %s)", task.Status.State)
- }
- }
- return poll.Success()
- default:
- return poll.Continue("task count for service %s at %d waiting for %d", serviceID, len(tasks), instances)
- }
- }
-}
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
index 6e79bec..91e3274 100644
--- a/integration/service/create_test.go
+++ b/integration/service/create_test.go
@@ -42,18 +42,18 @@
booleanFalse := false
serviceID := swarm.CreateService(t, d)
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
i := inspectServiceContainer(t, client, serviceID)
// HostConfig.Init == nil means that it delegates to daemon configuration
assert.Check(t, i.HostConfig.Init == nil)
serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanTrue))
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
i = inspectServiceContainer(t, client, serviceID)
assert.Check(t, is.Equal(true, *i.HostConfig.Init))
serviceID = swarm.CreateService(t, d, swarm.ServiceWithInit(&booleanFalse))
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, 1), swarm.ServicePoll)
i = inspectServiceContainer(t, client, serviceID)
assert.Check(t, is.Equal(false, *i.HostConfig.Init))
}
@@ -97,7 +97,7 @@
}
serviceID := swarm.CreateService(t, d, serviceSpec...)
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
_, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
assert.NilError(t, err)
@@ -108,7 +108,7 @@
poll.WaitOn(t, swarm.NoTasksForService(ctx, client, serviceID), swarm.ServicePoll)
serviceID2 := swarm.CreateService(t, d, serviceSpec...)
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID2, instances), swarm.ServicePoll)
err = client.ServiceRemove(context.Background(), serviceID2)
assert.NilError(t, err)
@@ -147,7 +147,7 @@
swarm.ServiceWithNetwork(name),
)
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
resp, _, err := client.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
assert.NilError(t, err)
@@ -210,7 +210,7 @@
}),
)
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances), swarm.ServicePoll)
filter := filters.NewArgs()
filter.Add("service", serviceID)
@@ -274,7 +274,7 @@
}),
)
- poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances))
+ poll.WaitOn(t, swarm.RunningTasksCount(client, serviceID, instances))
filter := filters.NewArgs()
filter.Add("service", serviceID)
@@ -301,26 +301,3 @@
err = client.ConfigRemove(ctx, configName)
assert.NilError(t, err)
}
-
-func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
- return func(log poll.LogT) poll.Result {
- filter := filters.NewArgs()
- filter.Add("service", serviceID)
- tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
- Filters: filter,
- })
- switch {
- case err != nil:
- return poll.Error(err)
- case len(tasks) == int(instances):
- for _, task := range tasks {
- if task.Status.State != swarmtypes.TaskStateRunning {
- return poll.Continue("waiting for tasks to enter run state")
- }
- }
- return poll.Success()
- default:
- return poll.Continue("task count at %d waiting for %d", len(tasks), instances)
- }
- }
-}
diff --git a/integration/service/update_test.go b/integration/service/update_test.go
new file mode 100644
index 0000000..8575e56
--- /dev/null
+++ b/integration/service/update_test.go
@@ -0,0 +1,287 @@
+package service // import "github.com/docker/docker/integration/service"
+
+import (
+ "context"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ swarmtypes "github.com/docker/docker/api/types/swarm"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/integration/internal/network"
+ "github.com/docker/docker/integration/internal/swarm"
+ "gotest.tools/assert"
+ is "gotest.tools/assert/cmp"
+ "gotest.tools/poll"
+ "gotest.tools/skip"
+)
+
+func TestServiceUpdateLabel(t *testing.T) {
+ skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+ defer setupTest(t)()
+ d := swarm.NewSwarm(t, testEnv)
+ defer d.Stop(t)
+ cli := d.NewClientT(t)
+ defer cli.Close()
+
+ ctx := context.Background()
+ serviceName := "TestService_" + t.Name()
+ serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName))
+ service := getService(t, cli, serviceID)
+ assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{}))
+
+ // add label to empty set
+ service.Spec.Labels["foo"] = "bar"
+ _, err := cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
+
+ // add label to non-empty set
+ service.Spec.Labels["foo2"] = "bar"
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar", "foo2": "bar"}))
+
+ delete(service.Spec.Labels, "foo2")
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
+
+ delete(service.Spec.Labels, "foo")
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{}))
+
+ // now make sure we can add again
+ service.Spec.Labels["foo"] = "bar"
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
+
+ err = cli.ServiceRemove(context.Background(), serviceID)
+ assert.NilError(t, err)
+}
+
+func TestServiceUpdateSecrets(t *testing.T) {
+ skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+ defer setupTest(t)()
+ d := swarm.NewSwarm(t, testEnv)
+ defer d.Stop(t)
+ cli := d.NewClientT(t)
+ defer cli.Close()
+
+ ctx := context.Background()
+ secretName := "TestSecret_" + t.Name()
+ secretTarget := "targetName"
+ resp, err := cli.SecretCreate(ctx, swarmtypes.SecretSpec{
+ Annotations: swarmtypes.Annotations{
+ Name: secretName,
+ },
+ Data: []byte("TESTINGDATA"),
+ })
+ assert.NilError(t, err)
+ assert.Check(t, resp.ID != "")
+
+ serviceName := "TestService_" + t.Name()
+ serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName))
+ service := getService(t, cli, serviceID)
+
+ // add secret
+ service.Spec.TaskTemplate.ContainerSpec.Secrets = append(service.Spec.TaskTemplate.ContainerSpec.Secrets,
+ &swarmtypes.SecretReference{
+ File: &swarmtypes.SecretReferenceFileTarget{
+ Name: secretTarget,
+ UID: "0",
+ GID: "0",
+ Mode: 0600,
+ },
+ SecretID: resp.ID,
+ SecretName: secretName,
+ },
+ )
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+
+ service = getService(t, cli, serviceID)
+ secrets := service.Spec.TaskTemplate.ContainerSpec.Secrets
+ assert.Assert(t, is.Equal(1, len(secrets)))
+
+ secret := *secrets[0]
+ assert.Check(t, is.Equal(secretName, secret.SecretName))
+ assert.Check(t, nil != secret.File)
+ assert.Check(t, is.Equal(secretTarget, secret.File.Name))
+
+ // remove
+ service.Spec.TaskTemplate.ContainerSpec.Secrets = []*swarmtypes.SecretReference{}
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.Equal(0, len(service.Spec.TaskTemplate.ContainerSpec.Secrets)))
+
+ err = cli.ServiceRemove(context.Background(), serviceID)
+ assert.NilError(t, err)
+}
+
+func TestServiceUpdateConfigs(t *testing.T) {
+ skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+ defer setupTest(t)()
+ d := swarm.NewSwarm(t, testEnv)
+ defer d.Stop(t)
+ cli := d.NewClientT(t)
+ defer cli.Close()
+
+ ctx := context.Background()
+ configName := "TestConfig_" + t.Name()
+ configTarget := "targetName"
+ resp, err := cli.ConfigCreate(ctx, swarmtypes.ConfigSpec{
+ Annotations: swarmtypes.Annotations{
+ Name: configName,
+ },
+ Data: []byte("TESTINGDATA"),
+ })
+ assert.NilError(t, err)
+ assert.Check(t, resp.ID != "")
+
+ serviceName := "TestService_" + t.Name()
+ serviceID := swarm.CreateService(t, d, swarm.ServiceWithName(serviceName))
+ service := getService(t, cli, serviceID)
+
+ // add config
+ service.Spec.TaskTemplate.ContainerSpec.Configs = append(service.Spec.TaskTemplate.ContainerSpec.Configs,
+ &swarmtypes.ConfigReference{
+ File: &swarmtypes.ConfigReferenceFileTarget{
+ Name: configTarget,
+ UID: "0",
+ GID: "0",
+ Mode: 0600,
+ },
+ ConfigID: resp.ID,
+ ConfigName: configName,
+ },
+ )
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+
+ service = getService(t, cli, serviceID)
+ configs := service.Spec.TaskTemplate.ContainerSpec.Configs
+ assert.Assert(t, is.Equal(1, len(configs)))
+
+ config := *configs[0]
+ assert.Check(t, is.Equal(configName, config.ConfigName))
+ assert.Check(t, nil != config.File)
+ assert.Check(t, is.Equal(configTarget, config.File.Name))
+
+ // remove
+ service.Spec.TaskTemplate.ContainerSpec.Configs = []*swarmtypes.ConfigReference{}
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ service = getService(t, cli, serviceID)
+ assert.Check(t, is.Equal(0, len(service.Spec.TaskTemplate.ContainerSpec.Configs)))
+
+ err = cli.ServiceRemove(context.Background(), serviceID)
+ assert.NilError(t, err)
+}
+
+func TestServiceUpdateNetwork(t *testing.T) {
+ skip.If(t, testEnv.DaemonInfo.OSType != "linux")
+ defer setupTest(t)()
+ d := swarm.NewSwarm(t, testEnv)
+ defer d.Stop(t)
+ cli := d.NewClientT(t)
+ defer cli.Close()
+
+ ctx := context.Background()
+
+	// Create an overlay network
+ testNet := "testNet" + t.Name()
+ overlayID := network.CreateNoError(t, ctx, cli, testNet,
+ network.WithDriver("overlay"))
+
+ var instances uint64 = 1
+ // Create service with the overlay network
+ serviceName := "TestServiceUpdateNetworkRM_" + t.Name()
+ serviceID := swarm.CreateService(t, d,
+ swarm.ServiceWithReplicas(instances),
+ swarm.ServiceWithName(serviceName),
+ swarm.ServiceWithNetwork(testNet))
+
+ poll.WaitOn(t, swarm.RunningTasksCount(cli, serviceID, instances), swarm.ServicePoll)
+ service := getService(t, cli, serviceID)
+ netInfo, err := cli.NetworkInspect(ctx, testNet, types.NetworkInspectOptions{
+ Verbose: true,
+ Scope: "swarm",
+ })
+ assert.NilError(t, err)
+ assert.Assert(t, len(netInfo.Containers) == 2, "Expected 2 endpoints, one for container and one for LB Sandbox")
+
+	// Remove network from service
+ service.Spec.TaskTemplate.Networks = []swarmtypes.NetworkAttachmentConfig{}
+ _, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
+ assert.NilError(t, err)
+ poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+
+ netInfo, err = cli.NetworkInspect(ctx, testNet, types.NetworkInspectOptions{
+ Verbose: true,
+ Scope: "swarm",
+ })
+
+ assert.NilError(t, err)
+ assert.Assert(t, len(netInfo.Containers) == 0, "Load balancing endpoint still exists in network")
+
+ err = cli.NetworkRemove(ctx, overlayID)
+ assert.NilError(t, err)
+
+ err = cli.ServiceRemove(ctx, serviceID)
+ assert.NilError(t, err)
+}
+
+func getService(t *testing.T, cli client.ServiceAPIClient, serviceID string) swarmtypes.Service {
+ t.Helper()
+ service, _, err := cli.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+ assert.NilError(t, err)
+ return service
+}
+
+func serviceIsUpdated(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ service, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case service.UpdateStatus != nil && service.UpdateStatus.State == swarmtypes.UpdateStateCompleted:
+ return poll.Success()
+ default:
+ if service.UpdateStatus != nil {
+ return poll.Continue("waiting for service %s to be updated, state: %s, message: %s", serviceID, service.UpdateStatus.State, service.UpdateStatus.Message)
+ }
+ return poll.Continue("waiting for service %s to be updated", serviceID)
+ }
+ }
+}
+
+func serviceSpecIsUpdated(client client.ServiceAPIClient, serviceID string, serviceOldVersion uint64) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ service, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case service.Version.Index > serviceOldVersion:
+ return poll.Success()
+ default:
+ return poll.Continue("waiting for service %s to be updated", serviceID)
+ }
+ }
+}
diff --git a/integration/system/info_test.go b/integration/system/info_test.go
index 8005852..8130361 100644
--- a/integration/system/info_test.go
+++ b/integration/system/info_test.go
@@ -44,6 +44,7 @@
}
func TestInfoAPIWarnings(t *testing.T) {
+ skip.If(t, testEnv.IsRemoteDaemon, "cannot run daemon when remote daemon")
skip.If(t, testEnv.DaemonInfo.OSType == "windows", "FIXME")
d := daemon.New(t)
c := d.NewClientT(t)
diff --git a/integration/volume/volume_test.go b/integration/volume/volume_test.go
index 4ee109e..3b2baba 100644
--- a/integration/volume/volume_test.go
+++ b/integration/volume/volume_test.go
@@ -26,6 +26,10 @@
ctx := context.Background()
name := t.Name()
+ // Windows file system is case insensitive
+ if testEnv.OSType == "windows" {
+ name = strings.ToLower(name)
+ }
vol, err := client.VolumeCreate(ctx, volumetypes.VolumeCreateBody{
Name: name,
})
diff --git a/internal/test/daemon/daemon.go b/internal/test/daemon/daemon.go
index c0b5c48..61fbdb4 100644
--- a/internal/test/daemon/daemon.go
+++ b/internal/test/daemon/daemon.go
@@ -268,8 +268,11 @@
wait := make(chan error)
go func() {
- wait <- d.cmd.Wait()
+ ret := d.cmd.Wait()
d.log.Logf("[%s] exiting daemon", d.id)
+ // If we send before logging, we might accidentally log _after_ the test is done.
+ // As of Go 1.12, this incurs a panic instead of silently being dropped.
+ wait <- ret
close(wait)
}()
diff --git a/internal/test/environment/environment.go b/internal/test/environment/environment.go
index 5538d20..76f94a5 100644
--- a/internal/test/environment/environment.go
+++ b/internal/test/environment/environment.go
@@ -75,10 +75,13 @@
}
case "windows":
baseImage := "microsoft/windowsservercore"
- if override := os.Getenv("WINDOWS_BASE_IMAGE"); override != "" {
- baseImage = override
- fmt.Println("INFO: Windows Base image is ", baseImage)
+ if overrideBaseImage := os.Getenv("WINDOWS_BASE_IMAGE"); overrideBaseImage != "" {
+ baseImage = overrideBaseImage
+ if overrideBaseImageTag := os.Getenv("WINDOWS_BASE_IMAGE_TAG"); overrideBaseImageTag != "" {
+ baseImage = baseImage + ":" + overrideBaseImageTag
+ }
}
+ fmt.Println("INFO: Windows Base image is ", baseImage)
return PlatformDefaults{
BaseImage: baseImage,
VolumesConfigPath: filepath.FromSlash(volumesPath),
diff --git a/layer/layer_store.go b/layer/layer_store.go
index 1601465..81730e9 100644
--- a/layer/layer_store.go
+++ b/layer/layer_store.go
@@ -10,6 +10,7 @@
"github.com/docker/distribution"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/locker"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
@@ -36,7 +37,11 @@
mounts map[string]*mountedLayer
mountL sync.Mutex
- os string
+
+ // protect *RWLayer() methods from operating on the same name/id
+ locker *locker.Locker
+
+ os string
}
// StoreOptions are the options used to create a new Store instance
@@ -92,6 +97,7 @@
driver: driver,
layerMap: map[ChainID]*roLayer{},
mounts: map[string]*mountedLayer{},
+ locker: locker.New(),
useTarSplit: !caps.ReproducesExactDiffs,
os: os,
}
@@ -189,6 +195,8 @@
}
func (ls *layerStore) loadMount(mount string) error {
+ ls.mountL.Lock()
+ defer ls.mountL.Unlock()
if _, ok := ls.mounts[mount]; ok {
return nil
}
@@ -477,7 +485,7 @@
return ls.releaseLayer(layer)
}
-func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) {
+func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (_ RWLayer, err error) {
var (
storageOpt map[string]string
initFunc MountInit
@@ -490,14 +498,16 @@
initFunc = opts.InitFunc
}
+ ls.locker.Lock(name)
+ defer ls.locker.Unlock(name)
+
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- m, ok := ls.mounts[name]
+ _, ok := ls.mounts[name]
+ ls.mountL.Unlock()
if ok {
return nil, ErrMountNameConflict
}
- var err error
var pid string
var p *roLayer
if string(parent) != "" {
@@ -517,7 +527,7 @@
}()
}
- m = &mountedLayer{
+ m := &mountedLayer{
name: name,
parent: p,
mountID: ls.mountID(name),
@@ -528,7 +538,7 @@
if initFunc != nil {
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
if err != nil {
- return nil, err
+ return
}
m.initID = pid
}
@@ -538,20 +548,23 @@
}
if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
- return nil, err
+ return
}
if err = ls.saveMount(m); err != nil {
- return nil, err
+ return
}
return m.getReference(), nil
}
func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
+ ls.locker.Lock(id)
+ defer ls.locker.Unlock(id)
+
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- mount, ok := ls.mounts[id]
- if !ok {
+ mount := ls.mounts[id]
+ ls.mountL.Unlock()
+ if mount == nil {
return nil, ErrMountDoesNotExist
}
@@ -560,9 +573,10 @@
func (ls *layerStore) GetMountID(id string) (string, error) {
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- mount, ok := ls.mounts[id]
- if !ok {
+ mount := ls.mounts[id]
+ ls.mountL.Unlock()
+
+ if mount == nil {
return "", ErrMountDoesNotExist
}
logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
@@ -571,10 +585,14 @@
}
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
+ name := l.Name()
+ ls.locker.Lock(name)
+ defer ls.locker.Unlock(name)
+
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- m, ok := ls.mounts[l.Name()]
- if !ok {
+ m := ls.mounts[name]
+ ls.mountL.Unlock()
+ if m == nil {
return []Metadata{}, nil
}
@@ -606,7 +624,9 @@
return nil, err
}
- delete(ls.mounts, m.Name())
+ ls.mountL.Lock()
+ delete(ls.mounts, name)
+ ls.mountL.Unlock()
ls.layerL.Lock()
defer ls.layerL.Unlock()
@@ -634,7 +654,9 @@
}
}
+ ls.mountL.Lock()
ls.mounts[mount.name] = mount
+ ls.mountL.Unlock()
return nil
}
diff --git a/layer/migration.go b/layer/migration.go
index 2668ea9..1250069 100644
--- a/layer/migration.go
+++ b/layer/migration.go
@@ -3,7 +3,6 @@
import (
"compress/gzip"
"errors"
- "fmt"
"io"
"os"
@@ -13,64 +12,6 @@
"github.com/vbatts/tar-split/tar/storage"
)
-// CreateRWLayerByGraphID creates a RWLayer in the layer store using
-// the provided name with the given graphID. To get the RWLayer
-// after migration the layer may be retrieved by the given name.
-func (ls *layerStore) CreateRWLayerByGraphID(name, graphID string, parent ChainID) (err error) {
- ls.mountL.Lock()
- defer ls.mountL.Unlock()
- m, ok := ls.mounts[name]
- if ok {
- if m.parent.chainID != parent {
- return errors.New("name conflict, mismatched parent")
- }
- if m.mountID != graphID {
- return errors.New("mount already exists")
- }
-
- return nil
- }
-
- if !ls.driver.Exists(graphID) {
- return fmt.Errorf("graph ID does not exist: %q", graphID)
- }
-
- var p *roLayer
- if string(parent) != "" {
- p = ls.get(parent)
- if p == nil {
- return ErrLayerDoesNotExist
- }
-
- // Release parent chain if error
- defer func() {
- if err != nil {
- ls.layerL.Lock()
- ls.releaseLayer(p)
- ls.layerL.Unlock()
- }
- }()
- }
-
- // TODO: Ensure graphID has correct parent
-
- m = &mountedLayer{
- name: name,
- parent: p,
- mountID: graphID,
- layerStore: ls,
- references: map[RWLayer]*referencedRWLayer{},
- }
-
- // Check for existing init layer
- initID := fmt.Sprintf("%s-init", graphID)
- if ls.driver.Exists(initID) {
- m.initID = initID
- }
-
- return ls.saveMount(m)
-}
-
func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
defer func() {
if err != nil {
diff --git a/layer/migration_test.go b/layer/migration_test.go
index 9231663..2b5c330 100644
--- a/layer/migration_test.go
+++ b/layer/migration_test.go
@@ -3,7 +3,6 @@
import (
"bytes"
"compress/gzip"
- "fmt"
"io"
"io/ioutil"
"os"
@@ -12,7 +11,6 @@
"testing"
"github.com/docker/docker/daemon/graphdriver"
- "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stringid"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
@@ -269,161 +267,3 @@
assertMetadata(t, metadata, createMetadata(layer2a))
}
-
-func TestMountMigration(t *testing.T) {
- // TODO Windows: Figure out why this is failing (obvious - paths... needs porting)
- if runtime.GOOS == "windows" {
- t.Skip("Failing on Windows")
- }
- ls, _, cleanup := newTestStore(t)
- defer cleanup()
-
- baseFiles := []FileApplier{
- newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
- newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
- }
- initFiles := []FileApplier{
- newTestFile("/etc/hosts", []byte{}, 0644),
- newTestFile("/etc/resolv.conf", []byte{}, 0644),
- }
- mountFiles := []FileApplier{
- newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644),
- newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
- newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644),
- }
-
- initTar, err := tarFromFiles(initFiles...)
- if err != nil {
- t.Fatal(err)
- }
-
- mountTar, err := tarFromFiles(mountFiles...)
- if err != nil {
- t.Fatal(err)
- }
-
- graph := ls.(*layerStore).driver
-
- layer1, err := createLayer(ls, "", initWithFiles(baseFiles...))
- if err != nil {
- t.Fatal(err)
- }
-
- graphID1 := layer1.(*referencedCacheLayer).cacheID
-
- containerID := stringid.GenerateRandomID()
- containerInit := fmt.Sprintf("%s-init", containerID)
-
- if err := graph.Create(containerInit, graphID1, nil); err != nil {
- t.Fatal(err)
- }
- if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil {
- t.Fatal(err)
- }
-
- if err := graph.Create(containerID, containerInit, nil); err != nil {
- t.Fatal(err)
- }
- if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil {
- t.Fatal(err)
- }
-
- if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil {
- t.Fatal(err)
- }
-
- rwLayer1, err := ls.GetRWLayer("migration-mount")
- if err != nil {
- t.Fatal(err)
- }
-
- if _, err := rwLayer1.Mount(""); err != nil {
- t.Fatal(err)
- }
-
- changes, err := rwLayer1.Changes()
- if err != nil {
- t.Fatal(err)
- }
-
- if expected := 5; len(changes) != expected {
- t.Logf("Changes %#v", changes)
- t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected)
- }
-
- sortChanges(changes)
-
- assertChange(t, changes[0], archive.Change{
- Path: "/etc",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[1], archive.Change{
- Path: "/etc/hosts",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[2], archive.Change{
- Path: "/root",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[3], archive.Change{
- Path: "/root/.bashrc",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[4], archive.Change{
- Path: "/root/testfile1.txt",
- Kind: archive.ChangeAdd,
- })
-
- if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil {
- t.Fatal("Expected error creating mount with same name")
- } else if err != ErrMountNameConflict {
- t.Fatal(err)
- }
-
- rwLayer2, err := ls.GetRWLayer("migration-mount")
- if err != nil {
- t.Fatal(err)
- }
-
- if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) {
- t.Fatal("Expected same layer from get with same name as from migrate")
- }
-
- if _, err := rwLayer2.Mount(""); err != nil {
- t.Fatal(err)
- }
-
- if _, err := rwLayer2.Mount(""); err != nil {
- t.Fatal(err)
- }
-
- if metadata, err := ls.Release(layer1); err != nil {
- t.Fatal(err)
- } else if len(metadata) > 0 {
- t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata)
- }
-
- if err := rwLayer1.Unmount(); err != nil {
- t.Fatal(err)
- }
-
- if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil {
- t.Fatal(err)
- }
-
- if err := rwLayer2.Unmount(); err != nil {
- t.Fatal(err)
- }
- if err := rwLayer2.Unmount(); err != nil {
- t.Fatal(err)
- }
- metadata, err := ls.ReleaseRWLayer(rwLayer2)
- if err != nil {
- t.Fatal(err)
- }
- if len(metadata) == 0 {
- t.Fatal("Expected base layer to be deleted when deleting mount")
- }
-
- assertMetadata(t, metadata, createMetadata(layer1))
-}
diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go
index d6858c6..c5d9e0e 100644
--- a/layer/mounted_layer.go
+++ b/layer/mounted_layer.go
@@ -2,6 +2,7 @@
import (
"io"
+ "sync"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs"
@@ -15,6 +16,7 @@
path string
layerStore *layerStore
+ sync.Mutex
references map[RWLayer]*referencedRWLayer
}
@@ -62,16 +64,24 @@
ref := &referencedRWLayer{
mountedLayer: ml,
}
+ ml.Lock()
ml.references[ref] = ref
+ ml.Unlock()
return ref
}
func (ml *mountedLayer) hasReferences() bool {
- return len(ml.references) > 0
+ ml.Lock()
+ ret := len(ml.references) > 0
+ ml.Unlock()
+
+ return ret
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
+ ml.Lock()
+ defer ml.Unlock()
if _, ok := ml.references[ref]; !ok {
return ErrLayerNotRetained
}
@@ -81,7 +91,9 @@
func (ml *mountedLayer) retakeReference(r RWLayer) {
if ref, ok := r.(*referencedRWLayer); ok {
+ ml.Lock()
ml.references[ref] = ref
+ ml.Unlock()
}
}
diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go
index 2d9d662..6ff61e6 100644
--- a/pkg/chrootarchive/archive.go
+++ b/pkg/chrootarchive/archive.go
@@ -27,18 +27,34 @@
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return untarHandler(tarArchive, dest, options, true)
+ return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`, if it is not an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ return untarHandler(tarArchive, dest, options, true, root)
}
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return untarHandler(tarArchive, dest, options, false)
+ return untarHandler(tarArchive, dest, options, false, dest)
}
// Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
@@ -69,5 +85,13 @@
r = decompressedArchive
}
- return invokeUnpack(r, dest, options)
+ return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ return invokePack(srcPath, options, root)
}
diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go
index 5df8afd..ea2879d 100644
--- a/pkg/chrootarchive/archive_unix.go
+++ b/pkg/chrootarchive/archive_unix.go
@@ -10,10 +10,13 @@
"io"
"io/ioutil"
"os"
+ "path/filepath"
"runtime"
+ "strings"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
+ "github.com/pkg/errors"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
@@ -23,18 +26,28 @@
runtime.LockOSThread()
flag.Parse()
- var options *archive.TarOptions
+ var options archive.TarOptions
//read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
- if err := chroot(flag.Arg(0)); err != nil {
+ dst := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = dst
+ }
+
+ if err := chroot(root); err != nil {
fatal(err)
}
- if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+ if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
@@ -45,7 +58,10 @@
os.Exit(0)
}
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ if root == "" {
+ return errors.New("must specify a root to chroot to")
+ }
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
@@ -57,7 +73,21 @@
return fmt.Errorf("Untar pipe failure: %v", err)
}
- cmd := reexec.Command("docker-untar", dest)
+ if root != "" {
+ relDest, err := filepath.Rel(root, dest)
+ if err != nil {
+ return err
+ }
+ if relDest == "." {
+ relDest = "/"
+ }
+ if relDest[0] != '/' {
+ relDest = "/" + relDest
+ }
+ dest = relDest
+ }
+
+ cmd := reexec.Command("docker-untar", dest, root)
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
@@ -69,6 +99,7 @@
w.Close()
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
+
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
@@ -86,3 +117,92 @@
}
return nil
}
+
+func tar() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ src := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = src
+ }
+
+ if err := realChroot(root); err != nil {
+ fatal(err)
+ }
+
+ var options archive.TarOptions
+ if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ rdr, err := archive.TarWithOptions(src, &options)
+ if err != nil {
+ fatal(err)
+ }
+ defer rdr.Close()
+
+ if _, err := io.Copy(os.Stdout, rdr); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if root == "" {
+ return nil, errors.New("root path must not be empty")
+ }
+
+ relSrc, err := filepath.Rel(root, srcPath)
+ if err != nil {
+ return nil, err
+ }
+ if relSrc == "." {
+ relSrc = "/"
+ }
+ if relSrc[0] != '/' {
+ relSrc = "/" + relSrc
+ }
+
+ // make sure we didn't trim a trailing slash with the call to `Rel`
+ if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+ relSrc += "/"
+ }
+
+ cmd := reexec.Command("docker-tar", relSrc, root)
+
+ errBuff := bytes.NewBuffer(nil)
+ cmd.Stderr = errBuff
+
+ tarR, tarW := io.Pipe()
+ cmd.Stdout = tarW
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ }
+
+ if err := cmd.Start(); err != nil {
+ return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ }
+
+ go func() {
+ err := cmd.Wait()
+ err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ tarW.CloseWithError(err)
+ }()
+
+ if err := json.NewEncoder(stdin).Encode(options); err != nil {
+ stdin.Close()
+ return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ }
+ stdin.Close()
+
+ return tarR, nil
+}
diff --git a/pkg/chrootarchive/archive_unix_test.go b/pkg/chrootarchive/archive_unix_test.go
new file mode 100644
index 0000000..f39a88a
--- /dev/null
+++ b/pkg/chrootarchive/archive_unix_test.go
@@ -0,0 +1,171 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ gotar "archive/tar"
+ "bytes"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/archive"
+ "golang.org/x/sys/unix"
+ "gotest.tools/assert"
+)
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not copy data to a
+// container path that will actually overwrite data on the host
+func TestUntarWithMaliciousSymlinks(t *testing.T) {
+ dir, err := ioutil.TempDir("", t.Name())
+ assert.NilError(t, err)
+ defer os.RemoveAll(dir)
+
+ root := filepath.Join(dir, "root")
+
+ err = os.MkdirAll(root, 0755)
+ assert.NilError(t, err)
+
+ // Add a file into a directory above root
+ // Ensure that we can't access this file while tarring.
+ err = ioutil.WriteFile(filepath.Join(dir, "host-file"), []byte("I am a host file"), 0644)
+ assert.NilError(t, err)
+
+	// Create some data which will be copied into the "container" root into
+ // the symlinked path.
+ // Before this change, the copy would overwrite the "host" content.
+ // With this change it should not.
+ data := filepath.Join(dir, "data")
+ err = os.MkdirAll(data, 0755)
+ assert.NilError(t, err)
+ err = ioutil.WriteFile(filepath.Join(data, "local-file"), []byte("pwn3d"), 0644)
+ assert.NilError(t, err)
+
+ safe := filepath.Join(root, "safe")
+ err = unix.Symlink(dir, safe)
+ assert.NilError(t, err)
+
+ rdr, err := archive.TarWithOptions(data, &archive.TarOptions{IncludeFiles: []string{"local-file"}, RebaseNames: map[string]string{"local-file": "host-file"}})
+ assert.NilError(t, err)
+
+ // Use tee to test both the good case and the bad case w/o recreating the archive
+ bufRdr := bytes.NewBuffer(nil)
+ tee := io.TeeReader(rdr, bufRdr)
+
+ err = UntarWithRoot(tee, safe, nil, root)
+ assert.Assert(t, err != nil)
+ assert.ErrorContains(t, err, "open /safe/host-file: no such file or directory")
+
+	// Make sure the "host" file is still intact
+ // Before the fix the host file would be overwritten
+ hostData, err := ioutil.ReadFile(filepath.Join(dir, "host-file"))
+ assert.NilError(t, err)
+ assert.Equal(t, string(hostData), "I am a host file")
+
+ // Now test by chrooting to an attacker controlled path
+ // This should succeed as is and overwrite a "host" file
+ // Note that this would be a mis-use of this function.
+ err = UntarWithRoot(bufRdr, safe, nil, safe)
+ assert.NilError(t, err)
+
+ hostData, err = ioutil.ReadFile(filepath.Join(dir, "host-file"))
+ assert.NilError(t, err)
+ assert.Equal(t, string(hostData), "pwn3d")
+}
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not unwittingly leak
+// host data into the archive.
+func TestTarWithMaliciousSymlinks(t *testing.T) {
+ dir, err := ioutil.TempDir("", t.Name())
+ assert.NilError(t, err)
+	defer os.RemoveAll(dir)
+ t.Log(dir)
+
+ root := filepath.Join(dir, "root")
+
+ err = os.MkdirAll(root, 0755)
+ assert.NilError(t, err)
+
+ hostFileData := []byte("I am a host file")
+
+ // Add a file into a directory above root
+ // Ensure that we can't access this file while tarring.
+ err = ioutil.WriteFile(filepath.Join(dir, "host-file"), hostFileData, 0644)
+ assert.NilError(t, err)
+
+ safe := filepath.Join(root, "safe")
+ err = unix.Symlink(dir, safe)
+ assert.NilError(t, err)
+
+ data := filepath.Join(dir, "data")
+ err = os.MkdirAll(data, 0755)
+ assert.NilError(t, err)
+
+ type testCase struct {
+ p string
+ includes []string
+ }
+
+ cases := []testCase{
+ {p: safe, includes: []string{"host-file"}},
+ {p: safe + "/", includes: []string{"host-file"}},
+ {p: safe, includes: nil},
+ {p: safe + "/", includes: nil},
+ {p: root, includes: []string{"safe/host-file"}},
+ {p: root, includes: []string{"/safe/host-file"}},
+ {p: root, includes: nil},
+ }
+
+ maxBytes := len(hostFileData)
+
+ for _, tc := range cases {
+ t.Run(path.Join(tc.p+"_"+strings.Join(tc.includes, "_")), func(t *testing.T) {
+ // Here if we use archive.TarWithOptions directly or change the "root" parameter
+ // to be the same as "safe", data from the host will be leaked into the archive
+ var opts *archive.TarOptions
+ if tc.includes != nil {
+ opts = &archive.TarOptions{
+ IncludeFiles: tc.includes,
+ }
+ }
+ rdr, err := Tar(tc.p, opts, root)
+ assert.NilError(t, err)
+ defer rdr.Close()
+
+ tr := gotar.NewReader(rdr)
+ assert.Assert(t, !isDataInTar(t, tr, hostFileData, int64(maxBytes)), "host data leaked to archive")
+ })
+ }
+}
+
+func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool {
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ assert.NilError(t, err)
+
+ if h.Size == 0 {
+ continue
+ }
+ assert.Assert(t, h.Size <= maxBytes, "%s: file size exceeds max expected size %d: %d", h.Name, maxBytes, h.Size)
+
+ data := make([]byte, int(h.Size))
+ _, err = io.ReadFull(tr, data)
+ assert.NilError(t, err)
+ if bytes.Contains(data, compare) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/pkg/chrootarchive/archive_windows.go b/pkg/chrootarchive/archive_windows.go
index f297313..de87113 100644
--- a/pkg/chrootarchive/archive_windows.go
+++ b/pkg/chrootarchive/archive_windows.go
@@ -14,9 +14,16 @@
func invokeUnpack(decompressedArchive io.ReadCloser,
dest string,
- options *archive.TarOptions) error {
+ options *archive.TarOptions, root string) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ // Windows is different to Linux here because Windows does not support
+ // chroot. Hence there is no point sandboxing a chrooted process to
+ // do the pack. We call inline instead within the daemon process.
+ return archive.TarWithOptions(srcPath, options)
+}
diff --git a/pkg/chrootarchive/init_unix.go b/pkg/chrootarchive/init_unix.go
index a15e4bb..c24fea7 100644
--- a/pkg/chrootarchive/init_unix.go
+++ b/pkg/chrootarchive/init_unix.go
@@ -14,6 +14,7 @@
func init() {
reexec.Register("docker-applyLayer", applyLayer)
reexec.Register("docker-untar", untar)
+ reexec.Register("docker-tar", tar)
}
func fatal(err error) {
diff --git a/pkg/idtools/idtools.go b/pkg/idtools/idtools.go
index 230422e..b3af7a4 100644
--- a/pkg/idtools/idtools.go
+++ b/pkg/idtools/idtools.go
@@ -4,7 +4,6 @@
"bufio"
"fmt"
"os"
- "sort"
"strconv"
"strings"
)
@@ -203,8 +202,6 @@
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
- // sort the ranges by lowest ID first
- sort.Sort(subidRanges)
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
diff --git a/pkg/idtools/idtools_test.go b/pkg/idtools/idtools_test.go
new file mode 100644
index 0000000..7627d19
--- /dev/null
+++ b/pkg/idtools/idtools_test.go
@@ -0,0 +1,28 @@
+package idtools // import "github.com/docker/docker/pkg/idtools"
+
+import (
+ "testing"
+
+ "gotest.tools/assert"
+)
+
+func TestCreateIDMapOrder(t *testing.T) {
+ subidRanges := ranges{
+ {100000, 1000},
+ {1000, 1},
+ }
+
+ idMap := createIDMap(subidRanges)
+ assert.DeepEqual(t, idMap, []IDMap{
+ {
+ ContainerID: 0,
+ HostID: 100000,
+ Size: 1000,
+ },
+ {
+ ContainerID: 1000,
+ HostID: 1000,
+ Size: 1,
+ },
+ })
+}
diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go
index 874aff6..4afd63c 100644
--- a/pkg/mount/mount.go
+++ b/pkg/mount/mount.go
@@ -2,12 +2,46 @@
import (
"sort"
+ "strconv"
"strings"
- "syscall"
"github.com/sirupsen/logrus"
)
+// mountError records an error from mount or unmount operation
+type mountError struct {
+ op string
+ source, target string
+ flags uintptr
+ data string
+ err error
+}
+
+func (e *mountError) Error() string {
+ out := e.op + " "
+
+ if e.source != "" {
+ out += e.source + ":" + e.target
+ } else {
+ out += e.target
+ }
+
+ if e.flags != uintptr(0) {
+ out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
+ }
+ if e.data != "" {
+ out += ", data: " + e.data
+ }
+
+ out += ": " + e.err.Error()
+ return out
+}
+
+// Cause returns the underlying cause of the error
+func (e *mountError) Cause() error {
+ return e.err
+}
+
// FilterFunc is a type defining a callback function
// to filter out unwanted entries. It takes a pointer
// to an Info struct (not fully populated, currently
@@ -89,12 +123,7 @@
// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
- err := unmount(target, mntDetach)
- if err == syscall.EINVAL {
- // ignore "not mounted" error
- err = nil
- }
- return err
+ return unmount(target, mntDetach)
}
// RecursiveUnmount unmounts the target and all mounts underneath, starting with
@@ -114,25 +143,14 @@
logrus.Debugf("Trying to unmount %s", m.Mountpoint)
err = unmount(m.Mountpoint, mntDetach)
if err != nil {
- // If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is
- // not a mountpoint (which is ok in this case).
- // Meanwhile calling `Mounted()` is very expensive.
- //
- // We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching
- // Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms),
- // this is nicer than defining a custom value that we can refer to in each platform file.
- if err == syscall.EINVAL {
- continue
- }
- if i == len(mounts)-1 {
+ if i == len(mounts)-1 { // last mount
if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
return err
}
- continue
+ } else {
+ // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
+ logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
}
- // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
- logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
- continue
}
logrus.Debugf("Unmounted %s", m.Mountpoint)
diff --git a/pkg/mount/mounter_freebsd.go b/pkg/mount/mounter_freebsd.go
index b6ab83a..09ad360 100644
--- a/pkg/mount/mounter_freebsd.go
+++ b/pkg/mount/mounter_freebsd.go
@@ -11,11 +11,9 @@
import "C"
import (
- "fmt"
"strings"
+ "syscall"
"unsafe"
-
- "golang.org/x/sys/unix"
)
func allocateIOVecs(options []string) []C.struct_iovec {
@@ -49,12 +47,13 @@
}
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
- reason := C.GoString(C.strerror(*C.__error()))
- return fmt.Errorf("Failed to call nmount: %s", reason)
+ return &mountError{
+ op: "mount",
+ source: device,
+ target: target,
+ flags: flag,
+ err: syscall.Errno(errno),
+ }
}
return nil
}
-
-func unmount(target string, flag int) error {
- return unix.Unmount(target, flag)
-}
diff --git a/pkg/mount/mounter_linux.go b/pkg/mount/mounter_linux.go
index 631daf1..48837ad 100644
--- a/pkg/mount/mounter_linux.go
+++ b/pkg/mount/mounter_linux.go
@@ -33,25 +33,42 @@
// Initial call applying all non-propagation flags for mount
// or remount with changed data
if err := unix.Mount(device, target, mType, oflags, data); err != nil {
- return err
+ return &mountError{
+ op: "mount",
+ source: device,
+ target: target,
+ flags: oflags,
+ data: data,
+ err: err,
+ }
}
}
if flags&ptypes != 0 {
// Change the propagation type.
if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
+ return &mountError{
+ op: "remount",
+ target: target,
+ flags: flags & pflags,
+ err: err,
+ }
-			return err
}
}
if oflags&broflags == broflags {
// Remount the bind to apply read only.
- return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
+ if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil {
+ return &mountError{
+ op: "remount-ro",
+ target: target,
+ flags: oflags | unix.MS_REMOUNT,
+ err: err,
+ }
+
+ }
}
return nil
}
-
-func unmount(target string, flag int) error {
- return unix.Unmount(target, flag)
-}
diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go
index 0195144..7a37f66 100644
--- a/pkg/mount/sharedsubtree_linux_test.go
+++ b/pkg/mount/sharedsubtree_linux_test.go
@@ -7,6 +7,7 @@
"path"
"testing"
+ "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -326,7 +327,7 @@
}()
// then attempt to mount it to target. It should fail
- if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != unix.EINVAL {
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && errors.Cause(err) != unix.EINVAL {
t.Fatal(err)
} else if err == nil {
t.Fatalf("%q should not have been bindable", sourceDir)
diff --git a/pkg/mount/unmount_unix.go b/pkg/mount/unmount_unix.go
new file mode 100644
index 0000000..4be4276
--- /dev/null
+++ b/pkg/mount/unmount_unix.go
@@ -0,0 +1,22 @@
+// +build !windows
+
+package mount // import "github.com/docker/docker/pkg/mount"
+
+import "golang.org/x/sys/unix"
+
+func unmount(target string, flags int) error {
+ err := unix.Unmount(target, flags)
+ if err == nil || err == unix.EINVAL {
+ // Ignore "not mounted" error here. Note the same error
+ // can be returned if flags are invalid, so this code
+ // assumes that the flags value is always correct.
+ return nil
+ }
+
+ return &mountError{
+ op: "umount",
+ target: target,
+ flags: uintptr(flags),
+ err: err,
+ }
+}
diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go
index fa7d916..5fe071d 100644
--- a/pkg/stringid/stringid.go
+++ b/pkg/stringid/stringid.go
@@ -2,17 +2,12 @@
package stringid // import "github.com/docker/docker/pkg/stringid"
import (
- cryptorand "crypto/rand"
+ "crypto/rand"
"encoding/hex"
"fmt"
- "io"
- "math"
- "math/big"
- "math/rand"
"regexp"
"strconv"
"strings"
- "time"
)
const shortLen = 12
@@ -41,10 +36,11 @@
return id
}
-func generateID(r io.Reader) string {
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
b := make([]byte, 32)
for {
- if _, err := io.ReadFull(r, b); err != nil {
+ if _, err := rand.Read(b); err != nil {
panic(err) // This shouldn't happen
}
id := hex.EncodeToString(b)
@@ -58,18 +54,6 @@
}
}
-// GenerateRandomID returns a unique id.
-func GenerateRandomID() string {
- return generateID(cryptorand.Reader)
-}
-
-// GenerateNonCryptoID generates unique id without using cryptographically
-// secure sources of random.
-// It helps you to save entropy.
-func GenerateNonCryptoID() string {
- return generateID(readerFunc(rand.Read))
-}
-
// ValidateID checks whether an ID string is a valid image ID.
func ValidateID(id string) error {
if ok := validHex.MatchString(id); !ok {
@@ -77,23 +61,3 @@
}
return nil
}
-
-func init() {
- // safely set the seed globally so we generate random ids. Tries to use a
- // crypto seed before falling back to time.
- var seed int64
- if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
- // This should not happen, but worst-case fallback to time-based seed.
- seed = time.Now().UnixNano()
- } else {
- seed = cryptoseed.Int64()
- }
-
- rand.Seed(seed)
-}
-
-type readerFunc func(p []byte) (int, error)
-
-func (fn readerFunc) Read(p []byte) (int, error) {
- return fn(p)
-}
diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go
index a7ccd5f..2660d2e 100644
--- a/pkg/stringid/stringid_test.go
+++ b/pkg/stringid/stringid_test.go
@@ -13,14 +13,6 @@
}
}
-func TestGenerateNonCryptoID(t *testing.T) {
- id := GenerateNonCryptoID()
-
- if len(id) != 64 {
- t.Fatalf("Id returned is incorrect: %s", id)
- }
-}
-
func TestShortenId(t *testing.T) {
id := "90435eec5c4e124e741ef731e118be2fc799a68aba0466ec17717f24ce2ae6a2"
truncID := TruncateID(id)
diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go
index e259017..6d00a24 100644
--- a/pkg/truncindex/truncindex_test.go
+++ b/pkg/truncindex/truncindex_test.go
@@ -158,7 +158,7 @@
func BenchmarkTruncIndexAdd100(b *testing.B) {
var testSet []string
for i := 0; i < 100; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -174,7 +174,7 @@
func BenchmarkTruncIndexAdd250(b *testing.B) {
var testSet []string
for i := 0; i < 250; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -190,7 +190,7 @@
func BenchmarkTruncIndexAdd500(b *testing.B) {
var testSet []string
for i := 0; i < 500; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -207,7 +207,7 @@
var testSet []string
var testKeys []string
for i := 0; i < 100; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
index := NewTruncIndex([]string{})
for _, id := range testSet {
@@ -231,7 +231,7 @@
var testSet []string
var testKeys []string
for i := 0; i < 250; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
index := NewTruncIndex([]string{})
for _, id := range testSet {
@@ -255,7 +255,7 @@
var testSet []string
var testKeys []string
for i := 0; i < 500; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
index := NewTruncIndex([]string{})
for _, id := range testSet {
@@ -278,7 +278,7 @@
func BenchmarkTruncIndexDelete100(b *testing.B) {
var testSet []string
for i := 0; i < 100; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -301,7 +301,7 @@
func BenchmarkTruncIndexDelete250(b *testing.B) {
var testSet []string
for i := 0; i < 250; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -324,7 +324,7 @@
func BenchmarkTruncIndexDelete500(b *testing.B) {
var testSet []string
for i := 0; i < 500; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -347,7 +347,7 @@
func BenchmarkTruncIndexNew100(b *testing.B) {
var testSet []string
for i := 0; i < 100; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -358,7 +358,7 @@
func BenchmarkTruncIndexNew250(b *testing.B) {
var testSet []string
for i := 0; i < 250; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -369,7 +369,7 @@
func BenchmarkTruncIndexNew500(b *testing.B) {
var testSet []string
for i := 0; i < 500; i++ {
- testSet = append(testSet, stringid.GenerateNonCryptoID())
+ testSet = append(testSet, stringid.GenerateRandomID())
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -381,7 +381,7 @@
var testSet []string
var testKeys []string
for i := 0; i < 500; i++ {
- id := stringid.GenerateNonCryptoID()
+ id := stringid.GenerateRandomID()
testSet = append(testSet, id)
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
@@ -406,7 +406,7 @@
var testSet []string
var testKeys []string
for i := 0; i < 500; i++ {
- id := stringid.GenerateNonCryptoID()
+ id := stringid.GenerateRandomID()
testSet = append(testSet, id)
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
@@ -431,7 +431,7 @@
var testSet []string
var testKeys []string
for i := 0; i < 500; i++ {
- id := stringid.GenerateNonCryptoID()
+ id := stringid.GenerateRandomID()
testSet = append(testSet, id)
l := rand.Intn(12) + 12
testKeys = append(testKeys, id[:l])
diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go
index df1fe5b..86ada8d 100644
--- a/plugin/manager_linux.go
+++ b/plugin/manager_linux.go
@@ -61,7 +61,7 @@
if err := pm.executor.Create(p.GetID(), *spec, stdout, stderr); err != nil {
if p.PluginObj.Config.PropagatedMount != "" {
if err := mount.Unmount(propRoot); err != nil {
- logrus.Warnf("Could not unmount %s: %v", propRoot, err)
+			logrus.WithField("plugin", p.Name()).WithError(err).Warn("Failed to unmount plugin propagated mount root")
}
}
return errors.WithStack(err)
diff --git a/plugin/manager_linux_test.go b/plugin/manager_linux_test.go
index fd8fa85..1b6a3bf 100644
--- a/plugin/manager_linux_test.go
+++ b/plugin/manager_linux_test.go
@@ -70,7 +70,7 @@
}
func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
- id := stringid.GenerateNonCryptoID()
+ id := stringid.GenerateRandomID()
rootfs := filepath.Join(root, id)
if err := os.MkdirAll(rootfs, 0755); err != nil {
t.Fatal(err)
diff --git a/vendor.conf b/vendor.conf
index 43d78c9..c508c5f 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -26,7 +26,7 @@
golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
# buildkit
-github.com/moby/buildkit ed4da8b4a9661f278ae8433056ca37d0727c408b # docker-18.09 branch
+github.com/moby/buildkit 05766c5c21a1e528eeb1c3522b2f05493fe9ac47 # docker-18.09 branch
github.com/tonistiigi/fsutil 2862f6bc5ac9b97124e552a5c108230b38a1b0ca
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -37,7 +37,7 @@
#get libnetwork packages
# When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy accordingly
-github.com/docker/libnetwork 872f0a83c98add6cae255c8859e29532febc0039 # bump_18.09 branch
+github.com/docker/libnetwork e7933d41e7b206756115aa9df5e0599fc5169742 # bump_18.09 branch
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -121,7 +121,7 @@
github.com/containerd/containerd 9754871865f7fe2f4e74d43e2fc7ccd237edcbce # v1.2.2
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
-github.com/containerd/cgroups dbea6f2bd41658b84b00417ceefa416b979cbf10
+github.com/containerd/cgroups 4994991857f9b0ae8dc439551e8bebdbb4bf66c1
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
github.com/containerd/cri 0d5cabd006cb5319dc965046067b8432d9fa5ef8 # release/1.2 branch
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
index 9fbea82..e3ef076 100644
--- a/vendor/github.com/containerd/cgroups/cgroup.go
+++ b/vendor/github.com/containerd/cgroups/cgroup.go
@@ -105,6 +105,10 @@
}
activeSubsystems = append(activeSubsystems, s)
}
+ // if we do not have any active systems then the cgroup is deleted
+ if len(activeSubsystems) == 0 {
+ return nil, ErrCgroupDeleted
+ }
return &cgroup{
path: path,
subsystems: activeSubsystems,
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go b/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go
index 51791fd..4ad25c1 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go
@@ -48,6 +48,12 @@
err error
)
+ // Windows does not support a host ip for port bindings (this is validated in ConvertPortBindings()).
+ // If the HostIP is nil, force it to be 0.0.0.0 for use as the key in portMapper.
+ if bnd.HostIP == nil {
+ bnd.HostIP = net.IPv4zero
+ }
+
// Store the container interface address in the operational binding
bnd.IP = containerIP
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
index c8ab047..b8a5f73 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
@@ -462,7 +462,7 @@
return nil, fmt.Errorf("Windows does not support more than one host port in NAT settings")
}
- if len(elem.HostIP) != 0 {
+ if len(elem.HostIP) != 0 && !elem.HostIP.IsUnspecified() {
return nil, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
index 0453f3a..262a768 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
@@ -275,6 +275,11 @@
if len(warnings) > 0 {
warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.")
}
+
+ if root.StartLine < 0 {
+ return nil, errors.New("file with no instructions.")
+ }
+
return &Result{
AST: root,
Warnings: warnings,
diff --git a/volume/local/local.go b/volume/local/local.go
index 7190de9..d3119cb 100644
--- a/volume/local/local.go
+++ b/volume/local/local.go
@@ -344,7 +344,7 @@
if v.opts != nil {
if err := mount.Unmount(v.path); err != nil {
if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil {
- return errdefs.System(errors.Wrapf(err, "error while unmounting volume path '%s'", v.path))
+ return errdefs.System(err)
}
}
v.active.mounted = false
diff --git a/volume/local/local_unix.go b/volume/local/local_unix.go
index b1c68b9..5ee2ed8 100644
--- a/volume/local/local_unix.go
+++ b/volume/local/local_unix.go
@@ -86,7 +86,7 @@
}
}
err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts)
- return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts)
+ return errors.Wrap(err, "failed to mount local volume")
}
func (v *localVolume) CreatedAt() (time.Time, error) {
diff --git a/volume/mounts/linux_parser.go b/volume/mounts/linux_parser.go
index 8e436ae..22ba747 100644
--- a/volume/mounts/linux_parser.go
+++ b/volume/mounts/linux_parser.go
@@ -82,7 +82,10 @@
}
if validateBindSourceExists {
- exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source)
+ exists, _, err := currentFileInfoProvider.fileInfo(mnt.Source)
+ if err != nil {
+ return &errMountConfig{mnt, err}
+ }
if !exists {
return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)}
}
@@ -292,7 +295,7 @@
switch cfg.Type {
case mount.TypeVolume:
if cfg.Source == "" {
- mp.Name = stringid.GenerateNonCryptoID()
+ mp.Name = stringid.GenerateRandomID()
} else {
mp.Name = cfg.Source
}
diff --git a/volume/mounts/mounts.go b/volume/mounts/mounts.go
index 63a1406..5bf169f 100644
--- a/volume/mounts/mounts.go
+++ b/volume/mounts/mounts.go
@@ -125,7 +125,7 @@
if m.Volume != nil {
id := m.ID
if id == "" {
- id = stringid.GenerateNonCryptoID()
+ id = stringid.GenerateRandomID()
}
path, err := m.Volume.Mount(id)
if err != nil {
diff --git a/volume/mounts/parser_test.go b/volume/mounts/parser_test.go
index 27257d6..f9b32e5 100644
--- a/volume/mounts/parser_test.go
+++ b/volume/mounts/parser_test.go
@@ -1,6 +1,7 @@
package mounts // import "github.com/docker/docker/volume/mounts"
import (
+ "errors"
"io/ioutil"
"os"
"runtime"
@@ -8,6 +9,8 @@
"testing"
"github.com/docker/docker/api/types/mount"
+ "gotest.tools/assert"
+ "gotest.tools/assert/cmp"
)
type parseMountRawTestSet struct {
@@ -477,4 +480,51 @@
t.Errorf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData)
}
}
+
+}
+
+// always returns the configured error
+// this is used to test error handling
+type mockFiProviderWithError struct{ err error }
+
+func (m mockFiProviderWithError) fileInfo(path string) (bool, bool, error) {
+ return false, false, m.err
+}
+
+// TestParseMountSpecBindWithFileinfoError makes sure that the parser returns
+// the error produced by the fileinfo provider.
+//
+// Some extra context for the future in case of changes and possible wtf are we
+// testing this for:
+//
+// Currently this "fileInfoProvider" returns (bool, bool, error)
+// The 1st bool is "does this path exist"
+// The 2nd bool is "is this path a dir"
+// Then of course the error is an error.
+//
+// The issue is the parser was ignoring the error and only looking at the
+// "does this path exist" boolean, which is always false if there is an error.
+// Then the error returned to the caller was a (slightly, maybe) friendlier
+// error string than what comes from `os.Stat`
+// So ...the caller was always getting an error saying the path doesn't exist
+// even if it does exist but got some other error (like a permission error).
+// This is confusing to users.
+func TestParseMountSpecBindWithFileinfoError(t *testing.T) {
+ previousProvider := currentFileInfoProvider
+ defer func() { currentFileInfoProvider = previousProvider }()
+
+ testErr := errors.New("some crazy error")
+ currentFileInfoProvider = &mockFiProviderWithError{err: testErr}
+
+ p := "/bananas"
+ if runtime.GOOS == "windows" {
+ p = `c:\bananas`
+ }
+ m := mount.Mount{Type: mount.TypeBind, Source: p, Target: p}
+
+ parser := NewParser(runtime.GOOS)
+
+ _, err := parser.ParseMountSpec(m)
+ assert.Assert(t, err != nil)
+ assert.Assert(t, cmp.Contains(err.Error(), "some crazy error"))
}
diff --git a/volume/mounts/windows_parser.go b/volume/mounts/windows_parser.go
index ac61044..8f427d8 100644
--- a/volume/mounts/windows_parser.go
+++ b/volume/mounts/windows_parser.go
@@ -385,7 +385,7 @@
switch cfg.Type {
case mount.TypeVolume:
if cfg.Source == "" {
- mp.Name = stringid.GenerateNonCryptoID()
+ mp.Name = stringid.GenerateRandomID()
} else {
mp.Name = cfg.Source
}
diff --git a/volume/service/service.go b/volume/service/service.go
index ebb5e20..f1fe5e7 100644
--- a/volume/service/service.go
+++ b/volume/service/service.go
@@ -56,7 +56,7 @@
// Create creates a volume
func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) {
if name == "" {
- name = stringid.GenerateNonCryptoID()
+ name = stringid.GenerateRandomID()
}
v, err := s.vs.Create(ctx, name, driverName, opts...)
if err != nil {