Merge pull request #35672 from tonistiigi/onbuild-test
Add testcase for onbuild command in multi stage build
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a38f54d..519e238 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -303,9 +303,8 @@
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the
-global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
-file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
-repository.
+[/project/GOVERNANCE.md](/project/GOVERNANCE.md)
+file in this repository.
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
@@ -371,6 +370,11 @@
used to ping maintainers to review a pull request, a proposal or an
issue.
+The open source governance for this repository is defined by the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc)
+charter. For any concerns with the community process regarding technical contributions,
+please contact the TSC. More information on project governance is available in
+the [/project/GOVERNANCE.md](/project/GOVERNANCE.md) document in this repository.
+
### Guideline violations — 3 strikes method
The point of this section is not to find opportunities to punish people, but we
diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64
index 58ca40d..fde0c70 100644
--- a/Dockerfile.aarch64
+++ b/Dockerfile.aarch64
@@ -15,7 +15,7 @@
# the case. Therefore, you don't have to disable it anymore.
#
-FROM arm64v8/debian:stretch
+FROM debian:stretch
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -142,10 +142,10 @@
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
- aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \
- aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \
- aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \
- aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e
+ buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
+ busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
+ debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
+ hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
diff --git a/MAINTAINERS b/MAINTAINERS
index a896687..4c831d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,12 +1,14 @@
# Moby maintainers file
#
-# This file describes who runs the docker/docker project and how.
-# This is a living document - if you see something out of date or missing, speak up!
+# This file describes the maintainer groups within the moby/moby project.
+# More detail on Moby project governance is available in the
+# project/GOVERNANCE.md file found in this repository.
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
+# TODO(estesp): This file should not necessarily depend on docker/opensource
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
diff --git a/api/common.go b/api/common.go
index af34d0b..693d0a4 100644
--- a/api/common.go
+++ b/api/common.go
@@ -3,7 +3,7 @@
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
- DefaultVersion string = "1.35"
+ DefaultVersion string = "1.36"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go
index d845fdd..b7848a3 100644
--- a/api/server/router/container/container_routes.go
+++ b/api/server/router/container/container_routes.go
@@ -593,7 +593,11 @@
close(done)
select {
case <-started:
- logrus.Errorf("Error attaching websocket: %s", err)
+ if err != nil {
+ logrus.Errorf("Error attaching websocket: %s", err)
+ } else {
+ logrus.Debug("websocket connection was closed by client")
+ }
return nil
default:
}
diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go
index dabab3b..fd95420 100644
--- a/api/server/router/image/image_routes.go
+++ b/api/server/router/image/image_routes.go
@@ -13,7 +13,6 @@
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
- "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
@@ -46,9 +45,6 @@
if err != nil && err != io.EOF { //Do not fail if body is empty.
return err
}
- if c == nil {
- c = &container.Config{}
- }
commitCfg := &backend.ContainerCommitConfig{
ContainerCommitConfig: types.ContainerCommitConfig{
diff --git a/api/swagger.yaml b/api/swagger.yaml
index b1533f6..73ea1a3 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -19,10 +19,10 @@
consumes:
- "application/json"
- "text/plain"
-basePath: "/v1.35"
+basePath: "/v1.36"
info:
title: "Docker Engine API"
- version: "1.35"
+ version: "1.36"
x-logo:
url: "https://docs.docker.com/images/logo-docker-main.png"
description: |
@@ -49,8 +49,8 @@
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
- If you omit the version-prefix, the current version of the API (v1.35) is used.
- For example, calling `/info` is the same as calling `/v1.35/info`. Using the
+ If you omit the version-prefix, the current version of the API (v1.36) is used.
+ For example, calling `/info` is the same as calling `/v1.36/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go
index b62d6fc..20f1650 100644
--- a/builder/dockerfile/builder.go
+++ b/builder/dockerfile/builder.go
@@ -396,7 +396,8 @@
}
dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, newBuildArgs(b.options.BuildArgs), newStagesBuildResults())
- dispatchRequest.state.runConfig = config
+	// We mutate the configuration, so make sure we operate on a copy
+ dispatchRequest.state.runConfig = copyRunConfig(config)
dispatchRequest.state.imageID = config.Image
for _, cmd := range commands {
err := dispatch(dispatchRequest, cmd)
diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go
index c4ae197..c7da6ee 100644
--- a/cmd/dockerd/config.go
+++ b/cmd/dockerd/config.go
@@ -92,6 +92,8 @@
flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication")
if runtime.GOOS != "windows" {
+ // TODO: Remove this flag after 3 release cycles (18.03)
flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries")
+ flags.MarkHidden("disable-legacy-registry")
}
}
diff --git a/cmd/dockerd/config_experimental.go b/cmd/dockerd/config_experimental.go
deleted file mode 100644
index 355a29e..0000000
--- a/cmd/dockerd/config_experimental.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package main
-
-import (
- "github.com/docker/docker/daemon/config"
- "github.com/spf13/pflag"
-)
-
-func attachExperimentalFlags(conf *config.Config, cmd *pflag.FlagSet) {
-}
diff --git a/cmd/dockerd/config_solaris.go b/cmd/dockerd/config_solaris.go
deleted file mode 100644
index ed67064..0000000
--- a/cmd/dockerd/config_solaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package main
-
-import (
- "github.com/docker/docker/daemon/config"
- "github.com/spf13/pflag"
-)
-
-// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
-func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
- // First handle install flags which are consistent cross-platform
- installCommonConfigFlags(conf, flags)
-
- // Then install flags common to unix platforms
- installUnixConfigFlags(conf, flags)
-
- attachExperimentalFlags(conf, flags)
-}
diff --git a/cmd/dockerd/config_unix.go b/cmd/dockerd/config_unix.go
index b3bd741..a3b0e36 100644
--- a/cmd/dockerd/config_unix.go
+++ b/cmd/dockerd/config_unix.go
@@ -44,6 +44,4 @@
flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers")
flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers")
flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`)
-
- attachExperimentalFlags(conf, flags)
}
diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go
index 02a0314..d73b63a 100644
--- a/cmd/dockerd/daemon.go
+++ b/cmd/dockerd/daemon.go
@@ -6,6 +6,7 @@
"fmt"
"os"
"path/filepath"
+ "runtime"
"strings"
"time"
@@ -472,8 +473,15 @@
return nil, err
}
- if !conf.V2Only {
- logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`)
+ if runtime.GOOS != "windows" {
+ if flags.Changed("disable-legacy-registry") {
+ // TODO: Remove this error after 3 release cycles (18.03)
+ return nil, errors.New("ERROR: The '--disable-legacy-registry' flag has been removed. Interacting with legacy (v1) registries is no longer supported")
+ }
+ if !conf.V2Only {
+ // TODO: Remove this error after 3 release cycles (18.03)
+ return nil, errors.New("ERROR: The 'disable-legacy-registry' configuration option has been removed. Interacting with legacy (v1) registries is no longer supported")
+ }
}
if flags.Changed("graph") {
diff --git a/cmd/dockerd/daemon_linux.go b/cmd/dockerd/daemon_linux.go
index b58f0f0..b09fed9 100644
--- a/cmd/dockerd/daemon_linux.go
+++ b/cmd/dockerd/daemon_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package main
import systemdDaemon "github.com/coreos/go-systemd/daemon"
diff --git a/cmd/dockerd/daemon_unix_test.go b/cmd/dockerd/daemon_unix_test.go
index 475ff9e..41c392e 100644
--- a/cmd/dockerd/daemon_unix_test.go
+++ b/cmd/dockerd/daemon_unix_test.go
@@ -97,15 +97,3 @@
assert.True(t, loadedConfig.EnableUserlandProxy)
}
-
-func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) {
- content := `{"disable-legacy-registry": false}`
- tempFile := fs.NewFile(t, "config", fs.WithContent(content))
- defer tempFile.Remove()
-
- opts := defaultOptions(tempFile.Path())
- loadedConfig, err := loadDaemonCliConfig(opts)
- require.NoError(t, err)
- require.NotNil(t, loadedConfig)
- assert.False(t, loadedConfig.V2Only)
-}
diff --git a/container/container_windows.go b/container/container_windows.go
index 5cb2e45..92b50a6 100644
--- a/container/container_windows.go
+++ b/container/container_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package container
import (
diff --git a/container/state_solaris.go b/container/state_solaris.go
deleted file mode 100644
index 1229650..0000000
--- a/container/state_solaris.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package container
-
-// setFromExitStatus is a platform specific helper function to set the state
-// based on the ExitStatus structure.
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
- s.ExitCodeValue = exitStatus.ExitCode
-}
diff --git a/daemon/archive_tarcopyoptions_windows.go b/daemon/archive_tarcopyoptions_windows.go
index 535efd2..500e128 100644
--- a/daemon/archive_tarcopyoptions_windows.go
+++ b/daemon/archive_tarcopyoptions_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
import (
diff --git a/daemon/cluster/listen_addr_linux.go b/daemon/cluster/listen_addr_linux.go
index 3d4f239..2f342fd 100644
--- a/daemon/cluster/listen_addr_linux.go
+++ b/daemon/cluster/listen_addr_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package cluster
import (
diff --git a/daemon/cluster/listen_addr_solaris.go b/daemon/cluster/listen_addr_solaris.go
deleted file mode 100644
index 57a894b..0000000
--- a/daemon/cluster/listen_addr_solaris.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package cluster
-
-import (
- "bufio"
- "fmt"
- "net"
- "os/exec"
- "strings"
-)
-
-func (c *Cluster) resolveSystemAddr() (net.IP, error) {
- defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " +
- "`/usr/sbin/route get default | /usr/bin/grep interface | " +
- "/usr/bin/awk '{print $2}'`"
- out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output()
- if err != nil {
- return nil, fmt.Errorf("cannot get default route: %v", err)
- }
-
- defInterface := strings.SplitN(string(out), "/", 2)
- defInterfaceIP := net.ParseIP(defInterface[0])
-
- return defInterfaceIP, nil
-}
-
-func listSystemIPs() []net.IP {
- var systemAddrs []net.IP
- cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr")
- cmdReader, err := cmd.StdoutPipe()
- if err != nil {
- return nil
- }
-
- if err := cmd.Start(); err != nil {
- return nil
- }
-
- scanner := bufio.NewScanner(cmdReader)
- go func() {
- for scanner.Scan() {
- text := scanner.Text()
- nameAddrPair := strings.SplitN(text, "/", 2)
- // Let go of loopback interfaces and docker interfaces
- systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0]))
- }
- }()
-
- if err := scanner.Err(); err != nil {
- fmt.Printf("scan underwent err: %+v\n", err)
- }
-
- if err := cmd.Wait(); err != nil {
- fmt.Printf("run command wait: %+v\n", err)
- }
-
- return systemAddrs
-}
diff --git a/daemon/commit.go b/daemon/commit.go
index 0053132..1bdbd6b 100644
--- a/daemon/commit.go
+++ b/daemon/commit.go
@@ -149,6 +149,10 @@
defer daemon.containerUnpause(container)
}
+ if c.MergeConfigs && c.Config == nil {
+ c.Config = container.Config
+ }
+
newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes)
if err != nil {
return "", err
diff --git a/daemon/config/config_solaris.go b/daemon/config/config_solaris.go
deleted file mode 100644
index 6b1e061..0000000
--- a/daemon/config/config_solaris.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package config
-
-// Config defines the configuration of a docker daemon.
-// These are the configuration settings that you pass
-// to the docker daemon when you launch it with say: `docker -d -e lxc`
-type Config struct {
- CommonConfig
-
- // These fields are common to all unix platforms.
- CommonUnixConfig
-}
-
-// BridgeConfig stores all the bridge driver specific
-// configuration.
-type BridgeConfig struct {
- commonBridgeConfig
-
- // Fields below here are platform specific.
- commonUnixBridgeConfig
-}
-
-// IsSwarmCompatible defines if swarm mode can be enabled in this config
-func (conf *Config) IsSwarmCompatible() error {
- return nil
-}
-
-// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid.
-func (conf *Config) ValidatePlatformConfig() error {
- return nil
-}
diff --git a/daemon/configs_linux.go b/daemon/configs_linux.go
index af20ad7..d498c95 100644
--- a/daemon/configs_linux.go
+++ b/daemon/configs_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
func configsSupported() bool {
diff --git a/daemon/configs_windows.go b/daemon/configs_windows.go
index 7cb2e9c..d498c95 100644
--- a/daemon/configs_windows.go
+++ b/daemon/configs_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
func configsSupported() bool {
diff --git a/daemon/container_windows.go b/daemon/container_windows.go
index 6fdd1e6..6db130a 100644
--- a/daemon/container_windows.go
+++ b/daemon/container_windows.go
@@ -1,5 +1,3 @@
-//+build windows
-
package daemon
import (
diff --git a/daemon/daemon.go b/daemon/daemon.go
index e63e209..dd8c100 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -62,8 +62,8 @@
"github.com/pkg/errors"
)
-// MainNamespace is the name of the namespace used for users containers
-const MainNamespace = "moby"
+// ContainersNamespace is the name of the namespace used for users containers
+const ContainersNamespace = "moby"
var (
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
@@ -247,6 +247,11 @@
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
+ } else if !daemon.configStore.LiveRestoreEnabled {
+ if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
+ logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
+ return
+ }
}
if c.IsRunning() || c.IsPaused() {
@@ -317,24 +322,24 @@
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
- } else {
- // get list of containers we need to restart
+ }
- // Do not autostart containers which
- // has endpoints in a swarm scope
- // network yet since the cluster is
- // not initialized yet. We will start
- // it after the cluster is
- // initialized.
- if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
- mapLock.Lock()
- restartContainers[c] = make(chan struct{})
- mapLock.Unlock()
- } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
- mapLock.Lock()
- removeContainers[c.ID] = c
- mapLock.Unlock()
- }
+ // get list of containers we need to restart
+
+	// Do not autostart containers that
+	// have endpoints in a swarm-scope
+	// network, since the cluster is
+	// not yet initialized. We will
+	// start them after the cluster
+	// has been initialized.
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
+ mapLock.Lock()
+ restartContainers[c] = make(chan struct{})
+ mapLock.Unlock()
+ } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
+ mapLock.Lock()
+ removeContainers[c.ID] = c
+ mapLock.Unlock()
}
c.Lock()
@@ -890,7 +895,7 @@
go d.execCommandGC()
- d.containerd, err = containerdRemote.NewClient(MainNamespace, d)
+ d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
if err != nil {
return nil, err
}
diff --git a/daemon/daemon_experimental.go b/daemon/daemon_experimental.go
deleted file mode 100644
index fb0251d..0000000
--- a/daemon/daemon_experimental.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package daemon
-
-import "github.com/docker/docker/api/types/container"
-
-func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
- return nil, nil
-}
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index 1f9885d..51ea7c7 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -574,11 +574,6 @@
var warnings []string
sysInfo := sysinfo.New(true)
- warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config)
- if err != nil {
- return warnings, err
- }
-
w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update)
// no matter err is nil or not, w could have data in itself.
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
index 8029bbf..8582d4e 100644
--- a/daemon/daemon_windows.go
+++ b/daemon/daemon_windows.go
@@ -26,7 +26,6 @@
winlibnetwork "github.com/docker/libnetwork/drivers/windows"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/options"
- blkiodev "github.com/opencontainers/runc/libcontainer/configs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
@@ -47,10 +46,6 @@
return filepath.Join(root, "plugins")
}
-func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) {
- return nil, nil
-}
-
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
return parseSecurityOpt(container, hostConfig)
}
diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go
deleted file mode 100644
index 7c1fc20..0000000
--- a/daemon/exec_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/container"
- "github.com/docker/docker/daemon/exec"
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func (daemon *Daemon) execSetPlatformOpt(_ *container.Container, _ *exec.Config, _ *specs.Process) error {
- return nil
-}
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index db41f05..6659878 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -1201,7 +1201,7 @@
options = joinMountOptions(options, devices.mountOptions)
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
- return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256)))
+ return fmt.Errorf("Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options, err, string(dmesg.Dmesg(256)))
}
defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
@@ -2392,7 +2392,7 @@
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
- return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
+ return fmt.Errorf("devmapper: Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), path, fstype, options, err, string(dmesg.Dmesg(256)))
}
if fstype == "xfs" && devices.xfsNospaceRetries != "" {
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
index f59862d..d2d7c9f 100644
--- a/daemon/graphdriver/driver_linux.go
+++ b/daemon/graphdriver/driver_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package graphdriver
import (
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
index f1731ea..4b596ae 100644
--- a/daemon/graphdriver/overlay2/overlay.go
+++ b/daemon/graphdriver/overlay2/overlay.go
@@ -31,6 +31,7 @@
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/system"
"github.com/docker/go-units"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -704,6 +705,7 @@
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
return 0, err
}
diff --git a/daemon/graphdriver/vfs/copy_linux.go b/daemon/graphdriver/vfs/copy_linux.go
index a632d35..1a63a11 100644
--- a/daemon/graphdriver/vfs/copy_linux.go
+++ b/daemon/graphdriver/vfs/copy_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package vfs
import "github.com/docker/docker/daemon/graphdriver/copy"
diff --git a/daemon/graphdriver/vfs/quota_linux.go b/daemon/graphdriver/vfs/quota_linux.go
index 032c15b..d40d0d1 100644
--- a/daemon/graphdriver/vfs/quota_linux.go
+++ b/daemon/graphdriver/vfs/quota_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package vfs
import "github.com/docker/docker/daemon/graphdriver/quota"
diff --git a/daemon/health.go b/daemon/health.go
index f40c0dd..9acf190 100644
--- a/daemon/health.go
+++ b/daemon/health.go
@@ -80,6 +80,7 @@
execConfig.Tty = false
execConfig.Privileged = false
execConfig.User = cntr.Config.User
+ execConfig.WorkingDir = cntr.Config.WorkingDir
linkedEnv, err := d.setupLinkedContainers(cntr)
if err != nil {
diff --git a/daemon/image.go b/daemon/image.go
index 6e90429..486f2a2 100644
--- a/daemon/image.go
+++ b/daemon/image.go
@@ -6,7 +6,6 @@
"github.com/docker/distribution/reference"
"github.com/docker/docker/image"
- "github.com/docker/docker/pkg/stringid"
)
// errImageDoesNotExist is error returned when no image can be found for a reference.
@@ -59,21 +58,6 @@
return id, imageOS, nil
}
- // deprecated: repo:shortid https://github.com/docker/docker/pull/799
- if tagged, ok := namedRef.(reference.Tagged); ok {
- if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) {
- for platform := range daemon.stores {
- if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil {
- for _, storeRef := range daemon.referenceStore.References(id.Digest()) {
- if storeRef.Name() == namedRef.Name() {
- return id, platform, nil
- }
- }
- }
- }
- }
- }
-
// Search based on ID
for os := range daemon.stores {
if id, err := daemon.stores[os].imageStore.Search(refOrID); err == nil {
diff --git a/daemon/initlayer/setup_windows.go b/daemon/initlayer/setup_windows.go
index b47563e..ff78a4d 100644
--- a/daemon/initlayer/setup_windows.go
+++ b/daemon/initlayer/setup_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package initlayer
import (
diff --git a/daemon/inspect_unix.go b/daemon/inspect_linux.go
similarity index 98%
rename from daemon/inspect_unix.go
rename to daemon/inspect_linux.go
index f073695..8d334dc 100644
--- a/daemon/inspect_unix.go
+++ b/daemon/inspect_linux.go
@@ -1,5 +1,3 @@
-// +build !windows
-
package daemon
import (
diff --git a/daemon/inspect_solaris.go b/daemon/inspect_solaris.go
deleted file mode 100644
index 0b275c1..0000000
--- a/daemon/inspect_solaris.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/backend"
- "github.com/docker/docker/api/types/versions/v1p19"
- "github.com/docker/docker/container"
- "github.com/docker/docker/daemon/exec"
-)
-
-// This sets platform-specific fields
-func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
- return contJSONBase
-}
-
-// containerInspectPre120 get containers for pre 1.20 APIs.
-func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
- return &v1p19.ContainerJSON{}, nil
-}
-
-func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
- return &backend.ExecProcessConfig{
- Tty: e.Tty,
- Entrypoint: e.Entrypoint,
- Arguments: e.Args,
- }
-}
diff --git a/daemon/kill.go b/daemon/kill.go
index 1292f86..5cde0d7 100644
--- a/daemon/kill.go
+++ b/daemon/kill.go
@@ -4,10 +4,10 @@
"context"
"fmt"
"runtime"
- "strings"
"syscall"
"time"
+ "github.com/docker/docker/api/errdefs"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/pkg/signal"
@@ -97,15 +97,11 @@
}
if err := daemon.kill(container, sig); err != nil {
- err = errors.Wrapf(err, "Cannot kill container %s", container.ID)
- // if container or process not exists, ignore the error
- // TODO: we shouldn't have to parse error strings from containerd
- if strings.Contains(err.Error(), "container not found") ||
- strings.Contains(err.Error(), "no such process") {
- logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error())
+ if errdefs.IsNotFound(err) {
unpause = false
+ logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'")
} else {
- return err
+ return errors.Wrapf(err, "Cannot kill container %s", container.ID)
}
}
@@ -171,7 +167,7 @@
// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error.
func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error {
err := daemon.killWithSignal(container, sig)
- if err == syscall.ESRCH {
+ if errdefs.IsNotFound(err) {
e := errNoSuchProcess{container.GetPID(), sig}
logrus.Debug(e)
return e
diff --git a/daemon/listeners/listeners_unix.go b/daemon/listeners/listeners_linux.go
similarity index 98%
rename from daemon/listeners/listeners_unix.go
rename to daemon/listeners/listeners_linux.go
index 3a7c0f8..7e0aaa2 100644
--- a/daemon/listeners/listeners_unix.go
+++ b/daemon/listeners/listeners_linux.go
@@ -1,5 +1,3 @@
-// +build !windows
-
package listeners
import (
diff --git a/daemon/listeners/listeners_solaris.go b/daemon/listeners/listeners_solaris.go
deleted file mode 100644
index ee1bd0f..0000000
--- a/daemon/listeners/listeners_solaris.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package listeners
-
-import (
- "crypto/tls"
- "fmt"
- "net"
- "os"
-
- "github.com/docker/go-connections/sockets"
- "github.com/sirupsen/logrus"
-)
-
-// Init creates new listeners for the server.
-func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) {
- switch proto {
- case "tcp":
- l, err := sockets.NewTCPSocket(addr, tlsConfig)
- if err != nil {
- return nil, err
- }
- ls = append(ls, l)
- case "unix":
- gid, err := lookupGID(socketGroup)
- if err != nil {
- if socketGroup != "" {
- if socketGroup != defaultSocketGroup {
- return nil, err
- }
- logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err)
- }
- gid = os.Getgid()
- }
- l, err := sockets.NewUnixSocket(addr, gid)
- if err != nil {
- return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err)
- }
- ls = append(ls, l)
- default:
- return nil, fmt.Errorf("Invalid protocol format: %q", proto)
- }
-
- return
-}
diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go
index 4ea9420..25dd215 100644
--- a/daemon/logger/awslogs/cloudwatchlogs.go
+++ b/daemon/logger/awslogs/cloudwatchlogs.go
@@ -95,6 +95,17 @@
}
}
+// eventBatch holds the events that are batched for submission and the
+// associated data about it.
+//
+// Warning: this type is not threadsafe and must not be used
+// concurrently. This type is expected to be consumed in a single go
+// routine and never concurrently.
+type eventBatch struct {
+ batch []wrappedEvent
+ bytes int
+}
+
// New creates an awslogs logger using the configuration passed in on the
// context. Supported context configuration variables are awslogs-region,
// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern
@@ -389,32 +400,32 @@
// Logs, the processEvents method is called. If a multiline pattern is not
// configured, log events are submitted to the processEvents method immediately.
func (l *logStream) collectBatch() {
- timer := newTicker(batchPublishFrequency)
- var events []wrappedEvent
+ ticker := newTicker(batchPublishFrequency)
var eventBuffer []byte
var eventBufferTimestamp int64
+ var batch = newEventBatch()
for {
select {
- case t := <-timer.C:
+ case t := <-ticker.C:
// If event buffer is older than batch publish frequency flush the event buffer
if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond)
eventBufferNegative := eventBufferAge < 0
if eventBufferExpired || eventBufferNegative {
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
}
}
- l.publishBatch(events)
- events = events[:0]
+ l.publishBatch(batch)
+ batch.reset()
case msg, more := <-l.messages:
if !more {
// Flush event buffer and release resources
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
- l.publishBatch(events)
- events = events[:0]
+ l.publishBatch(batch)
+ batch.reset()
return
}
if eventBufferTimestamp == 0 {
@@ -425,7 +436,7 @@
if l.multilinePattern.Match(unprocessedLine) || len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent {
// This is a new log event or we will exceed max bytes per event
// so flush the current eventBuffer to events and reset timestamp
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
eventBuffer = eventBuffer[:0]
}
@@ -434,7 +445,7 @@
eventBuffer = append(eventBuffer, processedLine...)
logger.PutMessage(msg)
} else {
- events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
+ l.processEvent(batch, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
logger.PutMessage(msg)
}
}
@@ -450,8 +461,7 @@
// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event
// byte overhead (defined in perEventBytes) which is accounted for in split- and
// batch-calculations.
-func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent {
- bytes := 0
+func (l *logStream) processEvent(batch *eventBatch, unprocessedLine []byte, timestamp int64) {
for len(unprocessedLine) > 0 {
// Split line length so it does not exceed the maximum
lineBytes := len(unprocessedLine)
@@ -459,38 +469,33 @@
lineBytes = maximumBytesPerEvent
}
line := unprocessedLine[:lineBytes]
- unprocessedLine = unprocessedLine[lineBytes:]
- if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) {
- // Publish an existing batch if it's already over the maximum number of events or if adding this
- // event would push it over the maximum number of total bytes.
- l.publishBatch(events)
- events = events[:0]
- bytes = 0
- }
- events = append(events, wrappedEvent{
+
+ event := wrappedEvent{
inputLogEvent: &cloudwatchlogs.InputLogEvent{
Message: aws.String(string(line)),
Timestamp: aws.Int64(timestamp),
},
- insertOrder: len(events),
- })
- bytes += (lineBytes + perEventBytes)
+ insertOrder: batch.count(),
+ }
+
+ added := batch.add(event, lineBytes)
+ if added {
+ unprocessedLine = unprocessedLine[lineBytes:]
+ } else {
+ l.publishBatch(batch)
+ batch.reset()
+ }
}
- return events
}
// publishBatch calls PutLogEvents for a given set of InputLogEvents,
// accounting for sequencing requirements (each request must reference the
// sequence token returned by the previous request).
-func (l *logStream) publishBatch(events []wrappedEvent) {
- if len(events) == 0 {
+func (l *logStream) publishBatch(batch *eventBatch) {
+ if batch.isEmpty() {
return
}
-
- // events in a batch must be sorted by timestamp
- // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
- sort.Sort(byTimestamp(events))
- cwEvents := unwrapEvents(events)
+ cwEvents := unwrapEvents(batch.events())
nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken)
@@ -615,3 +620,70 @@
}
return cwEvents
}
+
+func newEventBatch() *eventBatch {
+ return &eventBatch{
+ batch: make([]wrappedEvent, 0),
+ bytes: 0,
+ }
+}
+
+// events returns a slice of wrappedEvents sorted in order of their
+// timestamps and then by their insertion order (see `byTimestamp`).
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) events() []wrappedEvent {
+ sort.Sort(byTimestamp(b.batch))
+ return b.batch
+}
+
+// add adds an event to the batch of events accounting for the
+// necessary overhead for an event to be logged. It returns false
+// if the event cannot be added to the batch because it would
+// exceed the service limits.
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) add(event wrappedEvent, size int) bool {
+ addBytes := size + perEventBytes
+
+ // verify we are still within service limits
+ switch {
+ case len(b.batch)+1 > maximumLogEventsPerPut:
+ return false
+ case b.bytes+addBytes > maximumBytesPerPut:
+ return false
+ }
+
+ b.bytes += addBytes
+ b.batch = append(b.batch, event)
+
+ return true
+}
+
+// count is the number of batched events. Warning: this method
+// is not threadsafe and must not be used concurrently.
+func (b *eventBatch) count() int {
+ return len(b.batch)
+}
+
+// size is the total number of bytes that the batch represents.
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) size() int {
+ return b.bytes
+}
+
+func (b *eventBatch) isEmpty() bool {
+ zeroEvents := b.count() == 0
+ zeroSize := b.size() == 0
+ return zeroEvents && zeroSize
+}
+
+// reset prepares the batch for reuse.
+func (b *eventBatch) reset() {
+ b.bytes = 0
+ b.batch = b.batch[:0]
+}
diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go
index 7ebc5de..67ea474 100644
--- a/daemon/logger/awslogs/cloudwatchlogs_test.go
+++ b/daemon/logger/awslogs/cloudwatchlogs_test.go
@@ -49,6 +49,15 @@
}
}
+func testEventBatch(events []wrappedEvent) *eventBatch {
+ batch := newEventBatch()
+ for _, event := range events {
+ eventlen := len([]byte(*event.inputLogEvent.Message))
+ batch.add(event, eventlen)
+ }
+ return batch
+}
+
func TestNewAWSLogsClientUserAgentHandler(t *testing.T) {
info := logger.Info{
Config: map[string]string{
@@ -212,7 +221,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -257,7 +266,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -291,7 +300,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -354,7 +363,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -859,7 +868,8 @@
}
func TestCollectBatchMaxTotalBytes(t *testing.T) {
- mockClient := newMockClientBuffered(1)
+ expectedPuts := 2
+ mockClient := newMockClientBuffered(expectedPuts)
stream := &logStream{
client: mockClient,
logGroupName: groupName,
@@ -867,11 +877,14 @@
sequenceToken: aws.String(sequenceToken),
messages: make(chan *logger.Message),
}
- mockClient.putLogEventsResult <- &putLogEventsResult{
- successResult: &cloudwatchlogs.PutLogEventsOutput{
- NextSequenceToken: aws.String(nextSequenceToken),
- },
+ for i := 0; i < expectedPuts; i++ {
+ mockClient.putLogEventsResult <- &putLogEventsResult{
+ successResult: &cloudwatchlogs.PutLogEventsOutput{
+ NextSequenceToken: aws.String(nextSequenceToken),
+ },
+ }
}
+
var ticks = make(chan time.Time)
newTicker = func(_ time.Duration) *time.Ticker {
return &time.Ticker{
@@ -881,32 +894,57 @@
go stream.collectBatch()
- longline := strings.Repeat("A", maximumBytesPerPut)
+ numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes)
+ // maxline is the maximum line that could be submitted after
+ // accounting for its overhead.
+ maxline := strings.Repeat("A", maximumBytesPerPut-(perEventBytes*numPayloads))
+ // This will be split and batched up to the `maximumBytesPerPut'
+ // (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
+ // should also tolerate an offset within that range.
stream.Log(&logger.Message{
- Line: []byte(longline + "B"),
+ Line: []byte(maxline[:len(maxline)/2]),
+ Timestamp: time.Time{},
+ })
+ stream.Log(&logger.Message{
+ Line: []byte(maxline[len(maxline)/2:]),
+ Timestamp: time.Time{},
+ })
+ stream.Log(&logger.Message{
+ Line: []byte("B"),
Timestamp: time.Time{},
})
- // no ticks
+ // no ticks, guarantee batch by size (and chan close)
stream.Close()
argument := <-mockClient.putLogEventsArgument
if argument == nil {
t.Fatal("Expected non-nil PutLogEventsInput")
}
- bytes := 0
+
+ // Should total to the maximum allowed bytes.
+ eventBytes := 0
for _, event := range argument.LogEvents {
- bytes += len(*event.Message)
+ eventBytes += len(*event.Message)
}
- if bytes > maximumBytesPerPut {
- t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes)
+ eventsOverhead := len(argument.LogEvents) * perEventBytes
+ payloadTotal := eventBytes + eventsOverhead
+	// lowestMaxBatch allows the payload to fall short of the maximum
+	// when the messages do not align exactly with the maximum event size.
+ lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent
+
+ if payloadTotal > maximumBytesPerPut {
+ t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, payloadTotal)
+ }
+ if payloadTotal < lowestMaxBatch {
+ t.Errorf("Batch to be no less than %d but was %d", lowestMaxBatch, payloadTotal)
}
argument = <-mockClient.putLogEventsArgument
if len(argument.LogEvents) != 1 {
t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents))
}
- message := *argument.LogEvents[0].Message
+ message := *argument.LogEvents[len(argument.LogEvents)-1].Message
if message[len(message)-1:] != "B" {
t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
}
diff --git a/daemon/logger/awslogs/cwlogsiface_mock_test.go b/daemon/logger/awslogs/cwlogsiface_mock_test.go
index 82bb34b..d0a2eba 100644
--- a/daemon/logger/awslogs/cwlogsiface_mock_test.go
+++ b/daemon/logger/awslogs/cwlogsiface_mock_test.go
@@ -1,6 +1,10 @@
package awslogs
-import "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+)
type mockcwlogsclient struct {
createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput
@@ -67,7 +71,30 @@
LogGroupName: input.LogGroupName,
LogStreamName: input.LogStreamName,
}
+
+ // Intended mock output
output := <-m.putLogEventsResult
+
+	// Check enforced limits in the mock
+ totalBytes := 0
+ for _, evt := range events {
+ if evt.Message == nil {
+ continue
+ }
+ eventBytes := len([]byte(*evt.Message))
+ if eventBytes > maximumBytesPerEvent {
+ // exceeded per event message size limits
+ return nil, fmt.Errorf("maximum bytes per event exceeded: Event too large %d, max allowed: %d", eventBytes, maximumBytesPerEvent)
+ }
+ // total event bytes including overhead
+ totalBytes += eventBytes + perEventBytes
+ }
+
+ if totalBytes > maximumBytesPerPut {
+ // exceeded per put maximum size limit
+ return nil, fmt.Errorf("maximum bytes per put exceeded: Upload too large %d, max allowed: %d", totalBytes, maximumBytesPerPut)
+ }
+
return output.successResult, output.errorResult
}
diff --git a/daemon/logger/gcplogs/gcplogging_linux.go b/daemon/logger/gcplogs/gcplogging_linux.go
index 8917bdd..41a0936 100644
--- a/daemon/logger/gcplogs/gcplogging_linux.go
+++ b/daemon/logger/gcplogs/gcplogging_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package gcplogs
import (
diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go
deleted file mode 100644
index 0995758..0000000
--- a/daemon/monitor_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/container"
- "github.com/docker/docker/libcontainerd"
-)
-
-// postRunProcessing perfoms any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(_ *container.Container, _ libcontainerd.EventInfo) error {
- return nil
-}
diff --git a/daemon/secrets_linux.go b/daemon/secrets_linux.go
index fca4e12..6ae0117 100644
--- a/daemon/secrets_linux.go
+++ b/daemon/secrets_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
func secretsSupported() bool {
diff --git a/daemon/secrets_windows.go b/daemon/secrets_windows.go
index 9054354..6ae0117 100644
--- a/daemon/secrets_windows.go
+++ b/daemon/secrets_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
func secretsSupported() bool {
diff --git a/daemon/selinux_linux.go b/daemon/selinux_linux.go
index fb2578b..46da7f1 100644
--- a/daemon/selinux_linux.go
+++ b/daemon/selinux_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
import "github.com/opencontainers/selinux/go-selinux"
diff --git a/daemon/stats/collector_windows.go b/daemon/stats/collector_windows.go
index 5fb27ce..03109fd 100644
--- a/daemon/stats/collector_windows.go
+++ b/daemon/stats/collector_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package stats
// platformNewStatsCollector performs platform specific initialisation of the
diff --git a/daemon/update_linux.go b/daemon/update_linux.go
index 41d3b53..966d74e 100644
--- a/daemon/update_linux.go
+++ b/daemon/update_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
import (
diff --git a/daemon/update_solaris.go b/daemon/update_solaris.go
deleted file mode 100644
index f3b545c..0000000
--- a/daemon/update_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/libcontainerd"
-)
-
-func toContainerdResources(resources container.Resources) libcontainerd.Resources {
- var r libcontainerd.Resources
- return r
-}
diff --git a/daemon/update_windows.go b/daemon/update_windows.go
index 4f85f41..e60f63d 100644
--- a/daemon/update_windows.go
+++ b/daemon/update_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
import (
diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go
index 62c9e23..bfb5133 100644
--- a/daemon/volumes_windows.go
+++ b/daemon/volumes_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
import (
diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go
index b4573e1..08ff437 100644
--- a/distribution/pull_v2_windows.go
+++ b/distribution/pull_v2_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package distribution
import (
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
index 2ed2ae5..0fdf464 100644
--- a/docs/api/version-history.md
+++ b/docs/api/version-history.md
@@ -13,6 +13,11 @@
will be rejected.
-->
+## v1.36 API changes
+
+[Docker Engine API v1.36](https://docs.docker.com/engine/api/v1.36/) documentation
+
+
## v1.35 API changes
[Docker Engine API v1.35](https://docs.docker.com/engine/api/v1.35/) documentation
diff --git a/docs/contributing/set-up-dev-env.md b/docs/contributing/set-up-dev-env.md
index 28bea5b..b4cacf5 100644
--- a/docs/contributing/set-up-dev-env.md
+++ b/docs/contributing/set-up-dev-env.md
@@ -129,13 +129,14 @@
```none
Successfully built 3d872560918e
+ Successfully tagged docker-dev:dry-run-test
docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/moby/moby/bundles" -t "docker-dev:dry-run-test" bash
root@f31fa223770f:/go/src/github.com/docker/docker#
```
At this point, your prompt reflects the container's BASH shell.
-5. List the contents of the current directory (`/go/src/github.com/moby/moby`).
+5. List the contents of the current directory (`/go/src/github.com/docker/docker`).
You should see the image's source from the `/go/src/github.com/docker/docker`
directory.
@@ -185,13 +186,65 @@
hack/make.sh binary install-binary run
```
-9. Inside your container, check your Docker version.
+9. Inside your container, check your Docker versions:
```none
- root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version
- Docker version 1.12.0-dev, build 6e728fb
+ # docker version
+ Client:
+ Version: 17.06.0-ce
+ API version: 1.30
+ Go version: go1.8.3
+ Git commit: 02c1d87
+ Built: Fri Jun 23 21:15:15 2017
+ OS/Arch: linux/amd64
+
+ Server:
+ Version: dev
+ API version: 1.35 (minimum version 1.12)
+ Go version: go1.9.2
+ Git commit: 4aa6362da
+ Built: Sat Dec 2 05:22:42 2017
+ OS/Arch: linux/amd64
+ Experimental: false
```
+ Notice the split versions between client and server, which might be
+ unexpected. In more recent times the Docker CLI component (which provides the
+ `docker` command) has split out from the Moby project and is now maintained in:
+
+ * [docker/cli](https://github.com/docker/cli) - The Docker CLI source-code;
+ * [docker/docker-ce](https://github.com/docker/docker-ce) - The Docker CE
+ edition project, which assembles engine, CLI and other components.
+
+ The Moby project now defaults to a [fixed
+ version](https://github.com/docker/docker-ce/commits/v17.06.0-ce) of the
+ `docker` CLI for integration tests.
+
+ You may have noticed the following message when starting the container with the `shell` command:
+
+ ```none
+ Makefile:123: The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:
+ DOCKER_CLI_PATH=/host/path/to/cli/binary make shell
+ then change the cli and compile into a binary at the same location.
+ ```
+
+ By setting `DOCKER_CLI_PATH` you can supply a newer `docker` CLI to the
+ server development container for testing and for `integration-cli`
+ test-execution:
+
+ ```none
+ make DOCKER_CLI_PATH=/home/ubuntu/git/docker-ce/components/packaging/static/build/linux/docker/docker BIND_DIR=. shell
+ ...
+ # which docker
+ /usr/local/cli/docker
+ # docker --version
+ Docker version 17.09.0-dev, build
+ ```
+
+ This Docker CLI should be built from the [docker-ce
+ project](https://github.com/docker/docker-ce) and needs to be a Linux
+ binary.
+
Inside the container you are running a development version. This is the version
on the current branch. It reflects the value of the `VERSION` file at the
root of your `docker-fork` repository.
diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits
index abe8bfe..3b52082 100644
--- a/hack/dockerfile/binaries-commits
+++ b/hack/dockerfile/binaries-commits
@@ -4,7 +4,7 @@
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=v1.0.0
+CONTAINERD_COMMIT=89623f28b87a6004d4b785663257362d1658a729 # v1.0.0
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go
index 60ca4b9..e7d77f0 100644
--- a/integration-cli/docker_api_info_test.go
+++ b/integration-cli/docker_api_info_test.go
@@ -1,13 +1,10 @@
package main
import (
- "encoding/json"
"net/http"
"fmt"
- "github.com/docker/docker/api/types"
-
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/request"
@@ -48,25 +45,6 @@
}
}
-// TestInfoAPIRuncCommit tests that dockerd is able to obtain RunC version
-// information, and that the version matches the expected version
-func (s *DockerSuite) TestInfoAPIRuncCommit(c *check.C) {
- testRequires(c, DaemonIsLinux) // Windows does not have RunC version information
-
- res, body, err := request.Get("/v1.30/info")
- c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
- c.Assert(err, checker.IsNil)
-
- b, err := request.ReadBody(body)
- c.Assert(err, checker.IsNil)
-
- var i types.Info
-
- c.Assert(json.Unmarshal(b, &i), checker.IsNil)
- c.Assert(i.RuncCommit.ID, checker.Not(checker.Equals), "N/A")
- c.Assert(i.RuncCommit.ID, checker.Equals, i.RuncCommit.Expected)
-}
-
func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
index 0672e32..89c2865 100644
--- a/integration-cli/docker_api_logs_test.go
+++ b/integration-cli/docker_api_logs_test.go
@@ -151,7 +151,7 @@
func (s *DockerSuite) TestLogsAPIUntil(c *check.C) {
name := "logsuntil"
- dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 0.5; done")
+ dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 1; done")
client, err := request.NewClient()
if err != nil {
@@ -172,6 +172,8 @@
// Get timestamp of second log line
allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true})
+ c.Assert(len(allLogs), checker.GreaterOrEqualThan, 3)
+
t, err := time.Parse(time.RFC3339Nano, strings.Split(allLogs[1], " ")[0])
c.Assert(err, checker.IsNil)
until := t.Format(time.RFC3339Nano)
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 89e62c1..e60f4d5 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -4860,7 +4860,7 @@
}
}
-func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageArg(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `FROM busybox
ARG foo=abc
@@ -4884,7 +4884,7 @@
c.Assert(result.Stdout(), checker.Contains, "bar=def")
}
-func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageGlobalArg(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `ARG tag=nosuchtag
FROM busybox:${tag}
@@ -4909,7 +4909,7 @@
c.Assert(result.Stdout(), checker.Contains, "tag=latest")
}
-func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageUnusedArg(c *check.C) {
imgName := "multifromunusedarg"
dockerfile := `FROM busybox
ARG foo
@@ -5727,7 +5727,7 @@
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
-func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCache(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
@@ -5888,7 +5888,7 @@
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n")
}
-func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCopyFromSyntax(c *check.C) {
dockerfile := `
FROM busybox AS first
COPY foo bar
@@ -5946,7 +5946,7 @@
cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"})
}
-func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCopyFromErrors(c *check.C) {
testCases := []struct {
dockerfile string
expectedError string
@@ -5993,7 +5993,7 @@
}
}
-func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageMultipleBuilds(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar`
@@ -6026,7 +6026,7 @@
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
-func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageImplicitFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY --from=busybox /etc/passwd /mypasswd
@@ -6053,7 +6053,7 @@
}
}
-func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) {
+func (s *DockerRegistrySuite) TestBuildMultiStageImplicitPull(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL)
dockerfile := `
@@ -6083,7 +6083,7 @@
cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"})
}
-func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageNameVariants(c *check.C) {
dockerfile := `
FROM busybox as foo
COPY foo /
@@ -6094,7 +6094,7 @@
FROM foo
COPY --from=foo1 foo f1
COPY --from=FOo2 foo f2
- ` // foo2 case also tests that names are canse insensitive
+ ` // foo2 case also tests that names are case insensitive
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
@@ -6108,7 +6108,7 @@
cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"})
}
-func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) {
+func (s *DockerTrustSuite) TestBuildMultiStageTrusted(c *check.C) {
img1 := s.setupTrustedImage(c, "trusted-build1")
img2 := s.setupTrustedImage(c, "trusted-build2")
dockerFile := fmt.Sprintf(`
@@ -6130,7 +6130,7 @@
dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"})
}
-func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageMultipleBuildsWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
@@ -6218,7 +6218,7 @@
}
// #33176
-func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) {
+func (s *DockerSuite) TestBuildMulitStageResetScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
index 58a50ce..057c2d6 100644
--- a/integration-cli/docker_cli_commit_test.go
+++ b/integration-cli/docker_cli_commit_test.go
@@ -121,11 +121,19 @@
"test", "test-commit")
imageID = strings.TrimSpace(imageID)
+ // The ordering here is due to `PATH` being overridden from the container's
+ // ENV. On windows, the container doesn't have a `PATH` ENV variable so
+ // the ordering is the same as the cli.
+ expectedEnv := "[PATH=/foo DEBUG=true test=1]"
+ if testEnv.DaemonPlatform() == "windows" {
+ expectedEnv = "[DEBUG=true test=1 PATH=/foo]"
+ }
+
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalized on Windows
expected := map[string]string{
"Config.ExposedPorts": "map[8080/tcp:{}]",
- "Config.Env": "[DEBUG=true test=1 PATH=/foo]",
+ "Config.Env": expectedEnv,
"Config.Labels": "map[foo:bar]",
"Config.Cmd": "[/bin/sh]",
"Config.WorkingDir": prefix + slash + "opt",
diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go
index f5fe0da..8e12314 100644
--- a/integration-cli/docker_cli_create_test.go
+++ b/integration-cli/docker_cli_create_test.go
@@ -268,7 +268,6 @@
dockerCmd(c, "create", imageID)
dockerCmd(c, "create", truncatedImageID)
- dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID))
// Ensure this fails
out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID))
@@ -280,7 +279,10 @@
c.Fatalf(`Expected %q in output; got: %s`, expected, out)
}
- out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID))
+ if i := strings.IndexRune(imageID, ':'); i >= 0 {
+ imageID = imageID[i+1:]
+ }
+ out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", imageID))
if exit == 0 {
c.Fatalf("expected non-zero exit code; received %d", exit)
}
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index 6865b92..fb61626 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -1451,7 +1451,7 @@
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
+ "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
// restart daemon.
d.Restart(c)
@@ -2011,7 +2011,7 @@
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
+ "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
// Give time to containerd to process the command if we don't
// the exit event might be received after we do the inspect
@@ -2106,7 +2106,7 @@
result := icmd.RunCommand(
ctrBinary,
"--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace,
+ "--namespace", moby_daemon.ContainersNamespace,
"tasks", "resume", cid)
result.Assert(t, icmd.Success)
diff --git a/integration-cli/docker_cli_logout_test.go b/integration-cli/docker_cli_logout_test.go
index 5076ceb..e0752f4 100644
--- a/integration-cli/docker_cli_logout_test.go
+++ b/integration-cli/docker_cli_logout_test.go
@@ -13,9 +13,7 @@
)
func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) {
-
- // @TODO TestLogoutWithExternalAuth expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
- s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
+ s.d.StartWithBusybox(c)
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
@@ -62,7 +60,7 @@
// check I cannot pull anymore
out, err := s.d.Cmd("--config", tmp, "pull", repoName)
c.Assert(err, check.NotNil, check.Commentf(out))
- c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found")
+ c.Assert(out, checker.Contains, "no basic auth credentials")
}
// #23100
diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go
index 613cdb3..0e88b1e 100644
--- a/integration-cli/docker_cli_pull_test.go
+++ b/integration-cli/docker_cli_pull_test.go
@@ -259,18 +259,6 @@
c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected"))
}
-func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) {
- // @TODO TestPullNoCredentialsNotFound expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
- s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
-
- // we don't care about the actual image, we just want to see image not found
- // because that means v2 call returned 401 and we fell back to v1 which usually
- // gives a 404 (in this case the test registry doesn't handle v1 at all)
- out, err := s.d.Cmd("pull", privateRegistryURL+"/busybox")
- c.Assert(err, check.NotNil, check.Commentf(out))
- c.Assert(out, checker.Contains, "Error: image busybox:latest not found")
-}
-
// Regression test for https://github.com/docker/docker/issues/26429
func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) {
testRequires(c, DaemonIsWindows, Network)
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
index ee94a9b..278e348 100644
--- a/integration-cli/docker_cli_tag_test.go
+++ b/integration-cli/docker_cli_tag_test.go
@@ -1,13 +1,10 @@
package main
import (
- "fmt"
"strings"
"github.com/docker/docker/integration-cli/checker"
- "github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/internal/testutil"
- "github.com/docker/docker/pkg/stringid"
"github.com/go-check/check"
)
@@ -140,29 +137,3 @@
c.Fatal("tagging with image named \"sha256\" should have failed")
}
}
-
-// ensure tags cannot create ambiguity with image ids
-func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) {
- buildImageSuccessfully(c, "notbusybox:latest", build.WithDockerfile(`FROM busybox
- MAINTAINER dockerio`))
- imageID := getIDByName(c, "notbusybox:latest")
- truncatedImageID := stringid.TruncateID(imageID)
- truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID)
-
- id := inspectField(c, truncatedTag, "Id")
-
- // Ensure inspect by image id returns image for image id
- c.Assert(id, checker.Equals, imageID)
- c.Logf("Built image: %s", imageID)
-
- // test setting tag fails
- _, _, err := dockerCmdWithError("tag", "busybox:latest", truncatedTag)
- if err != nil {
- c.Fatalf("Error tagging with an image id: %s", err)
- }
-
- id = inspectField(c, truncatedTag, "Id")
-
- // Ensure id is imageID and not busybox:latest
- c.Assert(id, checker.Not(checker.Equals), imageID)
-}
diff --git a/integration-cli/docker_cli_v2_only_test.go b/integration-cli/docker_cli_v2_only_test.go
index b82cdbd..3757341 100644
--- a/integration-cli/docker_cli_v2_only_test.go
+++ b/integration-cli/docker_cli_v2_only_test.go
@@ -22,7 +22,7 @@
return f.Name(), nil
}
-// TestV2Only ensures that a daemon by default does not
+// TestV2Only ensures that a daemon does not
// attempt to contact any v1 registry endpoints.
func (s *DockerRegistrySuite) TestV2Only(c *check.C) {
reg, err := registry.NewMock(c)
@@ -56,65 +56,3 @@
s.d.Cmd("push", repoName)
s.d.Cmd("pull", repoName)
}
-
-// TestV1 starts a daemon with legacy registries enabled
-// and ensure v1 endpoints are hit for the following operations:
-// login, push, pull, build & run
-func (s *DockerRegistrySuite) TestV1(c *check.C) {
- reg, err := registry.NewMock(c)
- defer reg.Close()
- c.Assert(err, check.IsNil)
-
- v2Pings := 0
- reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) {
- v2Pings++
- // V2 ping 404 causes fallback to v1
- w.WriteHeader(404)
- })
-
- v1Pings := 0
- reg.RegisterHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) {
- v1Pings++
- })
-
- v1Logins := 0
- reg.RegisterHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) {
- v1Logins++
- })
-
- v1Repo := 0
- reg.RegisterHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) {
- v1Repo++
- })
-
- reg.RegisterHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) {
- v1Repo++
- })
-
- s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=false")
-
- tmp, err := ioutil.TempDir("", "integration-cli-")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmp)
-
- dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL()))
- c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
-
- s.d.Cmd("build", "--file", dockerfileName, tmp)
- c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build"))
-
- repoName := fmt.Sprintf("%s/busybox", reg.URL())
- s.d.Cmd("run", repoName)
- c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run"))
-
- s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL())
- c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt"))
-
- s.d.Cmd("tag", "busybox", repoName)
- s.d.Cmd("push", repoName)
-
- c.Assert(v1Repo, check.Equals, 2)
-
- s.d.Cmd("pull", repoName)
- c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull"))
-}
diff --git a/integration/container/health_test.go b/integration/container/health_test.go
new file mode 100644
index 0000000..8ed86a8
--- /dev/null
+++ b/integration/container/health_test.go
@@ -0,0 +1,61 @@
+package container
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/gotestyourself/gotestyourself/poll"
+ "github.com/stretchr/testify/require"
+)
+
+// TestHealthCheckWorkdir verifies that health-checks inherit the containers'
+// working-dir.
+func TestHealthCheckWorkdir(t *testing.T) {
+ defer setupTest(t)()
+ ctx := context.Background()
+ client := request.NewAPIClient(t)
+
+ c, err := client.ContainerCreate(ctx,
+ &container.Config{
+ Image: "busybox",
+ Tty: true,
+ WorkingDir: "/foo",
+ Cmd: strslice.StrSlice([]string{"top"}),
+ Healthcheck: &container.HealthConfig{
+ Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"},
+ Interval: 50 * time.Millisecond,
+ Retries: 3,
+ },
+ },
+ &container.HostConfig{},
+ &network.NetworkingConfig{},
+ "healthtest",
+ )
+ require.NoError(t, err)
+ err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{})
+ require.NoError(t, err)
+
+ poll.WaitOn(t, pollForHealthStatus(ctx, client, c.ID, types.Healthy), poll.WithDelay(100*time.Millisecond))
+}
+
+func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus string) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ inspect, err := client.ContainerInspect(ctx, containerID)
+
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case inspect.State.Health.Status == healthStatus:
+ return poll.Success()
+ default:
+ return poll.Continue("waiting for container to become %s", healthStatus)
+ }
+ }
+}
diff --git a/integration/container/restart_test.go b/integration/container/restart_test.go
new file mode 100644
index 0000000..fe80f09
--- /dev/null
+++ b/integration/container/restart_test.go
@@ -0,0 +1,112 @@
+package container
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/integration-cli/daemon"
+)
+
+func TestDaemonRestartKillContainers(t *testing.T) {
+ type testCase struct {
+ desc string
+ config *container.Config
+ hostConfig *container.HostConfig
+
+ xRunning bool
+ xRunningLiveRestore bool
+ }
+
+ for _, c := range []testCase{
+ {
+ desc: "container without restart policy",
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
+ xRunningLiveRestore: true,
+ },
+ {
+ desc: "container with restart=always",
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
+ hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
+ xRunning: true,
+ xRunningLiveRestore: true,
+ },
+ } {
+ for _, liveRestoreEnabled := range []bool{false, true} {
+ for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){
+ "kill-daemon": func(t *testing.T, d *daemon.Daemon) {
+ if err := d.Kill(); err != nil {
+ t.Fatal(err)
+ }
+ },
+ "stop-daemon": func(t *testing.T, d *daemon.Daemon) {
+ d.Stop(t)
+ },
+ } {
+ t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) {
+ c := c
+ liveRestoreEnabled := liveRestoreEnabled
+ stopDaemon := stopDaemon
+
+ t.Parallel()
+
+ d := daemon.New(t, "", "dockerd", daemon.Config{})
+ client, err := d.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var args []string
+ if liveRestoreEnabled {
+ args = []string{"--live-restore"}
+ }
+
+ d.StartWithBusybox(t, args...)
+ defer d.Stop(t)
+ ctx := context.Background()
+
+ resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
+
+ if err := client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+ t.Fatal(err)
+ }
+
+ stopDaemon(t, d)
+ d.Start(t, args...)
+
+ expected := c.xRunning
+ if liveRestoreEnabled {
+ expected = c.xRunningLiveRestore
+ }
+
+ var running bool
+ for i := 0; i < 30; i++ {
+ inspect, err := client.ContainerInspect(ctx, resp.ID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running = inspect.State.Running
+ if running == expected {
+ break
+ }
+ time.Sleep(2 * time.Second)
+
+ }
+
+ if running != expected {
+ t.Fatalf("got unexpected running state, expected %v, got: %v", expected, running)
+ }
+ // TODO(cpuguy83): test pause states... this seems to be rather undefined currently
+ })
+ }
+ }
+ }
+}
diff --git a/integration/image/commit_test.go b/integration/image/commit_test.go
new file mode 100644
index 0000000..13edbe1
--- /dev/null
+++ b/integration/image/commit_test.go
@@ -0,0 +1,47 @@
+package image
+
+import (
+ "context"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCommitInheritsEnv(t *testing.T) {
+ defer setupTest(t)()
+ client := request.NewAPIClient(t)
+ ctx := context.Background()
+
+ createResp1, err := client.ContainerCreate(ctx, &container.Config{Image: "busybox"}, nil, nil, "")
+ require.NoError(t, err)
+
+ commitResp1, err := client.ContainerCommit(ctx, createResp1.ID, types.ContainerCommitOptions{
+ Changes: []string{"ENV PATH=/bin"},
+ Reference: "test-commit-image",
+ })
+ require.NoError(t, err)
+
+ image1, _, err := client.ImageInspectWithRaw(ctx, commitResp1.ID)
+ require.NoError(t, err)
+
+ expectedEnv1 := []string{"PATH=/bin"}
+ assert.Equal(t, expectedEnv1, image1.Config.Env)
+
+ createResp2, err := client.ContainerCreate(ctx, &container.Config{Image: image1.ID}, nil, nil, "")
+ require.NoError(t, err)
+
+ commitResp2, err := client.ContainerCommit(ctx, createResp2.ID, types.ContainerCommitOptions{
+ Changes: []string{"ENV PATH=/usr/bin:$PATH"},
+ Reference: "test-commit-image",
+ })
+ require.NoError(t, err)
+
+ image2, _, err := client.ImageInspectWithRaw(ctx, commitResp2.ID)
+ require.NoError(t, err)
+ expectedEnv2 := []string{"PATH=/usr/bin:/bin"}
+ assert.Equal(t, expectedEnv2, image2.Config.Env)
+}
diff --git a/integration/system/info_linux_test.go b/integration/system/info_linux_test.go
new file mode 100644
index 0000000..bea14ea
--- /dev/null
+++ b/integration/system/info_linux_test.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package system
+
+import (
+ "testing"
+
+ "github.com/docker/docker/integration/util/request"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/net/context"
+)
+
+func TestInfo_BinaryCommits(t *testing.T) {
+ client := request.NewAPIClient(t)
+
+ info, err := client.Info(context.Background())
+ require.NoError(t, err)
+
+ assert.NotNil(t, info.ContainerdCommit)
+ assert.NotEqual(t, "N/A", info.ContainerdCommit.ID)
+ assert.Equal(t, testEnv.DaemonInfo.ContainerdCommit.Expected, info.ContainerdCommit.Expected)
+ assert.Equal(t, info.ContainerdCommit.Expected, info.ContainerdCommit.ID)
+
+ assert.NotNil(t, info.InitCommit)
+ assert.NotEqual(t, "N/A", info.InitCommit.ID)
+ assert.Equal(t, testEnv.DaemonInfo.InitCommit.Expected, info.InitCommit.Expected)
+ assert.Equal(t, info.InitCommit.Expected, info.InitCommit.ID)
+
+ assert.NotNil(t, info.RuncCommit)
+ assert.NotEqual(t, "N/A", info.RuncCommit.ID)
+ assert.Equal(t, testEnv.DaemonInfo.RuncCommit.Expected, info.RuncCommit.Expected)
+ assert.Equal(t, info.RuncCommit.Expected, info.RuncCommit.ID)
+}
diff --git a/libcontainerd/client_daemon.go b/libcontainerd/client_daemon.go
index 0a3502c..a9f7c11 100644
--- a/libcontainerd/client_daemon.go
+++ b/libcontainerd/client_daemon.go
@@ -27,6 +27,7 @@
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/linux/runctypes"
"github.com/containerd/typeurl"
@@ -42,7 +43,7 @@
const InitProcessName = "init"
type container struct {
- sync.Mutex
+ mu sync.Mutex
bundleDir string
ctr containerd.Container
@@ -51,6 +52,54 @@
oomKilled bool
}
+func (c *container) setTask(t containerd.Task) {
+ c.mu.Lock()
+ c.task = t
+ c.mu.Unlock()
+}
+
+func (c *container) getTask() containerd.Task {
+ c.mu.Lock()
+ t := c.task
+ c.mu.Unlock()
+ return t
+}
+
+func (c *container) addProcess(id string, p containerd.Process) {
+ c.mu.Lock()
+ if c.execs == nil {
+ c.execs = make(map[string]containerd.Process)
+ }
+ c.execs[id] = p
+ c.mu.Unlock()
+}
+
+func (c *container) deleteProcess(id string) {
+ c.mu.Lock()
+ delete(c.execs, id)
+ c.mu.Unlock()
+}
+
+func (c *container) getProcess(id string) containerd.Process {
+ c.mu.Lock()
+ p := c.execs[id]
+ c.mu.Unlock()
+ return p
+}
+
+func (c *container) setOOMKilled(killed bool) {
+ c.mu.Lock()
+ c.oomKilled = killed
+ c.mu.Unlock()
+}
+
+func (c *container) getOOMKilled() bool {
+ c.mu.Lock()
+ killed := c.oomKilled
+ c.mu.Unlock()
+ return killed
+}
+
type client struct {
sync.RWMutex // protects containers map
@@ -160,10 +209,10 @@
// Start create and start a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) {
ctr := c.getContainer(id)
- switch {
- case ctr == nil:
+ if ctr == nil {
return -1, errors.WithStack(newNotFoundError("no such container"))
- case ctr.task != nil:
+ }
+ if t := ctr.getTask(); t != nil {
return -1, errors.WithStack(newConflictError("container already started"))
}
@@ -227,9 +276,7 @@
return -1, err
}
- c.Lock()
- c.containers[id].task = t
- c.Unlock()
+ ctr.setTask(t)
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
@@ -239,9 +286,7 @@
c.logger.WithError(err).WithField("container", id).
Error("failed to delete task after fail start")
}
- c.Lock()
- c.containers[id].task = nil
- c.Unlock()
+ ctr.setTask(nil)
return -1, err
}
@@ -250,12 +295,15 @@
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
ctr := c.getContainer(containerID)
- switch {
- case ctr == nil:
+ if ctr == nil {
return -1, errors.WithStack(newNotFoundError("no such container"))
- case ctr.task == nil:
+ }
+ t := ctr.getTask()
+ if t == nil {
return -1, errors.WithStack(newInvalidParameterError("container is not running"))
- case ctr.execs != nil && ctr.execs[processID] != nil:
+ }
+
+ if p := ctr.getProcess(processID); p != nil {
return -1, errors.WithStack(newConflictError("id already in use"))
}
@@ -278,7 +326,7 @@
}
}()
- p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
+ p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
return rio, err
})
@@ -291,21 +339,14 @@
return -1, err
}
- ctr.Lock()
- if ctr.execs == nil {
- ctr.execs = make(map[string]containerd.Process)
- }
- ctr.execs[processID] = p
- ctr.Unlock()
+ ctr.addProcess(processID, p)
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
if err = p.Start(ctx); err != nil {
p.Delete(context.Background())
- ctr.Lock()
- delete(ctr.execs, processID)
- ctr.Unlock()
+ ctr.deleteProcess(processID)
return -1, err
}
@@ -317,7 +358,7 @@
if err != nil {
return err
}
- return p.Kill(ctx, syscall.Signal(signal))
+ return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
@@ -431,12 +472,9 @@
return 255, time.Now(), nil
}
- c.Lock()
- if ctr, ok := c.containers[containerID]; ok {
- ctr.task = nil
+ if ctr := c.getContainer(containerID); ctr != nil {
+ ctr.setTask(nil)
}
- c.Unlock()
-
return status.ExitCode(), status.ExitTime(), nil
}
@@ -470,7 +508,12 @@
return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
}
- s, err := ctr.task.Status(ctx)
+ t := ctr.getTask()
+ if t == nil {
+ return StatusUnknown, errors.WithStack(newNotFoundError("no such task"))
+ }
+
+ s, err := t.Status(ctx)
if err != nil {
return StatusUnknown, err
}
@@ -546,26 +589,22 @@
func (c *client) getProcess(containerID, processID string) (containerd.Process, error) {
ctr := c.getContainer(containerID)
- switch {
- case ctr == nil:
+ if ctr == nil {
return nil, errors.WithStack(newNotFoundError("no such container"))
- case ctr.task == nil:
- return nil, errors.WithStack(newNotFoundError("container is not running"))
- case processID == InitProcessName:
- return ctr.task, nil
- default:
- ctr.Lock()
- defer ctr.Unlock()
- if ctr.execs == nil {
- return nil, errors.WithStack(newNotFoundError("no execs"))
- }
}
- p := ctr.execs[processID]
+ t := ctr.getTask()
+ if t == nil {
+ return nil, errors.WithStack(newNotFoundError("container is not running"))
+ }
+ if processID == InitProcessName {
+ return t, nil
+ }
+
+ p := ctr.getProcess(processID)
if p == nil {
return nil, errors.WithStack(newNotFoundError("no such exec"))
}
-
return p, nil
}
@@ -623,12 +662,7 @@
}
if et == EventExit && ei.ProcessID != ei.ContainerID {
- var p containerd.Process
- ctr.Lock()
- if ctr.execs != nil {
- p = ctr.execs[ei.ProcessID]
- }
- ctr.Unlock()
+ p := ctr.getProcess(ei.ProcessID)
if p == nil {
c.logger.WithError(errors.New("no such process")).
WithFields(logrus.Fields{
@@ -644,9 +678,8 @@
"process": ei.ProcessID,
}).Warn("failed to delete process")
}
- c.Lock()
- delete(ctr.execs, ei.ProcessID)
- c.Unlock()
+ ctr.deleteProcess(ei.ProcessID)
+
ctr := c.getContainer(ei.ContainerID)
if ctr == nil {
c.logger.WithFields(logrus.Fields{
@@ -783,10 +816,10 @@
}
if oomKilled {
- ctr.oomKilled = true
+ ctr.setOOMKilled(true)
oomKilled = false
}
- ei.OOMKilled = ctr.oomKilled
+ ei.OOMKilled = ctr.getOOMKilled()
c.processEvent(ctr, et, ei)
}
@@ -816,12 +849,19 @@
}
func wrapError(err error) error {
- if err != nil {
- msg := err.Error()
- for _, s := range []string{"container does not exist", "not found", "no such container"} {
- if strings.Contains(msg, s) {
- return wrapNotFoundError(err)
- }
+ if err == nil {
+ return nil
+ }
+
+ switch {
+ case errdefs.IsNotFound(err):
+ return wrapNotFoundError(err)
+ }
+
+ msg := err.Error()
+ for _, s := range []string{"container does not exist", "not found", "no such container"} {
+ if strings.Contains(msg, s) {
+ return wrapNotFoundError(err)
}
}
return err
diff --git a/libcontainerd/remote_daemon.go b/libcontainerd/remote_daemon.go
index e6fd05f..609bcfb 100644
--- a/libcontainerd/remote_daemon.go
+++ b/libcontainerd/remote_daemon.go
@@ -278,7 +278,7 @@
select {
case <-r.shutdownContext.Done():
- r.logger.Info("stopping healtcheck following graceful shutdown")
+ r.logger.Info("stopping healthcheck following graceful shutdown")
client.Close()
return
default:
diff --git a/opts/hosts_windows.go b/opts/hosts_windows.go
index 7c239e0..684f0e1 100644
--- a/opts/hosts_windows.go
+++ b/opts/hosts_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package opts
// DefaultHost constant defines the default host string used by docker on Windows
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go
index a22410c..66243a6 100644
--- a/pkg/archive/archive_windows.go
+++ b/pkg/archive/archive_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package archive
import (
diff --git a/pkg/directory/directory_windows.go b/pkg/directory/directory_windows.go
index 6fb0917..efe05ce 100644
--- a/pkg/directory/directory_windows.go
+++ b/pkg/directory/directory_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package directory
import (
diff --git a/pkg/dmesg/dmesg_linux.go b/pkg/dmesg/dmesg_linux.go
index 7df7f3d..2fb494e 100644
--- a/pkg/dmesg/dmesg_linux.go
+++ b/pkg/dmesg/dmesg_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package dmesg
import (
diff --git a/pkg/fsutils/fsutils_linux.go b/pkg/fsutils/fsutils_linux.go
index e6094b5..7596259 100644
--- a/pkg/fsutils/fsutils_linux.go
+++ b/pkg/fsutils/fsutils_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package fsutils
import (
diff --git a/pkg/homedir/homedir_linux.go b/pkg/homedir/homedir_linux.go
index 012fe52..a7cd2e1 100644
--- a/pkg/homedir/homedir_linux.go
+++ b/pkg/homedir/homedir_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package homedir
import (
diff --git a/pkg/idtools/idtools_windows.go b/pkg/idtools/idtools_windows.go
index 94ca33a..ec49177 100644
--- a/pkg/idtools/idtools_windows.go
+++ b/pkg/idtools/idtools_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package idtools
import (
diff --git a/pkg/ioutils/temp_windows.go b/pkg/ioutils/temp_windows.go
index c258e5f..fb14c95 100644
--- a/pkg/ioutils/temp_windows.go
+++ b/pkg/ioutils/temp_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package ioutils
import (
diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go
index be69fee..dde889e 100644
--- a/pkg/mount/mountinfo_linux.go
+++ b/pkg/mount/mountinfo_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package mount
import (
diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go
index 8ceec84..f3c13e5 100644
--- a/pkg/mount/sharedsubtree_linux.go
+++ b/pkg/mount/sharedsubtree_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package mount
// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go
index e598672..93620ee 100644
--- a/pkg/parsers/kernel/kernel_windows.go
+++ b/pkg/parsers/kernel/kernel_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package kernel
import (
diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go
index 05319ea..d3f1061 100644
--- a/pkg/reexec/command_linux.go
+++ b/pkg/reexec/command_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package reexec
import (
diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go
index ca871c4..c320876 100644
--- a/pkg/reexec/command_windows.go
+++ b/pkg/reexec/command_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package reexec
import (
diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go
index 440f270..c84a63e 100644
--- a/pkg/signal/signal_windows.go
+++ b/pkg/signal/signal_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package signal
import (
diff --git a/pkg/sysinfo/numcpu_linux.go b/pkg/sysinfo/numcpu_linux.go
index f1d2d9d..5739b33 100644
--- a/pkg/sysinfo/numcpu_linux.go
+++ b/pkg/sysinfo/numcpu_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package sysinfo
import (
diff --git a/pkg/sysinfo/numcpu_windows.go b/pkg/sysinfo/numcpu_windows.go
index 1d89dd5..3516182 100644
--- a/pkg/sysinfo/numcpu_windows.go
+++ b/pkg/sysinfo/numcpu_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package sysinfo
import (
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
index 4e6255b..8889318 100644
--- a/pkg/sysinfo/sysinfo_windows.go
+++ b/pkg/sysinfo/sysinfo_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package sysinfo
// New returns an empty SysInfo for windows for now.
diff --git a/pkg/system/chtimes_windows.go b/pkg/system/chtimes_windows.go
index 45428c1..a1f4fd5 100644
--- a/pkg/system/chtimes_windows.go
+++ b/pkg/system/chtimes_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
import (
diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go
index a61b53d..b1e46d9 100644
--- a/pkg/system/filesys_windows.go
+++ b/pkg/system/filesys_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
import (
diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go
index 2e863c0..ba2692a 100644
--- a/pkg/system/mknod_windows.go
+++ b/pkg/system/mknod_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
// Mknod is not implemented on Windows.
diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go
index 13f1de1..71fc0f1 100644
--- a/pkg/system/umask_windows.go
+++ b/pkg/system/umask_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
// Umask is not supported on the windows platform.
diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go
index 5abf5e7..908131eb 100644
--- a/pkg/tarsum/fileinfosums.go
+++ b/pkg/tarsum/fileinfosums.go
@@ -1,6 +1,10 @@
package tarsum
-import "sort"
+import (
+ "runtime"
+ "sort"
+ "strings"
+)
// FileInfoSumInterface provides an interface for accessing file checksum
// information within a tar file. This info is accessed through interface
@@ -35,8 +39,11 @@
// GetFile returns the first FileInfoSumInterface with a matching name.
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+ // We do case insensitive matching on Windows as c:\APP and c:\app are
+ // the same. See issue #33107.
for i := range fis {
- if fis[i].Name() == name {
+ if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
+ (runtime.GOOS != "windows" && fis[i].Name() == name) {
return fis[i]
}
}
diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go
index b6819b3..284ac63 100644
--- a/pkg/term/term_windows.go
+++ b/pkg/term/term_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package term
import (
diff --git a/plugin/backend_linux.go b/plugin/backend_linux.go
index 28a6c18..1f2830a 100644
--- a/plugin/backend_linux.go
+++ b/plugin/backend_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package plugin
import (
diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go
index 9839467..5343b85 100644
--- a/plugin/executor/containerd/containerd.go
+++ b/plugin/executor/containerd/containerd.go
@@ -16,7 +16,7 @@
)
// PluginNamespace is the name used for the plugins namespace
-var PluginNamespace = "moby-plugins"
+var PluginNamespace = "plugins.moby"
// ExitHandler represents an object that is called when the exit event is received from containerd
type ExitHandler interface {
diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go
index eff21e1..59066c1 100644
--- a/plugin/manager_linux.go
+++ b/plugin/manager_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package plugin
import (
diff --git a/plugin/manager_solaris.go b/plugin/manager_solaris.go
deleted file mode 100644
index ac03d6e..0000000
--- a/plugin/manager_solaris.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package plugin
-
-import (
- "fmt"
-
- "github.com/docker/docker/plugin/v2"
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
- return fmt.Errorf("Not implemented")
-}
-
-func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) {
- return nil, fmt.Errorf("Not implemented")
-}
-
-func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
- return fmt.Errorf("Not implemented")
-}
-
-func (pm *Manager) restore(p *v2.Plugin) error {
- return fmt.Errorf("Not implemented")
-}
-
-// Shutdown plugins
-func (pm *Manager) Shutdown() {
-}
-
-func setupRoot(root string) error { return nil }
diff --git a/plugin/manager_windows.go b/plugin/manager_windows.go
index 56a7ee3..ac03d6e 100644
--- a/plugin/manager_windows.go
+++ b/plugin/manager_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package plugin
import (
diff --git a/plugin/v2/plugin_linux.go b/plugin/v2/plugin_linux.go
index be82363..9590df4 100644
--- a/plugin/v2/plugin_linux.go
+++ b/plugin/v2/plugin_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package v2
import (
diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md
index 6ae7baf..4b52989 100644
--- a/project/GOVERNANCE.md
+++ b/project/GOVERNANCE.md
@@ -1,17 +1,120 @@
-# Docker Governance Advisory Board Meetings
+# Moby project governance
-In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public.
-All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership.
+Moby projects are governed by the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc).
+See the Moby TSC [charter](https://github.com/moby/tsc/blob/master/README.md) for
+further information on the role of the TSC and procedures for escalation
+of technical issues or concerns.
-The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at
-[Google Docs Folder](https://goo.gl/Alfj8r)
+Contact [any Moby TSC member](https://github.com/moby/tsc/blob/master/MEMBERS.md) with your questions/concerns about the governance or a specific technical
+issue that you feel requires escalation.
-These include:
+## Project maintainers
-* First Meeting Notes
-* DGAB Charter
-* Presentation 1: Introductory Presentation, including State of The Project
-* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
-* Presentation 3: Long Term Roadmap/Statement of Direction
-
+The current maintainers of the moby/moby repository are listed in the
+[MAINTAINERS](/MAINTAINERS) file.
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+ 1. They share responsibility in the project's success.
+ 2. They have made a long-term, recurring time investment to improve the project.
+ 3. They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is less visible.
+It's easy to recognize a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+
+### Adding maintainers
+
+Maintainers are first and foremost contributors who have shown their
+commitment to the long term success of a project. Contributors who want to
+become maintainers first demonstrate commitment to the project by contributing
+code, reviewing others' work, and triaging issues on a regular basis for at
+least three months.
+
+The contributions alone don't make you a maintainer. You need to earn the
+trust of the current maintainers and other project contributors, that your
+decisions and actions are in the best interest of the project.
+
+Periodically, the existing maintainers curate a list of contributors who have
+shown regular activity on the project over the prior months. From this
+list, maintainer candidates are selected and proposed on the maintainers
+mailing list.
+
+After a candidate is announced on the maintainers mailing list, the
+existing maintainers discuss the candidate over the next 5 business days,
+provide feedback, and vote. At least 66% of the current maintainers must
+vote in the affirmative.
+
+If a candidate is approved, a maintainer contacts the candidate to
+invite them to open a pull request that adds the contributor to
+the MAINTAINERS file. The candidate becomes a maintainer once the pull
+request is merged.
+
+### Removing maintainers
+
+Maintainers can be removed from the project, either at their own request
+or due to [project inactivity](#inactive-maintainer-policy).
+
+#### How to step down
+
+Life priorities, interests, and passions can change. If you're a maintainer but
+feel you must remove yourself from the list, inform other maintainers that you
+intend to step down, and if possible, help find someone to pick up your work.
+At the very least, ensure your work can be continued where you left off.
+
+After you've informed other maintainers, create a pull request to remove
+yourself from the MAINTAINERS file.
+
+#### Inactive maintainer policy
+
+An existing maintainer can be removed if they do not show significant activity
+on the project. Periodically, the maintainers review the list of maintainers
+and their activity over the last three months.
+
+If a maintainer has shown insufficient activity over this period, a project
+representative will contact the maintainer to ask if they want to continue
+being a maintainer. If the maintainer decides to step down as a maintainer,
+they open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to continue in this role, but is unable to perform the
+required duties, they can be removed with a vote by at least 66% of the current
+maintainers. The maintainer under discussion will not be allowed to vote. An
+e-mail is sent to the mailing list, inviting maintainers of the project to
+vote. The voting period is five business days. Issues related to a maintainer's
+performance should be discussed with them among the other maintainers so that
+they are not surprised by a pull request removing them. This discussion should
+be handled objectively with no ad hominem attacks.
+
+## Project decision making
+
+Short answer: **Everything is a pull request**.
+
+The Moby core engine project is an open-source project with an open design
+philosophy. This means that the repository is the source of truth for **every**
+aspect of the project, including its philosophy, design, road map, and APIs.
+*If it's part of the project, it's in the repo. If it's in the repo, it's part
+of the project.*
+
+As a result, each decision can be expressed as a change to the repository. An
+implementation change is expressed as a change to the source code. An API
+change is a change to the API specification. A philosophy change is a change
+to the philosophy manifesto, and so on.
+
+All decisions affecting the moby/moby repository, both big and small, follow
+the same steps:
+
+ * **Step 1**: Open a pull request. Anyone can do this.
+
+ * **Step 2**: Discuss the pull request. Anyone can do this.
+
+ * **Step 3**: Maintainers merge, close or reject the pull request.
+
+Pull requests are reviewed by the current maintainers of the moby/moby
+repository. Weekly meetings are organized to synchronously
+discuss tricky PRs, as well as design and architecture decisions. When
+technical agreement cannot be reached among the maintainers of the project,
+escalation or concerns can be raised by opening an issue to be handled
+by the [Moby Technical Steering Committee](https://github.com/moby/tsc).
diff --git a/vendor.conf b/vendor.conf
index 9ff5290..2163706 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -77,7 +77,7 @@
github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
# gelf logging driver deps
-github.com/Graylog2/go-gelf v2
+github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841
github.com/fluent/fluent-logger-golang v1.3.0
# fluent-logger-golang deps
@@ -103,7 +103,7 @@
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
-github.com/containerd/containerd v1.0.0
+github.com/containerd/containerd 89623f28b87a6004d4b785663257362d1658a729 # v1.0.0
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
@@ -114,7 +114,7 @@
github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
# cluster
-github.com/docker/swarmkit 4429c763170d9ca96929249353c3270c19e7d39e
+github.com/docker/swarmkit 713d79dc8799b33465c58ed120b870c52eb5eb4f
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go b/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
index 8f22c9a..74255ec 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
@@ -5,6 +5,7 @@
"encoding/json"
"fmt"
"net"
+ "time"
)
type TCPReader struct {
@@ -13,16 +14,21 @@
messages chan []byte
}
-func newTCPReader(addr string) (*TCPReader, chan string, error) {
+type connChannels struct {
+ drop chan string
+ confirm chan string
+}
+
+func newTCPReader(addr string) (*TCPReader, chan string, chan string, error) {
var err error
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
- return nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
+ return nil, nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
}
listener, err := net.ListenTCP("tcp", tcpAddr)
if err != nil {
- return nil, nil, fmt.Errorf("ListenTCP: %s", err)
+ return nil, nil, nil, fmt.Errorf("ListenTCP: %s", err)
}
r := &TCPReader{
@@ -30,26 +36,61 @@
messages: make(chan []byte, 100), // Make a buffered channel with at most 100 messages
}
- signal := make(chan string, 1)
+ closeSignal := make(chan string, 1)
+ doneSignal := make(chan string, 1)
- go r.listenUntilCloseSignal(signal)
+ go r.listenUntilCloseSignal(closeSignal, doneSignal)
- return r, signal, nil
+ return r, closeSignal, doneSignal, nil
}
-func (r *TCPReader) listenUntilCloseSignal(signal chan string) {
- defer func() { signal <- "done" }()
- defer r.listener.Close()
+func (r *TCPReader) accepter(connections chan net.Conn) {
for {
conn, err := r.listener.Accept()
if err != nil {
break
}
- go handleConnection(conn, r.messages)
+ connections <- conn
+ }
+}
+
+func (r *TCPReader) listenUntilCloseSignal(closeSignal chan string, doneSignal chan string) {
+ defer func() { doneSignal <- "done" }()
+ defer r.listener.Close()
+ var conns []connChannels
+ connectionsChannel := make(chan net.Conn, 1)
+ go r.accepter(connectionsChannel)
+ for {
select {
- case sig := <-signal:
- if sig == "stop" {
- break
+ case conn := <-connectionsChannel:
+ dropSignal := make(chan string, 1)
+ dropConfirm := make(chan string, 1)
+ channels := connChannels{drop: dropSignal, confirm: dropConfirm}
+ go handleConnection(conn, r.messages, dropSignal, dropConfirm)
+ conns = append(conns, channels)
+ default:
+ }
+
+ select {
+ case sig := <-closeSignal:
+ if sig == "stop" || sig == "drop" {
+ if len(conns) >= 1 {
+ for _, s := range conns {
+ if s.drop != nil {
+ s.drop <- "drop"
+ <-s.confirm
+ conns = append(conns[:0], conns[1:]...)
+ }
+ }
+ if sig == "stop" {
+ return
+ }
+ } else if sig == "stop" {
+ closeSignal <- "stop"
+ }
+ if sig == "drop" {
+ doneSignal <- "done"
+ }
}
default:
}
@@ -60,19 +101,41 @@
return r.listener.Addr().String()
}
-func handleConnection(conn net.Conn, messages chan<- []byte) {
+func handleConnection(conn net.Conn, messages chan<- []byte, dropSignal chan string, dropConfirm chan string) {
+ defer func() { dropConfirm <- "done" }()
defer conn.Close()
reader := bufio.NewReader(conn)
var b []byte
var err error
+ drop := false
+ canDrop := false
for {
+ conn.SetDeadline(time.Now().Add(2 * time.Second))
if b, err = reader.ReadBytes(0); err != nil {
- continue
- }
- if len(b) > 0 {
+ if drop {
+ return
+ }
+ } else if len(b) > 0 {
messages <- b
+ canDrop = true
+ if drop {
+ return
+ }
+ } else if drop {
+ return
+ }
+ select {
+ case sig := <-dropSignal:
+ if sig == "drop" {
+ drop = true
+ time.Sleep(1 * time.Second)
+ if canDrop {
+ return
+ }
+ }
+ default:
}
}
}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go b/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
index ab95cbc..da1390d 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
@@ -75,12 +75,17 @@
func (w *TCPWriter) writeToSocketWithReconnectAttempts(zBytes []byte) (n int, err error) {
var errConn error
+ var i int
w.mu.Lock()
- for i := 0; n <= w.MaxReconnect; i++ {
+ for i = 0; i <= w.MaxReconnect; i++ {
errConn = nil
- n, err = w.conn.Write(zBytes)
+ if w.conn != nil {
+ n, err = w.conn.Write(zBytes)
+ } else {
+ err = fmt.Errorf("Connection was nil, will attempt reconnect")
+ }
if err != nil {
time.Sleep(w.ReconnectDelay * time.Second)
w.conn, errConn = net.Dial("tcp", w.addr)
@@ -90,6 +95,9 @@
}
w.mu.Unlock()
+ if i > w.MaxReconnect {
+ return 0, fmt.Errorf("Maximum reconnection attempts was reached; giving up")
+ }
if errConn != nil {
return 0, fmt.Errorf("Write Failed: %s\nReconnection failed: %s", err, errConn)
}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
index 93c3692..153be2c 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
@@ -27,5 +27,8 @@
// Close connection and interrupt blocked Read or Write operations
func (w *GelfWriter) Close() error {
+ if w.conn == nil {
+ return nil
+ }
return w.conn.Close()
}
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
index 53f9ffb..b89e72e 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
@@ -404,6 +404,11 @@
vipLoop:
for _, vip := range s.Endpoint.VirtualIPs {
if na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {
+ // This checks the condition when ingress network is needed
+ // but allocation has not been done.
+ if _, ok := na.services[s.ID]; !ok {
+ return false
+ }
continue vipLoop
}
for _, net := range specNetworks {
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
index 19dcbec..7f3f1c1 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
@@ -324,9 +324,18 @@
}
portStates := allocatedPorts{}
+ hostTargetPorts := map[uint32]struct{}{}
for _, portState := range s.Endpoint.Ports {
- if portState.PublishMode == api.PublishModeIngress {
+ switch portState.PublishMode {
+ case api.PublishModeIngress:
portStates.addState(portState)
+ case api.PublishModeHost:
+ // build a map of host mode ports we've seen. if in the spec we get
+ // a host port that's not in the service, then we need to do
+ // allocation. if we get the same target port but something else
+ // has changed, then HostPublishPortsNeedUpdate will cover that
+ // case. see docker/swarmkit#2376
+ hostTargetPorts[portState.TargetPort] = struct{}{}
}
}
@@ -344,18 +353,28 @@
// Iterate portConfigs with PublishedPort == 0 (low priority)
for _, portConfig := range s.Spec.Endpoint.Ports {
// Ignore ports which are not PublishModeIngress
- if portConfig.PublishMode != api.PublishModeIngress {
- continue
- }
- if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
- return false
- }
+ switch portConfig.PublishMode {
+ case api.PublishModeIngress:
+ if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
+ return false
+ }
- // If SwarmPort was not defined by user and the func
- // is called during allocator initialization state then
- // we are not allocated.
- if portConfig.PublishedPort == 0 && onInit {
- return false
+ // If SwarmPort was not defined by user and the func
+ // is called during allocator initialization state then
+ // we are not allocated.
+ if portConfig.PublishedPort == 0 && onInit {
+ return false
+ }
+ case api.PublishModeHost:
+ // check if the target port is already in the port config. if it
+ // isn't, then it's our problem.
+ if _, ok := hostTargetPorts[portConfig.TargetPort]; !ok {
+ return false
+ }
+ // NOTE(dperny) there could be a further case where we check if
+ // there are host ports in the config that aren't in the spec, but
+ // that's only possible if there's a mismatch in the number of
+ // ports, which is handled by a length check earlier in the code
}
}
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
index bdc25d9..cee9fe1 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
@@ -12,6 +12,8 @@
func (is slotsByRunningState) Len() int { return len(is) }
func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
+// Less returns true if the first task should be preferred over the second task,
+// all other things being equal in terms of node balance.
func (is slotsByRunningState) Less(i, j int) bool {
iRunning := false
jRunning := false
@@ -29,7 +31,19 @@
}
}
- return iRunning && !jRunning
+ if iRunning && !jRunning {
+ return true
+ }
+
+ if !iRunning && jRunning {
+ return false
+ }
+
+ // Use Slot number as a tie-breaker to prefer to remove tasks in reverse
+ // order of Slot number. This would help us avoid unnecessary master
+ // migration when scaling down a stateful service because the master
+ // task of a stateful service is usually in a low numbered Slot.
+ return is[i][0].Slot < is[j][0].Slot
}
type slotWithIndex struct {
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
index bcef801..d702783 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
@@ -96,10 +96,10 @@
// Serviceless tasks can be cleaned up right away since they are not attached to a service.
tr.cleanup = append(tr.cleanup, t.ID)
}
- // tasks with desired state REMOVE that have progressed beyond SHUTDOWN can be cleaned up
+ // tasks with desired state REMOVE that have progressed beyond COMPLETE can be cleaned up
// right away
for _, t := range removeTasks {
- if t.Status.State >= api.TaskStateShutdown {
+ if t.Status.State >= api.TaskStateCompleted {
tr.cleanup = append(tr.cleanup, t.ID)
}
}
@@ -138,10 +138,10 @@
if t.Status.State >= api.TaskStateOrphaned && t.ServiceID == "" {
tr.cleanup = append(tr.cleanup, t.ID)
}
- // add tasks that have progressed beyond SHUTDOWN and have desired state REMOVE. These
+ // add tasks that have progressed beyond COMPLETE and have desired state REMOVE. These
// tasks are associated with slots that were removed as part of a service scale down
// or service removal.
- if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateShutdown {
+ if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateCompleted {
tr.cleanup = append(tr.cleanup, t.ID)
}
case api.EventUpdateCluster:
@@ -282,6 +282,8 @@
// Stop stops the TaskReaper and waits for the main loop to exit.
func (tr *TaskReaper) Stop() {
+ // TODO(dperny) calling stop on the task reaper twice will cause a panic
+ // because we try to close a channel that will already have been closed.
close(tr.stopChan)
<-tr.doneChan
}
diff --git a/volume/store/store.go b/volume/store/store.go
index fd1ca61..9a511a5 100644
--- a/volume/store/store.go
+++ b/volume/store/store.go
@@ -145,8 +145,9 @@
s.globalLock.Lock()
v, exists := s.names[name]
if exists {
- if _, err := volumedrivers.ReleaseDriver(v.DriverName()); err != nil {
- logrus.Errorf("Error dereferencing volume driver: %v", err)
+ driverName := v.DriverName()
+ if _, err := volumedrivers.ReleaseDriver(driverName); err != nil {
+ logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver")
}
}
if err := s.removeMeta(name); err != nil {