Merge pull request #260 from thaJeztah/19.03_backport_buildkit_systemd_resolvconf
[19.03 backport] build: buildkit now also uses systemd's resolv.conf
diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go
index ff7a5a9..0178091 100644
--- a/api/server/backend/build/backend.go
+++ b/api/server/backend/build/backend.go
@@ -91,7 +91,9 @@
stdout := config.ProgressWriter.StdoutFormatter
fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID))
}
- err = tagger.TagImages(image.ID(imageID))
+ if imageID != "" {
+ err = tagger.TagImages(image.ID(imageID))
+ }
return imageID, err
}
diff --git a/api/swagger.yaml b/api/swagger.yaml
index fb980dd..4fe0791 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -3805,7 +3805,7 @@
description: |
The driver to use for managing cgroups.
type: "string"
- enum: ["cgroupfs", "systemd"]
+ enum: ["cgroupfs", "systemd", "none"]
default: "cgroupfs"
example: "cgroupfs"
NEventsListener:
@@ -4040,7 +4040,7 @@
SecurityOptions:
description: |
List of security features that are enabled on the daemon, such as
- apparmor, seccomp, SELinux, and user-namespaces (userns).
+ apparmor, seccomp, SELinux, user-namespaces (userns), and rootless.
Additional configuration options for each security feature may
be present, and are included as a comma-separated list of key/value
@@ -4053,6 +4053,7 @@
- "name=seccomp,profile=default"
- "name=selinux"
- "name=userns"
+ - "name=rootless"
ProductLicense:
description: |
Reports a summary of the product license on the daemon.
@@ -6222,6 +6223,7 @@
description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
schema:
type: "string"
+ format: "binary"
tags: ["Container"]
/containers/prune:
post:
diff --git a/daemon/archive.go b/daemon/archive.go
index 9c7971b..109376b 100644
--- a/daemon/archive.go
+++ b/daemon/archive.go
@@ -31,18 +31,19 @@
}
// helper functions to extract or archive
-func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
+func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions, root string) error {
if ea, ok := i.(extractor); ok {
return ea.ExtractArchive(src, dst, opts)
}
- return chrootarchive.Untar(src, dst, opts)
+
+ return chrootarchive.UntarWithRoot(src, dst, opts, root)
}
-func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
if ap, ok := i.(archiver); ok {
return ap.ArchivePath(src, opts)
}
- return archive.TarWithOptions(src, opts)
+ return chrootarchive.Tar(src, opts, root)
}
// ContainerCopy performs a deprecated operation of archiving the resource at
@@ -238,7 +239,7 @@
sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
- data, err := archivePath(driver, sourceDir, opts)
+ data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
if err != nil {
return nil, nil, err
}
@@ -367,7 +368,7 @@
}
}
- if err := extractArchive(driver, content, resolvedPath, options); err != nil {
+ if err := extractArchive(driver, content, resolvedPath, options, container.BaseFS.Path()); err != nil {
return err
}
@@ -432,7 +433,7 @@
archive, err := archivePath(driver, basePath, &archive.TarOptions{
Compression: archive.Uncompressed,
IncludeFiles: filter,
- })
+ }, container.BaseFS.Path())
if err != nil {
return nil, err
}
diff --git a/daemon/cluster/nodes.go b/daemon/cluster/nodes.go
index 3c073b0..dffd755 100644
--- a/daemon/cluster/nodes.go
+++ b/daemon/cluster/nodes.go
@@ -8,6 +8,7 @@
"github.com/docker/docker/daemon/cluster/convert"
"github.com/docker/docker/errdefs"
swarmapi "github.com/docker/swarmkit/api"
+ "google.golang.org/grpc"
)
// GetNodes returns a list of all nodes known to a cluster.
@@ -30,7 +31,9 @@
r, err := state.controlClient.ListNodes(
ctx,
- &swarmapi.ListNodesRequest{Filters: filters})
+ &swarmapi.ListNodesRequest{Filters: filters},
+ grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
+ )
if err != nil {
return nil, err
}
diff --git a/daemon/cluster/secrets.go b/daemon/cluster/secrets.go
index c6fd842..6f652eb 100644
--- a/daemon/cluster/secrets.go
+++ b/daemon/cluster/secrets.go
@@ -7,6 +7,7 @@
types "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/convert"
swarmapi "github.com/docker/swarmkit/api"
+ "google.golang.org/grpc"
)
// GetSecret returns a secret from a managed swarm cluster
@@ -44,7 +45,9 @@
defer cancel()
r, err := state.controlClient.ListSecrets(ctx,
- &swarmapi.ListSecretsRequest{Filters: filters})
+ &swarmapi.ListSecretsRequest{Filters: filters},
+ grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse),
+ )
if err != nil {
return nil, err
}
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index 3d0c38b..df64de6 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -73,6 +73,7 @@
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
+ cgroupNoneDriver = "none"
// DefaultRuntimeName is the default runtime to be used by
// containerd if none is specified
@@ -575,6 +576,9 @@
}
func (daemon *Daemon) getCgroupDriver() string {
+ if daemon.Rootless() {
+ return cgroupNoneDriver
+ }
cgroupDriver := cgroupFsDriver
if UsingSystemd(daemon.configStore) {
@@ -601,6 +605,9 @@
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
+ if cd == cgroupNoneDriver {
+ return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd)
+ }
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}
diff --git a/daemon/export.go b/daemon/export.go
index 27bc359..01593f4 100644
--- a/daemon/export.go
+++ b/daemon/export.go
@@ -70,7 +70,7 @@
Compression: archive.Uncompressed,
UIDMaps: daemon.idMapping.UIDs(),
GIDMaps: daemon.idMapping.GIDs(),
- })
+ }, basefs.Path())
if err != nil {
rwlayer.Unmount()
return nil, err
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
index 1c5a4f5..30f2bb4 100644
--- a/docs/api/version-history.md
+++ b/docs/api/version-history.md
@@ -49,6 +49,11 @@
* `GET /info` now returns information about `DataPathPort` that is currently used in swarm
* `GET /info` now returns `PidsLimit` boolean to indicate if the host kernel has
PID limit support enabled.
+* `GET /info` now includes `name=rootless` in `SecurityOptions` when the daemon is running in
+ rootless mode. This change is not versioned, and affects all API versions if the daemon has
+ this patch.
+* `GET /info` now returns `none` as `CgroupDriver` when the daemon is running in rootless mode.
+ This change is not versioned, and affects all API versions if the daemon has this patch.
* `POST /containers/create` now accepts `DeviceRequests` as part of `HostConfig`.
Can be used to set Nvidia GPUs.
* `GET /swarm` endpoint now returns DataPathPort info
diff --git a/docs/rootless.md b/docs/rootless.md
index 9cf6dd7..f8e27a2 100644
--- a/docs/rootless.md
+++ b/docs/rootless.md
@@ -64,6 +64,8 @@
* The exec dir is set to `$XDG_RUNTIME_DIR/docker` by default.
* The daemon config dir is set to `~/.config/docker` (not `~/.docker`, which is used by the client) by default.
* The `dockerd-rootless.sh` script executes `dockerd` in its own user, mount, and network namespaces. You can enter the namespaces by running `nsenter -U --preserve-credentials -n -m -t $(cat $XDG_RUNTIME_DIR/docker.pid)`.
+* `docker info` shows `rootless` in `SecurityOptions`
+* `docker info` shows `none` as `Cgroup Driver`
### Client
diff --git a/hack/ci/windows.ps1 b/hack/ci/windows.ps1
index c2c937f..8f8b919 100644
--- a/hack/ci/windows.ps1
+++ b/hack/ci/windows.ps1
@@ -409,7 +409,7 @@
# Redirect to a temporary location.
$TEMPORIG=$env:TEMP
$env:TEMP="$env:TESTRUN_DRIVE`:\$env:TESTRUN_SUBDIR\CI-$COMMITHASH"
- $env:LOCALAPPDATA="$TEMP\localappdata"
+ $env:LOCALAPPDATA="$env:TEMP\localappdata"
$errorActionPreference='Stop'
New-Item -ItemType Directory "$env:TEMP" -ErrorAction SilentlyContinue | Out-Null
New-Item -ItemType Directory "$env:TEMP\userprofile" -ErrorAction SilentlyContinue | Out-Null
diff --git a/integration/service/update_test.go b/integration/service/update_test.go
index 92a4368..8575e56 100644
--- a/integration/service/update_test.go
+++ b/integration/service/update_test.go
@@ -33,7 +33,7 @@
service.Spec.Labels["foo"] = "bar"
_, err := cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(t, err)
- poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
service = getService(t, cli, serviceID)
assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
@@ -41,21 +41,21 @@
service.Spec.Labels["foo2"] = "bar"
_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(t, err)
- poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
service = getService(t, cli, serviceID)
assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar", "foo2": "bar"}))
delete(service.Spec.Labels, "foo2")
_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(t, err)
- poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
service = getService(t, cli, serviceID)
assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
delete(service.Spec.Labels, "foo")
_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(t, err)
- poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
service = getService(t, cli, serviceID)
assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{}))
@@ -63,7 +63,7 @@
service.Spec.Labels["foo"] = "bar"
_, err = cli.ServiceUpdate(ctx, serviceID, service.Version, service.Spec, types.ServiceUpdateOptions{})
assert.NilError(t, err)
- poll.WaitOn(t, serviceIsUpdated(cli, serviceID), swarm.ServicePoll)
+ poll.WaitOn(t, serviceSpecIsUpdated(cli, serviceID, service.Version.Index), swarm.ServicePoll)
service = getService(t, cli, serviceID)
assert.Check(t, is.DeepEqual(service.Spec.Labels, map[string]string{"foo": "bar"}))
@@ -271,3 +271,17 @@
}
}
}
+
+func serviceSpecIsUpdated(client client.ServiceAPIClient, serviceID string, serviceOldVersion uint64) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ service, _, err := client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case service.Version.Index > serviceOldVersion:
+ return poll.Success()
+ default:
+ return poll.Continue("waiting for service %s to be updated", serviceID)
+ }
+ }
+}
diff --git a/layer/layer_store.go b/layer/layer_store.go
index 1601465..81730e9 100644
--- a/layer/layer_store.go
+++ b/layer/layer_store.go
@@ -10,6 +10,7 @@
"github.com/docker/distribution"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/locker"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/pkg/system"
@@ -36,7 +37,11 @@
mounts map[string]*mountedLayer
mountL sync.Mutex
- os string
+
+ // protect *RWLayer() methods from operating on the same name/id
+ locker *locker.Locker
+
+ os string
}
// StoreOptions are the options used to create a new Store instance
@@ -92,6 +97,7 @@
driver: driver,
layerMap: map[ChainID]*roLayer{},
mounts: map[string]*mountedLayer{},
+ locker: locker.New(),
useTarSplit: !caps.ReproducesExactDiffs,
os: os,
}
@@ -189,6 +195,8 @@
}
func (ls *layerStore) loadMount(mount string) error {
+ ls.mountL.Lock()
+ defer ls.mountL.Unlock()
if _, ok := ls.mounts[mount]; ok {
return nil
}
@@ -477,7 +485,7 @@
return ls.releaseLayer(layer)
}
-func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) {
+func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (_ RWLayer, err error) {
var (
storageOpt map[string]string
initFunc MountInit
@@ -490,14 +498,16 @@
initFunc = opts.InitFunc
}
+ ls.locker.Lock(name)
+ defer ls.locker.Unlock(name)
+
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- m, ok := ls.mounts[name]
+ _, ok := ls.mounts[name]
+ ls.mountL.Unlock()
if ok {
return nil, ErrMountNameConflict
}
- var err error
var pid string
var p *roLayer
if string(parent) != "" {
@@ -517,7 +527,7 @@
}()
}
- m = &mountedLayer{
+ m := &mountedLayer{
name: name,
parent: p,
mountID: ls.mountID(name),
@@ -528,7 +538,7 @@
if initFunc != nil {
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
if err != nil {
- return nil, err
+ return
}
m.initID = pid
}
@@ -538,20 +548,23 @@
}
if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil {
- return nil, err
+ return
}
if err = ls.saveMount(m); err != nil {
- return nil, err
+ return
}
return m.getReference(), nil
}
func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) {
+ ls.locker.Lock(id)
+ defer ls.locker.Unlock(id)
+
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- mount, ok := ls.mounts[id]
- if !ok {
+ mount := ls.mounts[id]
+ ls.mountL.Unlock()
+ if mount == nil {
return nil, ErrMountDoesNotExist
}
@@ -560,9 +573,10 @@
func (ls *layerStore) GetMountID(id string) (string, error) {
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- mount, ok := ls.mounts[id]
- if !ok {
+ mount := ls.mounts[id]
+ ls.mountL.Unlock()
+
+ if mount == nil {
return "", ErrMountDoesNotExist
}
logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID)
@@ -571,10 +585,14 @@
}
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
+ name := l.Name()
+ ls.locker.Lock(name)
+ defer ls.locker.Unlock(name)
+
ls.mountL.Lock()
- defer ls.mountL.Unlock()
- m, ok := ls.mounts[l.Name()]
- if !ok {
+ m := ls.mounts[name]
+ ls.mountL.Unlock()
+ if m == nil {
return []Metadata{}, nil
}
@@ -606,7 +624,9 @@
return nil, err
}
- delete(ls.mounts, m.Name())
+ ls.mountL.Lock()
+ delete(ls.mounts, name)
+ ls.mountL.Unlock()
ls.layerL.Lock()
defer ls.layerL.Unlock()
@@ -634,7 +654,9 @@
}
}
+ ls.mountL.Lock()
ls.mounts[mount.name] = mount
+ ls.mountL.Unlock()
return nil
}
diff --git a/layer/migration.go b/layer/migration.go
index 2668ea9..1250069 100644
--- a/layer/migration.go
+++ b/layer/migration.go
@@ -3,7 +3,6 @@
import (
"compress/gzip"
"errors"
- "fmt"
"io"
"os"
@@ -13,64 +12,6 @@
"github.com/vbatts/tar-split/tar/storage"
)
-// CreateRWLayerByGraphID creates a RWLayer in the layer store using
-// the provided name with the given graphID. To get the RWLayer
-// after migration the layer may be retrieved by the given name.
-func (ls *layerStore) CreateRWLayerByGraphID(name, graphID string, parent ChainID) (err error) {
- ls.mountL.Lock()
- defer ls.mountL.Unlock()
- m, ok := ls.mounts[name]
- if ok {
- if m.parent.chainID != parent {
- return errors.New("name conflict, mismatched parent")
- }
- if m.mountID != graphID {
- return errors.New("mount already exists")
- }
-
- return nil
- }
-
- if !ls.driver.Exists(graphID) {
- return fmt.Errorf("graph ID does not exist: %q", graphID)
- }
-
- var p *roLayer
- if string(parent) != "" {
- p = ls.get(parent)
- if p == nil {
- return ErrLayerDoesNotExist
- }
-
- // Release parent chain if error
- defer func() {
- if err != nil {
- ls.layerL.Lock()
- ls.releaseLayer(p)
- ls.layerL.Unlock()
- }
- }()
- }
-
- // TODO: Ensure graphID has correct parent
-
- m = &mountedLayer{
- name: name,
- parent: p,
- mountID: graphID,
- layerStore: ls,
- references: map[RWLayer]*referencedRWLayer{},
- }
-
- // Check for existing init layer
- initID := fmt.Sprintf("%s-init", graphID)
- if ls.driver.Exists(initID) {
- m.initID = initID
- }
-
- return ls.saveMount(m)
-}
-
func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) {
defer func() {
if err != nil {
diff --git a/layer/migration_test.go b/layer/migration_test.go
index 9231663..2b5c330 100644
--- a/layer/migration_test.go
+++ b/layer/migration_test.go
@@ -3,7 +3,6 @@
import (
"bytes"
"compress/gzip"
- "fmt"
"io"
"io/ioutil"
"os"
@@ -12,7 +11,6 @@
"testing"
"github.com/docker/docker/daemon/graphdriver"
- "github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stringid"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
@@ -269,161 +267,3 @@
assertMetadata(t, metadata, createMetadata(layer2a))
}
-
-func TestMountMigration(t *testing.T) {
- // TODO Windows: Figure out why this is failing (obvious - paths... needs porting)
- if runtime.GOOS == "windows" {
- t.Skip("Failing on Windows")
- }
- ls, _, cleanup := newTestStore(t)
- defer cleanup()
-
- baseFiles := []FileApplier{
- newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644),
- newTestFile("/etc/profile", []byte("# Base configuration"), 0644),
- }
- initFiles := []FileApplier{
- newTestFile("/etc/hosts", []byte{}, 0644),
- newTestFile("/etc/resolv.conf", []byte{}, 0644),
- }
- mountFiles := []FileApplier{
- newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644),
- newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644),
- newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644),
- }
-
- initTar, err := tarFromFiles(initFiles...)
- if err != nil {
- t.Fatal(err)
- }
-
- mountTar, err := tarFromFiles(mountFiles...)
- if err != nil {
- t.Fatal(err)
- }
-
- graph := ls.(*layerStore).driver
-
- layer1, err := createLayer(ls, "", initWithFiles(baseFiles...))
- if err != nil {
- t.Fatal(err)
- }
-
- graphID1 := layer1.(*referencedCacheLayer).cacheID
-
- containerID := stringid.GenerateRandomID()
- containerInit := fmt.Sprintf("%s-init", containerID)
-
- if err := graph.Create(containerInit, graphID1, nil); err != nil {
- t.Fatal(err)
- }
- if _, err := graph.ApplyDiff(containerInit, graphID1, bytes.NewReader(initTar)); err != nil {
- t.Fatal(err)
- }
-
- if err := graph.Create(containerID, containerInit, nil); err != nil {
- t.Fatal(err)
- }
- if _, err := graph.ApplyDiff(containerID, containerInit, bytes.NewReader(mountTar)); err != nil {
- t.Fatal(err)
- }
-
- if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil {
- t.Fatal(err)
- }
-
- rwLayer1, err := ls.GetRWLayer("migration-mount")
- if err != nil {
- t.Fatal(err)
- }
-
- if _, err := rwLayer1.Mount(""); err != nil {
- t.Fatal(err)
- }
-
- changes, err := rwLayer1.Changes()
- if err != nil {
- t.Fatal(err)
- }
-
- if expected := 5; len(changes) != expected {
- t.Logf("Changes %#v", changes)
- t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected)
- }
-
- sortChanges(changes)
-
- assertChange(t, changes[0], archive.Change{
- Path: "/etc",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[1], archive.Change{
- Path: "/etc/hosts",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[2], archive.Change{
- Path: "/root",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[3], archive.Change{
- Path: "/root/.bashrc",
- Kind: archive.ChangeModify,
- })
- assertChange(t, changes[4], archive.Change{
- Path: "/root/testfile1.txt",
- Kind: archive.ChangeAdd,
- })
-
- if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), nil); err == nil {
- t.Fatal("Expected error creating mount with same name")
- } else if err != ErrMountNameConflict {
- t.Fatal(err)
- }
-
- rwLayer2, err := ls.GetRWLayer("migration-mount")
- if err != nil {
- t.Fatal(err)
- }
-
- if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) {
- t.Fatal("Expected same layer from get with same name as from migrate")
- }
-
- if _, err := rwLayer2.Mount(""); err != nil {
- t.Fatal(err)
- }
-
- if _, err := rwLayer2.Mount(""); err != nil {
- t.Fatal(err)
- }
-
- if metadata, err := ls.Release(layer1); err != nil {
- t.Fatal(err)
- } else if len(metadata) > 0 {
- t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata)
- }
-
- if err := rwLayer1.Unmount(); err != nil {
- t.Fatal(err)
- }
-
- if _, err := ls.ReleaseRWLayer(rwLayer1); err != nil {
- t.Fatal(err)
- }
-
- if err := rwLayer2.Unmount(); err != nil {
- t.Fatal(err)
- }
- if err := rwLayer2.Unmount(); err != nil {
- t.Fatal(err)
- }
- metadata, err := ls.ReleaseRWLayer(rwLayer2)
- if err != nil {
- t.Fatal(err)
- }
- if len(metadata) == 0 {
- t.Fatal("Expected base layer to be deleted when deleting mount")
- }
-
- assertMetadata(t, metadata, createMetadata(layer1))
-}
diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go
index d6858c6..c5d9e0e 100644
--- a/layer/mounted_layer.go
+++ b/layer/mounted_layer.go
@@ -2,6 +2,7 @@
import (
"io"
+ "sync"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs"
@@ -15,6 +16,7 @@
path string
layerStore *layerStore
+ sync.Mutex
references map[RWLayer]*referencedRWLayer
}
@@ -62,16 +64,24 @@
ref := &referencedRWLayer{
mountedLayer: ml,
}
+ ml.Lock()
ml.references[ref] = ref
+ ml.Unlock()
return ref
}
func (ml *mountedLayer) hasReferences() bool {
- return len(ml.references) > 0
+ ml.Lock()
+ ret := len(ml.references) > 0
+ ml.Unlock()
+
+ return ret
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
+ ml.Lock()
+ defer ml.Unlock()
if _, ok := ml.references[ref]; !ok {
return ErrLayerNotRetained
}
@@ -81,7 +91,9 @@
func (ml *mountedLayer) retakeReference(r RWLayer) {
if ref, ok := r.(*referencedRWLayer); ok {
+ ml.Lock()
ml.references[ref] = ref
+ ml.Unlock()
}
}
diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go
index 2d9d662..6ff61e6 100644
--- a/pkg/chrootarchive/archive.go
+++ b/pkg/chrootarchive/archive.go
@@ -27,18 +27,34 @@
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return untarHandler(tarArchive, dest, options, true)
+ return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ return untarHandler(tarArchive, dest, options, true, root)
}
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
- return untarHandler(tarArchive, dest, options, false)
+ return untarHandler(tarArchive, dest, options, false, dest)
}
// Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
if tarArchive == nil {
return fmt.Errorf("Empty archive")
}
@@ -69,5 +85,13 @@
r = decompressedArchive
}
- return invokeUnpack(r, dest, options)
+ return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ return invokePack(srcPath, options, root)
}
diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go
index 5df8afd..ea2879d 100644
--- a/pkg/chrootarchive/archive_unix.go
+++ b/pkg/chrootarchive/archive_unix.go
@@ -10,10 +10,13 @@
"io"
"io/ioutil"
"os"
+ "path/filepath"
"runtime"
+ "strings"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
+ "github.com/pkg/errors"
)
// untar is the entry-point for docker-untar on re-exec. This is not used on
@@ -23,18 +26,28 @@
runtime.LockOSThread()
flag.Parse()
- var options *archive.TarOptions
+ var options archive.TarOptions
//read the options from the pipe "ExtraFiles"
if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
fatal(err)
}
- if err := chroot(flag.Arg(0)); err != nil {
+ dst := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = dst
+ }
+
+ if err := chroot(root); err != nil {
fatal(err)
}
- if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+ if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
fatal(err)
}
// fully consume stdin in case it is zero padded
@@ -45,7 +58,10 @@
os.Exit(0)
}
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+ if root == "" {
+ return errors.New("must specify a root to chroot to")
+ }
// We can't pass a potentially large exclude list directly via cmd line
// because we easily overrun the kernel's max argument/environment size
@@ -57,7 +73,21 @@
return fmt.Errorf("Untar pipe failure: %v", err)
}
- cmd := reexec.Command("docker-untar", dest)
+ if root != "" {
+ relDest, err := filepath.Rel(root, dest)
+ if err != nil {
+ return err
+ }
+ if relDest == "." {
+ relDest = "/"
+ }
+ if relDest[0] != '/' {
+ relDest = "/" + relDest
+ }
+ dest = relDest
+ }
+
+ cmd := reexec.Command("docker-untar", dest, root)
cmd.Stdin = decompressedArchive
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
@@ -69,6 +99,7 @@
w.Close()
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
+
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
w.Close()
@@ -86,3 +117,92 @@
}
return nil
}
+
+func tar() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ src := flag.Arg(0)
+ var root string
+ if len(flag.Args()) > 1 {
+ root = flag.Arg(1)
+ }
+
+ if root == "" {
+ root = src
+ }
+
+ if err := realChroot(root); err != nil {
+ fatal(err)
+ }
+
+ var options archive.TarOptions
+ if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ rdr, err := archive.TarWithOptions(src, &options)
+ if err != nil {
+ fatal(err)
+ }
+ defer rdr.Close()
+
+ if _, err := io.Copy(os.Stdout, rdr); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ if root == "" {
+ return nil, errors.New("root path must not be empty")
+ }
+
+ relSrc, err := filepath.Rel(root, srcPath)
+ if err != nil {
+ return nil, err
+ }
+ if relSrc == "." {
+ relSrc = "/"
+ }
+ if relSrc[0] != '/' {
+ relSrc = "/" + relSrc
+ }
+
+ // make sure we didn't trim a trailing slash with the call to `Rel`
+ if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+ relSrc += "/"
+ }
+
+ cmd := reexec.Command("docker-tar", relSrc, root)
+
+ errBuff := bytes.NewBuffer(nil)
+ cmd.Stderr = errBuff
+
+ tarR, tarW := io.Pipe()
+ cmd.Stdout = tarW
+
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ return nil, errors.Wrap(err, "error getting options pipe for tar process")
+ }
+
+ if err := cmd.Start(); err != nil {
+ return nil, errors.Wrap(err, "tar error on re-exec cmd")
+ }
+
+ go func() {
+ err := cmd.Wait()
+ err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+ tarW.CloseWithError(err)
+ }()
+
+ if err := json.NewEncoder(stdin).Encode(options); err != nil {
+ stdin.Close()
+ return nil, errors.Wrap(err, "tar json encode to pipe failed")
+ }
+ stdin.Close()
+
+ return tarR, nil
+}
diff --git a/pkg/chrootarchive/archive_unix_test.go b/pkg/chrootarchive/archive_unix_test.go
new file mode 100644
index 0000000..f39a88a
--- /dev/null
+++ b/pkg/chrootarchive/archive_unix_test.go
@@ -0,0 +1,171 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ gotar "archive/tar"
+ "bytes"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/archive"
+ "golang.org/x/sys/unix"
+ "gotest.tools/assert"
+)
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not copy data to a
+// container path that will actually overwrite data on the host
+func TestUntarWithMaliciousSymlinks(t *testing.T) {
+ dir, err := ioutil.TempDir("", t.Name())
+ assert.NilError(t, err)
+ defer os.RemoveAll(dir)
+
+ root := filepath.Join(dir, "root")
+
+ err = os.MkdirAll(root, 0755)
+ assert.NilError(t, err)
+
+ // Add a file into a directory above root
+ // Ensure that we can't access this file while tarring.
+ err = ioutil.WriteFile(filepath.Join(dir, "host-file"), []byte("I am a host file"), 0644)
+ assert.NilError(t, err)
+
+	// Create some data which will be copied into the "container" root into
+ // the symlinked path.
+ // Before this change, the copy would overwrite the "host" content.
+ // With this change it should not.
+ data := filepath.Join(dir, "data")
+ err = os.MkdirAll(data, 0755)
+ assert.NilError(t, err)
+ err = ioutil.WriteFile(filepath.Join(data, "local-file"), []byte("pwn3d"), 0644)
+ assert.NilError(t, err)
+
+ safe := filepath.Join(root, "safe")
+ err = unix.Symlink(dir, safe)
+ assert.NilError(t, err)
+
+ rdr, err := archive.TarWithOptions(data, &archive.TarOptions{IncludeFiles: []string{"local-file"}, RebaseNames: map[string]string{"local-file": "host-file"}})
+ assert.NilError(t, err)
+
+ // Use tee to test both the good case and the bad case w/o recreating the archive
+ bufRdr := bytes.NewBuffer(nil)
+ tee := io.TeeReader(rdr, bufRdr)
+
+ err = UntarWithRoot(tee, safe, nil, root)
+ assert.Assert(t, err != nil)
+ assert.ErrorContains(t, err, "open /safe/host-file: no such file or directory")
+
+	// Make sure the "host" file is still intact
+ // Before the fix the host file would be overwritten
+ hostData, err := ioutil.ReadFile(filepath.Join(dir, "host-file"))
+ assert.NilError(t, err)
+ assert.Equal(t, string(hostData), "I am a host file")
+
+ // Now test by chrooting to an attacker controlled path
+ // This should succeed as is and overwrite a "host" file
+ // Note that this would be a mis-use of this function.
+ err = UntarWithRoot(bufRdr, safe, nil, safe)
+ assert.NilError(t, err)
+
+ hostData, err = ioutil.ReadFile(filepath.Join(dir, "host-file"))
+ assert.NilError(t, err)
+ assert.Equal(t, string(hostData), "pwn3d")
+}
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not unwittingly leak
+// host data into the archive.
+func TestTarWithMaliciousSymlinks(t *testing.T) {
+ dir, err := ioutil.TempDir("", t.Name())
+ assert.NilError(t, err)
+ // defer os.RemoveAll(dir)
+ t.Log(dir)
+
+ root := filepath.Join(dir, "root")
+
+ err = os.MkdirAll(root, 0755)
+ assert.NilError(t, err)
+
+ hostFileData := []byte("I am a host file")
+
+ // Add a file into a directory above root
+ // Ensure that we can't access this file while tarring.
+ err = ioutil.WriteFile(filepath.Join(dir, "host-file"), hostFileData, 0644)
+ assert.NilError(t, err)
+
+ safe := filepath.Join(root, "safe")
+ err = unix.Symlink(dir, safe)
+ assert.NilError(t, err)
+
+ data := filepath.Join(dir, "data")
+ err = os.MkdirAll(data, 0755)
+ assert.NilError(t, err)
+
+ type testCase struct {
+ p string
+ includes []string
+ }
+
+ cases := []testCase{
+ {p: safe, includes: []string{"host-file"}},
+ {p: safe + "/", includes: []string{"host-file"}},
+ {p: safe, includes: nil},
+ {p: safe + "/", includes: nil},
+ {p: root, includes: []string{"safe/host-file"}},
+ {p: root, includes: []string{"/safe/host-file"}},
+ {p: root, includes: nil},
+ }
+
+ maxBytes := len(hostFileData)
+
+ for _, tc := range cases {
+ t.Run(path.Join(tc.p+"_"+strings.Join(tc.includes, "_")), func(t *testing.T) {
+ // Here if we use archive.TarWithOptions directly or change the "root" parameter
+ // to be the same as "safe", data from the host will be leaked into the archive
+ var opts *archive.TarOptions
+ if tc.includes != nil {
+ opts = &archive.TarOptions{
+ IncludeFiles: tc.includes,
+ }
+ }
+ rdr, err := Tar(tc.p, opts, root)
+ assert.NilError(t, err)
+ defer rdr.Close()
+
+ tr := gotar.NewReader(rdr)
+ assert.Assert(t, !isDataInTar(t, tr, hostFileData, int64(maxBytes)), "host data leaked to archive")
+ })
+ }
+}
+
+func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool {
+ for {
+ h, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ assert.NilError(t, err)
+
+ if h.Size == 0 {
+ continue
+ }
+ assert.Assert(t, h.Size <= maxBytes, "%s: file size exceeds max expected size %d: %d", h.Name, maxBytes, h.Size)
+
+ data := make([]byte, int(h.Size))
+ _, err = io.ReadFull(tr, data)
+ assert.NilError(t, err)
+ if bytes.Contains(data, compare) {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/pkg/chrootarchive/archive_windows.go b/pkg/chrootarchive/archive_windows.go
index f297313..de87113 100644
--- a/pkg/chrootarchive/archive_windows.go
+++ b/pkg/chrootarchive/archive_windows.go
@@ -14,9 +14,16 @@
func invokeUnpack(decompressedArchive io.ReadCloser,
dest string,
- options *archive.TarOptions) error {
+ options *archive.TarOptions, root string) error {
// Windows is different to Linux here because Windows does not support
// chroot. Hence there is no point sandboxing a chrooted process to
// do the unpack. We call inline instead within the daemon process.
return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+ // Windows is different to Linux here because Windows does not support
+ // chroot. Hence there is no point sandboxing a chrooted process to
+ // do the pack. We call inline instead within the daemon process.
+ return archive.TarWithOptions(srcPath, options)
+}
diff --git a/pkg/chrootarchive/init_unix.go b/pkg/chrootarchive/init_unix.go
index a15e4bb..c24fea7 100644
--- a/pkg/chrootarchive/init_unix.go
+++ b/pkg/chrootarchive/init_unix.go
@@ -14,6 +14,7 @@
func init() {
reexec.Register("docker-applyLayer", applyLayer)
reexec.Register("docker-untar", untar)
+ reexec.Register("docker-tar", tar)
}
func fatal(err error) {
diff --git a/vendor.conf b/vendor.conf
index b767282..517512d 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -130,7 +130,7 @@
github.com/gogo/googleapis d31c731455cb061f42baff3bda55bad0118b126b # v1.2.0
# cluster
-github.com/docker/swarmkit 48eb1828ce81be20b25d647f6ca8f33d599f705c
+github.com/docker/swarmkit 961ec3a56b7b6c311a2137b6a398f9d778fba94b # bump_v19.03 branch
github.com/gogo/protobuf ba06b47c162d49f2af050fb4c75bcbc86a159d5c # v1.2.1
github.com/cloudflare/cfssl 5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
index e5fe3fe..7c977db 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
@@ -141,17 +141,10 @@
}
// Abort immediately if all tasks are clean.
if len(dirtySlots) == 0 {
- if service.UpdateStatus == nil {
- if u.annotationsUpdated(service) {
- // Annotation-only update; mark the update as completed
- u.completeUpdate(ctx, service.ID, true)
- }
- return
- }
- if service.UpdateStatus.State == api.UpdateStatus_UPDATING || service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
- // Update or rollback was started, and is now complete
- u.completeUpdate(ctx, service.ID, true)
- return
+ if service.UpdateStatus != nil &&
+ (service.UpdateStatus.State == api.UpdateStatus_UPDATING ||
+ service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED) {
+ u.completeUpdate(ctx, service.ID)
}
return
}
@@ -310,7 +303,7 @@
// have reached RUNNING by this point.
if !stopped {
- u.completeUpdate(ctx, service.ID, false)
+ u.completeUpdate(ctx, service.ID)
}
}
@@ -623,32 +616,25 @@
}
}
-func (u *Updater) completeUpdate(ctx context.Context, serviceID string, force bool) {
+func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
log.G(ctx).Debugf("update of service %s complete", serviceID)
err := u.store.Update(func(tx store.Tx) error {
service := store.GetService(tx, serviceID)
- switch {
- case service == nil:
+ if service == nil {
return nil
- case service.UpdateStatus == nil && force:
- // Force marking the status as updated; to account for annotation-only updates.
- service.UpdateStatus = &api.UpdateStatus{
- StartedAt: ptypes.MustTimestampProto(time.Now()),
- State: api.UpdateStatus_COMPLETED,
- Message: "update completed",
- }
- case service.UpdateStatus == nil:
+ }
+ if service.UpdateStatus == nil {
// The service was changed since we started this update
return nil
- case service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED:
+ }
+ if service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED {
service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED
service.UpdateStatus.Message = "rollback completed"
- default:
+ } else {
service.UpdateStatus.State = api.UpdateStatus_COMPLETED
service.UpdateStatus.Message = "update completed"
}
-
service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now())
return store.UpdateService(tx, service)
@@ -658,10 +644,3 @@
log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID)
}
}
-
-func (u *Updater) annotationsUpdated(service *api.Service) bool {
- if service.PreviousSpec == nil {
- return false
- }
- return !reflect.DeepEqual(service.Spec, service.PreviousSpec)
-}