Merge pull request #279 from thaJeztah/19.03_backport_attach_to_existing_network_error

[19.03 backport] Handle the error case when a container reattaches to the same network
diff --git a/daemon/archive.go b/daemon/archive.go
index 9c7971b..21339cf 100644
--- a/daemon/archive.go
+++ b/daemon/archive.go
@@ -31,18 +31,19 @@
 }
 
 // helper functions to extract or archive
-func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
+func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions, root string) error {
 	if ea, ok := i.(extractor); ok {
 		return ea.ExtractArchive(src, dst, opts)
 	}
-	return chrootarchive.Untar(src, dst, opts)
+
+	return chrootarchive.UntarWithRoot(src, dst, opts, root)
 }
 
-func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
 	if ap, ok := i.(archiver); ok {
 		return ap.ArchivePath(src, opts)
 	}
-	return archive.TarWithOptions(src, opts)
+	return chrootarchive.Tar(src, opts, root)
 }
 
 // ContainerCopy performs a deprecated operation of archiving the resource at
@@ -235,10 +236,16 @@
 	if driver.Base(resolvedPath) == "." {
 		resolvedPath += string(driver.Separator()) + "."
 	}
-	sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
+
+	sourceDir := resolvedPath
+	sourceBase := "."
+
+	if stat.Mode&os.ModeDir == 0 { // not dir
+		sourceDir, sourceBase = driver.Split(resolvedPath)
+	}
 	opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
 
-	data, err := archivePath(driver, sourceDir, opts)
+	data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
 	if err != nil {
 		return nil, nil, err
 	}
@@ -367,7 +374,7 @@
 		}
 	}
 
-	if err := extractArchive(driver, content, resolvedPath, options); err != nil {
+	if err := extractArchive(driver, content, resolvedPath, options, container.BaseFS.Path()); err != nil {
 		return err
 	}
 
@@ -425,14 +432,11 @@
 		d, f := driver.Split(basePath)
 		basePath = d
 		filter = []string{f}
-	} else {
-		filter = []string{driver.Base(basePath)}
-		basePath = driver.Dir(basePath)
 	}
 	archive, err := archivePath(driver, basePath, &archive.TarOptions{
 		Compression:  archive.Uncompressed,
 		IncludeFiles: filter,
-	})
+	}, container.BaseFS.Path())
 	if err != nil {
 		return nil, err
 	}
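
In the hunk above, the source is no longer always split into (parent dir, base name): for a directory the archive root is now the resolved path itself with "." as the base, and only non-directories get split. A minimal illustration of the two cases, with hypothetical paths and using path/filepath in place of the containerfs driver:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// splitSource mirrors the selection above: directories are archived from the
// resolved path itself, everything else from its parent directory.
func splitSource(resolvedPath string, mode os.FileMode) (sourceDir, sourceBase string) {
	if mode.IsDir() {
		return resolvedPath, "."
	}
	return filepath.Split(resolvedPath)
}

func main() {
	fmt.Println(splitSource("/rootfs/var/log", os.ModeDir|0755)) // "/rootfs/var/log", "."
	fmt.Println(splitSource("/rootfs/etc/hostname", 0644))       // "/rootfs/etc/", "hostname"
}
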
diff --git a/daemon/export.go b/daemon/export.go
index 27bc359..01593f4 100644
--- a/daemon/export.go
+++ b/daemon/export.go
@@ -70,7 +70,7 @@
 		Compression: archive.Uncompressed,
 		UIDMaps:     daemon.idMapping.UIDs(),
 		GIDMaps:     daemon.idMapping.GIDs(),
-	})
+	}, basefs.Path())
 	if err != nil {
 		rwlayer.Unmount()
 		return nil, err
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index 11ca939..bbd19a8 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -72,7 +72,6 @@
 
 // Driver contains information about the filesystem mounted.
 type Driver struct {
-	sync.Mutex
 	root          string
 	uidMaps       []idtools.IDMap
 	gidMaps       []idtools.IDMap
@@ -81,6 +80,7 @@
 	pathCache     map[string]string
 	naiveDiff     graphdriver.DiffDriver
 	locker        *locker.Locker
+	mntL          sync.Mutex
 }
 
 // Init returns a new AUFS driver.
@@ -327,11 +327,11 @@
 			break
 		}
 
-		if err != unix.EBUSY {
-			return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint)
+		if errors.Cause(err) != unix.EBUSY {
+			return errors.Wrap(err, "aufs: unmount error")
 		}
 		if retries >= 5 {
-			return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint)
+			return errors.Wrap(err, "aufs: unmount error after retries")
 		}
 		// If unmount returns EBUSY, it could be a transient error. Sleep and retry.
 		retries++
@@ -437,7 +437,7 @@
 
 	err := a.unmount(m)
 	if err != nil {
-		logger.Debugf("Failed to unmount %s aufs: %v", id, err)
+		logger.WithError(err).WithField("method", "Put()").Warn()
 	}
 	return err
 }
@@ -547,9 +547,6 @@
 }
 
 func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
-	a.Lock()
-	defer a.Unlock()
-
 	// If the id is mounted or we get an error return
 	if mounted, err := a.mounted(target); err != nil || mounted {
 		return err
@@ -564,9 +561,6 @@
 }
 
 func (a *Driver) unmount(mountPath string) error {
-	a.Lock()
-	defer a.Unlock()
-
 	if mounted, err := a.mounted(mountPath); err != nil || !mounted {
 		return err
 	}
@@ -579,23 +573,20 @@
 
 // Cleanup aufs and unmount all mountpoints
 func (a *Driver) Cleanup() error {
-	var dirs []string
-	if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() {
-			return nil
-		}
-		dirs = append(dirs, path)
-		return nil
-	}); err != nil {
-		return err
+	dir := a.mntPath()
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return errors.Wrap(err, "aufs readdir error")
 	}
+	for _, f := range files {
+		if !f.IsDir() {
+			continue
+		}
 
-	for _, m := range dirs {
+		m := path.Join(dir, f.Name())
+
 		if err := a.unmount(m); err != nil {
-			logger.Debugf("error unmounting %s: %s", m, err)
+			logger.WithError(err).WithField("method", "Cleanup()").Warn()
 		}
 	}
 	return mount.RecursiveUnmount(a.root)
@@ -604,7 +595,7 @@
 func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
 	defer func() {
 		if err != nil {
-			Unmount(target)
+			mount.Unmount(target)
 		}
 	}()
 
@@ -632,14 +623,29 @@
 		opts += ",dirperm1"
 	}
 	data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
-	if err = unix.Mount("none", target, "aufs", 0, data); err != nil {
+	a.mntL.Lock()
+	err = unix.Mount("none", target, "aufs", 0, data)
+	a.mntL.Unlock()
+	if err != nil {
+		err = errors.Wrap(err, "mount target="+target+" data="+data)
 		return
 	}
 
-	for ; index < len(ro); index++ {
-		layer := fmt.Sprintf(":%s=ro+wh", ro[index])
-		data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
-		if err = unix.Mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
+	for index < len(ro) {
+		bp = 0
+		for ; index < len(ro); index++ {
+			layer := fmt.Sprintf("append:%s=ro+wh,", ro[index])
+			if bp+len(layer) > len(b) {
+				break
+			}
+			bp += copy(b[bp:], layer)
+		}
+		data := label.FormatMountLabel(string(b[:bp]), mountLabel)
+		a.mntL.Lock()
+		err = unix.Mount("none", target, "aufs", unix.MS_REMOUNT, data)
+		a.mntL.Unlock()
+		if err != nil {
+			err = errors.Wrap(err, "mount target="+target+" flags=MS_REMOUNT data="+data)
 			return
 		}
 	}
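
The remount loop above no longer issues one mount(2) call per read-only layer: the data argument to mount(2) is copied into a single page by the kernel, so the loop now packs as many "append:<layer>=ro+wh," entries as fit into b before each MS_REMOUNT. A minimal sketch of that packing in Go (the helper name and sizes are illustrative, not part of the change):

package main

import "fmt"

// batchAppendOptions mirrors the packing above: it groups "append:<layer>=ro+wh,"
// entries into option strings no longer than max bytes, one string per remount.
func batchAppendOptions(layers []string, max int) []string {
	var batches []string
	cur := ""
	for _, l := range layers {
		opt := fmt.Sprintf("append:%s=ro+wh,", l)
		if cur != "" && len(cur)+len(opt) > max {
			batches = append(batches, cur)
			cur = ""
		}
		cur += opt
	}
	if cur != "" {
		batches = append(batches, cur)
	}
	return batches
}

func main() {
	// With a 64-byte budget the three layers below are split across two remounts.
	fmt.Println(batchAppendOptions([]string{"/layers/a", "/layers/b", "/layers/c"}, 64))
}
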
diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go
index 9f55103..fc20a5e 100644
--- a/daemon/graphdriver/aufs/mount.go
+++ b/daemon/graphdriver/aufs/mount.go
@@ -4,14 +4,38 @@
 
 import (
 	"os/exec"
+	"syscall"
 
-	"golang.org/x/sys/unix"
+	"github.com/docker/docker/pkg/mount"
 )
 
 // Unmount the target specified.
 func Unmount(target string) error {
-	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-		logger.WithError(err).Warnf("Couldn't run auplink before unmount %s", target)
+	const (
+		EINVAL  = 22 // if auplink returns this,
+		retries = 3  // retry a few times
+	)
+
+	for i := 0; ; i++ {
+		out, err := exec.Command("auplink", target, "flush").CombinedOutput()
+		if err == nil {
+			break
+		}
+		rc := 0
+		if exiterr, ok := err.(*exec.ExitError); ok {
+			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+				rc = status.ExitStatus()
+			}
+		}
+		if i >= retries || rc != EINVAL {
+			logger.WithError(err).WithField("method", "Unmount").Warnf("auplink flush failed: %s", out)
+			break
+		}
+		// auplink failed to find the target in /proc/self/mounts because the
+		// kernel can't guarantee a consistent view of it while the mount
+		// table is being changed
+		logger.Debugf("auplink flush error (retrying %d/%d): %s", i+1, retries, out)
 	}
-	return unix.Unmount(target, 0)
+
+	return mount.Unmount(target)
 }
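
The flush retry above digs the exit status out of *exec.ExitError via syscall.WaitStatus so it can tell the transient EINVAL case apart from real failures. On Go 1.12 and later the same check can be written more directly; a small sketch, assuming Go 1.12+ (the helper name is illustrative):

package main

import (
	"fmt"
	"os/exec"
)

// exitCode reports the exit status of a failed command, or 0 when the error is
// not an *exec.ExitError (for example when the binary could not be started).
func exitCode(err error) int {
	if exitErr, ok := err.(*exec.ExitError); ok {
		return exitErr.ExitCode() // via the embedded *os.ProcessState, Go 1.12+
	}
	return 0
}

func main() {
	err := exec.Command("false").Run()
	fmt.Println(exitCode(err)) // typically prints 1
}
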
diff --git a/hack/ci/windows.ps1 b/hack/ci/windows.ps1
index 8f8b919..6d87f32 100644
--- a/hack/ci/windows.ps1
+++ b/hack/ci/windows.ps1
@@ -119,6 +119,7 @@
 #$env:INTEGRATION_IN_CONTAINER="yes"
 #$env:WINDOWS_BASE_IMAGE=""
 #$env:SKIP_COPY_GO="yes"
+#$env:INTEGRATION_TESTFLAGS="-test.v"
 
 Function Nuke-Everything {
     $ErrorActionPreference = 'SilentlyContinue'
@@ -825,18 +826,32 @@
                                                         docker `
                                                         "`$env`:PATH`='c`:\target;'+`$env:PATH`;  `$env:DOCKER_HOST`='tcp`://'+(ipconfig | select -last 1).Substring(39)+'`:2357'; c:\target\runIntegrationCLI.ps1" | Out-Host } )
             } else  {
-                Write-Host -ForegroundColor Green "INFO: Integration tests being run from the host:"
-                Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
                 $env:DOCKER_HOST=$DASHH_CUT  
                 $env:PATH="$env:TEMP\binary;$env:PATH;"  # Force to use the test binaries, not the host ones.
-                Write-Host -ForegroundColor Green "INFO: $c"
                 Write-Host -ForegroundColor Green "INFO: DOCKER_HOST at $DASHH_CUT"
+
+                $ErrorActionPreference = "SilentlyContinue"
+                Write-Host -ForegroundColor Cyan "INFO: Integration API tests being run from the host:"
+                if (!($env:INTEGRATION_TESTFLAGS)) {
+                    $env:INTEGRATION_TESTFLAGS = "-test.v"
+                }
+                Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker"
+                $start=(Get-Date); Invoke-Expression ".\hack\make.ps1 -TestIntegration"; $Duration=New-Timespan -Start $start -End (Get-Date)
+                $ErrorActionPreference = "Stop"
+                if (-not($LastExitCode -eq 0)) {
+                    Throw "ERROR: Integration API tests failed at $(Get-Date). Duration`:$Duration"
+                }
+
+                $ErrorActionPreference = "SilentlyContinue"
+                Write-Host -ForegroundColor Green "INFO: Integration CLI tests being run from the host:"
+                Write-Host -ForegroundColor Green "INFO: $c"
+                Set-Location "$env:SOURCES_DRIVE`:\$env:SOURCES_SUBDIR\src\github.com\docker\docker\integration-cli"
                 # Explicit to not use measure-command otherwise don't get output as it goes
                 $start=(Get-Date); Invoke-Expression $c; $Duration=New-Timespan -Start $start -End (Get-Date)
             }
             $ErrorActionPreference = "Stop"
             if (-not($LastExitCode -eq 0)) {
-                Throw "ERROR: Integration tests failed at $(Get-Date). Duration`:$Duration"
+                Throw "ERROR: Integration CLI tests failed at $(Get-Date). Duration`:$Duration"
             }
             Write-Host  -ForegroundColor Green "INFO: Integration tests ended at $(Get-Date). Duration`:$Duration"
         } else {
diff --git a/hack/make.ps1 b/hack/make.ps1
index 2211f07..6e5bc45 100644
--- a/hack/make.ps1
+++ b/hack/make.ps1
@@ -60,6 +60,9 @@
 .PARAMETER TestUnit
      Runs unit tests.
 
+.PARAMETER TestIntegration
+     Runs integration tests.
+
 .PARAMETER All
      Runs everything this script knows about that can run in a container.
 
@@ -84,6 +87,7 @@
     [Parameter(Mandatory=$False)][switch]$PkgImports,
     [Parameter(Mandatory=$False)][switch]$GoFormat,
     [Parameter(Mandatory=$False)][switch]$TestUnit,
+    [Parameter(Mandatory=$False)][switch]$TestIntegration,
     [Parameter(Mandatory=$False)][switch]$All
 )
 
@@ -320,6 +324,39 @@
     if ($LASTEXITCODE -ne 0) { Throw "Unit tests failed" }
 }
 
+# Run the integration tests
+Function Run-IntegrationTests() {
+    $env:DOCKER_INTEGRATION_DAEMON_DEST = $root + "\bundles\tmp"
+    $dirs =  Get-ChildItem -Path integration -Directory -Recurse
+    $integration_api_dirs = @()
+    ForEach($dir in $dirs) {
+        $RelativePath = "." + $dir.FullName -replace "$($PWD.Path -replace "\\","\\")",""
+        If ($RelativePath -notmatch '(^.\\integration($|\\internal)|\\testdata)') {
+            $integration_api_dirs += $dir
+            Write-Host "Building test suite binary $RelativePath"
+            go test -c -o "$RelativePath\test.exe" $RelativePath
+        }
+    }
+
+    ForEach($dir in $integration_api_dirs) {
+        Set-Location $dir.FullName
+        Write-Host "Running $($PWD.Path)"
+        $pinfo = New-Object System.Diagnostics.ProcessStartInfo
+        $pinfo.FileName = "$($PWD.Path)\test.exe"
+        $pinfo.RedirectStandardError = $true
+        $pinfo.UseShellExecute = $false
+        $pinfo.Arguments = $env:INTEGRATION_TESTFLAGS
+        $p = New-Object System.Diagnostics.Process
+        $p.StartInfo = $pinfo
+        $p.Start() | Out-Null
+        $p.WaitForExit()
+        $err = $p.StandardError.ReadToEnd()
+        if (($LASTEXITCODE -ne 0) -and ($err -notlike "*warning: no tests to run*")) {
+            Throw "Integration tests failed: $err"
+        }
+    }
+}
+
 # Start of main code.
 Try {
     Write-Host -ForegroundColor Cyan "INFO: make.ps1 starting at $(Get-Date)"
@@ -331,13 +368,13 @@
     # Handle the "-All" shortcut to turn on all things we can handle.
     # Note we expressly only include the items which can run in a container - the validations tests cannot
     # as they require the .git directory which is excluded from the image by .dockerignore
-    if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True }
+    if ($All) { $Client=$True; $Daemon=$True; $TestUnit=$True; }
 
     # Handle the "-Binary" shortcut to build both client and daemon.
     if ($Binary) { $Client = $True; $Daemon = $True }
 
     # Default to building the daemon if not asked for anything explicitly.
-    if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit)) { $Daemon=$True }
+    if (-not($Client) -and -not($Daemon) -and -not($DCO) -and -not($PkgImports) -and -not($GoFormat) -and -not($TestUnit) -and -not($TestIntegration)) { $Daemon=$True }
 
     # Verify git is installed
     if ($(Get-Command git -ErrorAction SilentlyContinue) -eq $nil) { Throw "Git does not appear to be installed" }
@@ -425,6 +462,9 @@
     # Run unit tests
     if ($TestUnit) { Run-UnitTests }
 
+    # Run integration tests
+    if ($TestIntegration) { Run-IntegrationTests }
+
     # Gratuitous ASCII art.
     if ($Daemon -or $Client) {
         Write-Host
diff --git a/integration/container/copy_test.go b/integration/container/copy_test.go
index 9c5c5ce..9020b80 100644
--- a/integration/container/copy_test.go
+++ b/integration/container/copy_test.go
@@ -1,13 +1,20 @@
 package container // import "github.com/docker/docker/integration/container"
 
 import (
+	"archive/tar"
 	"context"
+	"encoding/json"
 	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
 	"testing"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration/internal/container"
+	"github.com/docker/docker/internal/test/fakecontext"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"gotest.tools/assert"
 	is "gotest.tools/assert/cmp"
 	"gotest.tools/skip"
@@ -64,3 +71,93 @@
 	err := apiclient.CopyToContainer(ctx, cid, "/etc/passwd/", nil, types.CopyToContainerOptions{})
 	assert.Assert(t, is.ErrorContains(err, "not a directory"))
 }
+
+func TestCopyFromContainer(t *testing.T) {
+	skip.If(t, testEnv.DaemonInfo.OSType == "windows")
+	defer setupTest(t)()
+
+	ctx := context.Background()
+	apiClient := testEnv.APIClient()
+
+	dir, err := ioutil.TempDir("", t.Name())
+	assert.NilError(t, err)
+	defer os.RemoveAll(dir)
+
+	buildCtx := fakecontext.New(t, dir, fakecontext.WithFile("foo", "hello"), fakecontext.WithFile("baz", "world"), fakecontext.WithDockerfile(`
+		FROM busybox
+		COPY foo /foo
+		COPY baz /bar/quux/baz
+		RUN ln -s notexist /bar/notarget && ln -s quux/baz /bar/filesymlink && ln -s quux /bar/dirsymlink && ln -s / /bar/root
+		CMD /fake
+	`))
+	defer buildCtx.Close()
+
+	resp, err := apiClient.ImageBuild(ctx, buildCtx.AsTarReader(t), types.ImageBuildOptions{})
+	assert.NilError(t, err)
+	defer resp.Body.Close()
+
+	var imageID string
+	err = jsonmessage.DisplayJSONMessagesStream(resp.Body, ioutil.Discard, 0, false, func(msg jsonmessage.JSONMessage) {
+		var r types.BuildResult
+		assert.NilError(t, json.Unmarshal(*msg.Aux, &r))
+		imageID = r.ID
+	})
+	assert.NilError(t, err)
+	assert.Assert(t, imageID != "")
+
+	cid := container.Create(t, ctx, apiClient, container.WithImage(imageID))
+
+	for _, x := range []struct {
+		src    string
+		expect map[string]string
+	}{
+		{"/", map[string]string{"/": "", "/foo": "hello", "/bar/quux/baz": "world", "/bar/filesymlink": "", "/bar/dirsymlink": "", "/bar/notarget": ""}},
+		{"/bar/root", map[string]string{"root": ""}},
+		{"/bar/root/", map[string]string{"root/": "", "root/foo": "hello", "root/bar/quux/baz": "world", "root/bar/filesymlink": "", "root/bar/dirsymlink": "", "root/bar/notarget": ""}},
+
+		{"bar/quux", map[string]string{"quux/": "", "quux/baz": "world"}},
+		{"bar/quux/", map[string]string{"quux/": "", "quux/baz": "world"}},
+		{"bar/quux/baz", map[string]string{"baz": "world"}},
+
+		{"bar/filesymlink", map[string]string{"filesymlink": ""}},
+		{"bar/dirsymlink", map[string]string{"dirsymlink": ""}},
+		{"bar/dirsymlink/", map[string]string{"dirsymlink/": "", "dirsymlink/baz": "world"}},
+		{"bar/notarget", map[string]string{"notarget": ""}},
+	} {
+		t.Run(x.src, func(t *testing.T) {
+			rdr, _, err := apiClient.CopyFromContainer(ctx, cid, x.src)
+			assert.NilError(t, err)
+			defer rdr.Close()
+
+			found := make(map[string]bool, len(x.expect))
+			var numFound int
+			tr := tar.NewReader(rdr)
+			for numFound < len(x.expect) {
+				h, err := tr.Next()
+				if err == io.EOF {
+					break
+				}
+				assert.NilError(t, err)
+
+				expected, exists := x.expect[h.Name]
+				if !exists {
+					// the archive may contain entries we did not ask for (e.g. when
+					// copying from the root, docker adds extra paths), so skip them
+					continue
+				}
+
+				numFound++
+				found[h.Name] = true
+
+				buf, err := ioutil.ReadAll(tr)
+				if err == nil {
+					assert.Check(t, is.Equal(string(buf), expected))
+				}
+			}
+
+			for f := range x.expect {
+				assert.Check(t, found[f], f+" not found in archive")
+			}
+		})
+	}
+}
diff --git a/integration/internal/container/container.go b/integration/internal/container/container.go
index 20ad774..85e6a24 100644
--- a/integration/internal/container/container.go
+++ b/integration/internal/container/container.go
@@ -2,6 +2,7 @@
 
 import (
 	"context"
+	"runtime"
 	"testing"
 
 	"github.com/docker/docker/api/types"
@@ -24,10 +25,14 @@
 // nolint: golint
 func Create(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint
 	t.Helper()
+	cmd := []string{"top"}
+	if runtime.GOOS == "windows" {
+		cmd = []string{"sleep", "240"}
+	}
 	config := &TestContainerConfig{
 		Config: &container.Config{
 			Image: "busybox",
-			Cmd:   []string{"top"},
+			Cmd:   cmd,
 		},
 		HostConfig:       &container.HostConfig{},
 		NetworkingConfig: &network.NetworkingConfig{},
diff --git a/integration/volume/volume_test.go b/integration/volume/volume_test.go
index b2ee810..a16a1da 100644
--- a/integration/volume/volume_test.go
+++ b/integration/volume/volume_test.go
@@ -25,6 +25,10 @@
 	ctx := context.Background()
 
 	name := t.Name()
+	// Windows file system is case insensitive
+	if testEnv.OSType == "windows" {
+		name = strings.ToLower(name)
+	}
 	vol, err := client.VolumeCreate(ctx, volumetypes.VolumeCreateBody{
 		Name: name,
 	})
diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go
index 1eec912..d626336 100644
--- a/pkg/archive/archive_unix.go
+++ b/pkg/archive/archive_unix.go
@@ -7,6 +7,7 @@
 	"errors"
 	"os"
 	"path/filepath"
+	"strings"
 	"syscall"
 
 	"github.com/docker/docker/pkg/idtools"
@@ -26,7 +27,7 @@
 // can't use filepath.Join(srcPath,include) because this will clean away
 // a trailing "." or "/" which may be important.
 func getWalkRoot(srcPath string, include string) string {
-	return srcPath + string(filepath.Separator) + include
+	return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
 }
 
 // CanonicalTarNameForPath returns platform-specific filepath
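
The getWalkRoot change matters once archiving happens relative to a chroot: srcPath can now be "/" itself (for example when the chrooted docker-tar helper archives a container root), and plain concatenation produced a doubled separator. A tiny illustration with hypothetical inputs:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	srcPath, include := "/", "." // hypothetical: walk the whole (chrooted) root
	sep := string(filepath.Separator)
	before := srcPath + sep + include                         // old behaviour: "//."
	after := strings.TrimSuffix(srcPath, sep) + sep + include // new behaviour: "/."
	fmt.Println(before, after)
}
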
diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go
index 2d9d662..6ff61e6 100644
--- a/pkg/chrootarchive/archive.go
+++ b/pkg/chrootarchive/archive.go
@@ -27,18 +27,34 @@
 // The archive may be compressed with one of the following algorithms:
 //  identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-	return untarHandler(tarArchive, dest, options, true)
+	return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error is returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this; however,
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
 }
 
 // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive must be an uncompressed stream.
 func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-	return untarHandler(tarArchive, dest, options, false)
+	return untarHandler(tarArchive, dest, options, false, dest)
 }
 
 // Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
 	if tarArchive == nil {
 		return fmt.Errorf("Empty archive")
 	}
@@ -69,5 +85,13 @@
 		r = decompressedArchive
 	}
 
-	return invokeUnpack(r, dest, options)
+	return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if options == nil {
+		options = &archive.TarOptions{}
+	}
+	return invokePack(srcPath, options, root)
 }
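
To make the contract described in the UntarWithRoot comment concrete: dest must live under root, and root should be the container's root filesystem rather than anything an attacker-controlled symlink can point at. A minimal usage sketch with hypothetical paths:

package main

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	rootfs := "/var/lib/docker/containers/example/rootfs" // hypothetical container root on the host
	dest := filepath.Join(rootfs, "tmp/extracted")        // must resolve to a path inside rootfs

	content, err := os.Open("/tmp/payload.tar") // hypothetical archive to extract
	if err != nil {
		panic(err)
	}
	defer content.Close()

	// Chroot to rootfs first, then unpack into /tmp/extracted inside that root,
	// so a malicious symlink under dest cannot redirect the write onto the host.
	if err := chrootarchive.UntarWithRoot(content, dest, nil, rootfs); err != nil {
		panic(err)
	}
}
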
diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go
index 5df8afd..ea2879d 100644
--- a/pkg/chrootarchive/archive_unix.go
+++ b/pkg/chrootarchive/archive_unix.go
@@ -10,10 +10,13 @@
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"runtime"
+	"strings"
 
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/reexec"
+	"github.com/pkg/errors"
 )
 
 // untar is the entry-point for docker-untar on re-exec. This is not used on
@@ -23,18 +26,28 @@
 	runtime.LockOSThread()
 	flag.Parse()
 
-	var options *archive.TarOptions
+	var options archive.TarOptions
 
 	//read the options from the pipe "ExtraFiles"
 	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
 		fatal(err)
 	}
 
-	if err := chroot(flag.Arg(0)); err != nil {
+	dst := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = dst
+	}
+
+	if err := chroot(root); err != nil {
 		fatal(err)
 	}
 
-	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+	if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
 		fatal(err)
 	}
 	// fully consume stdin in case it is zero padded
@@ -45,7 +58,10 @@
 	os.Exit(0)
 }
 
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	if root == "" {
+		return errors.New("must specify a root to chroot to")
+	}
 
 	// We can't pass a potentially large exclude list directly via cmd line
 	// because we easily overrun the kernel's max argument/environment size
@@ -57,7 +73,21 @@
 		return fmt.Errorf("Untar pipe failure: %v", err)
 	}
 
-	cmd := reexec.Command("docker-untar", dest)
+	if root != "" {
+		relDest, err := filepath.Rel(root, dest)
+		if err != nil {
+			return err
+		}
+		if relDest == "." {
+			relDest = "/"
+		}
+		if relDest[0] != '/' {
+			relDest = "/" + relDest
+		}
+		dest = relDest
+	}
+
+	cmd := reexec.Command("docker-untar", dest, root)
 	cmd.Stdin = decompressedArchive
 
 	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
@@ -69,6 +99,7 @@
 		w.Close()
 		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
 	}
+
 	//write the options to the pipe for the untar exec to read
 	if err := json.NewEncoder(w).Encode(options); err != nil {
 		w.Close()
@@ -86,3 +117,92 @@
 	}
 	return nil
 }
+
+func tar() {
+	runtime.LockOSThread()
+	flag.Parse()
+
+	src := flag.Arg(0)
+	var root string
+	if len(flag.Args()) > 1 {
+		root = flag.Arg(1)
+	}
+
+	if root == "" {
+		root = src
+	}
+
+	if err := realChroot(root); err != nil {
+		fatal(err)
+	}
+
+	var options archive.TarOptions
+	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+		fatal(err)
+	}
+
+	rdr, err := archive.TarWithOptions(src, &options)
+	if err != nil {
+		fatal(err)
+	}
+	defer rdr.Close()
+
+	if _, err := io.Copy(os.Stdout, rdr); err != nil {
+		fatal(err)
+	}
+
+	os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	if root == "" {
+		return nil, errors.New("root path must not be empty")
+	}
+
+	relSrc, err := filepath.Rel(root, srcPath)
+	if err != nil {
+		return nil, err
+	}
+	if relSrc == "." {
+		relSrc = "/"
+	}
+	if relSrc[0] != '/' {
+		relSrc = "/" + relSrc
+	}
+
+	// make sure we didn't trim a trailing slash with the call to `Rel`
+	if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+		relSrc += "/"
+	}
+
+	cmd := reexec.Command("docker-tar", relSrc, root)
+
+	errBuff := bytes.NewBuffer(nil)
+	cmd.Stderr = errBuff
+
+	tarR, tarW := io.Pipe()
+	cmd.Stdout = tarW
+
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting options pipe for tar process")
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, errors.Wrap(err, "tar error on re-exec cmd")
+	}
+
+	go func() {
+		err := cmd.Wait()
+		err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+		tarW.CloseWithError(err)
+	}()
+
+	if err := json.NewEncoder(stdin).Encode(options); err != nil {
+		stdin.Close()
+		return nil, errors.Wrap(err, "tar json encode to pipe failed")
+	}
+	stdin.Close()
+
+	return tarR, nil
+}
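
invokePack and the docker-tar entrypoint above form a small protocol: the parent re-execs itself as "docker-tar <relSrc> <root>", writes the TarOptions as JSON to the child's stdin, and reads the tar stream back from the child's stdout while stderr is captured for the error wrap. From the caller's point of view the returned ReadCloser is an ordinary tar stream; a short sketch of consuming it, with hypothetical paths:

package main

import (
	"archive/tar"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	rootfs := "/var/lib/docker/containers/example/rootfs" // hypothetical container root on the host

	// Archive rootfs/etc while chrooted to rootfs; symlinks resolve inside the container.
	rdr, err := chrootarchive.Tar(rootfs+"/etc", nil, rootfs)
	if err != nil {
		panic(err)
	}
	defer rdr.Close()

	tr := tar.NewReader(rdr)
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break // end of the stream produced by the docker-tar child
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(h.Name)
	}
}
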
diff --git a/pkg/chrootarchive/archive_unix_test.go b/pkg/chrootarchive/archive_unix_test.go
new file mode 100644
index 0000000..f39a88a
--- /dev/null
+++ b/pkg/chrootarchive/archive_unix_test.go
@@ -0,0 +1,171 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+	gotar "archive/tar"
+	"bytes"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/pkg/archive"
+	"golang.org/x/sys/unix"
+	"gotest.tools/assert"
+)
+
+// Test for CVE-2018-15664
+// Ensures that when an "attacker"-controlled path is a symlink to a path
+// outside of a container's rootfs, we do not copy data to a container path
+// that would overwrite data on the host.
+func TestUntarWithMaliciousSymlinks(t *testing.T) {
+	dir, err := ioutil.TempDir("", t.Name())
+	assert.NilError(t, err)
+	defer os.RemoveAll(dir)
+
+	root := filepath.Join(dir, "root")
+
+	err = os.MkdirAll(root, 0755)
+	assert.NilError(t, err)
+
+	// Add a file into a directory above root.
+	// Ensure that it can't be overwritten while untarring into the symlinked path.
+	err = ioutil.WriteFile(filepath.Join(dir, "host-file"), []byte("I am a host file"), 0644)
+	assert.NilError(t, err)
+
+	// Create some data which will be copied into the "container" root at
+	// the symlinked path.
+	// Before this change, the copy would overwrite the "host" content.
+	// With this change it should not.
+	data := filepath.Join(dir, "data")
+	err = os.MkdirAll(data, 0755)
+	assert.NilError(t, err)
+	err = ioutil.WriteFile(filepath.Join(data, "local-file"), []byte("pwn3d"), 0644)
+	assert.NilError(t, err)
+
+	safe := filepath.Join(root, "safe")
+	err = unix.Symlink(dir, safe)
+	assert.NilError(t, err)
+
+	rdr, err := archive.TarWithOptions(data, &archive.TarOptions{IncludeFiles: []string{"local-file"}, RebaseNames: map[string]string{"local-file": "host-file"}})
+	assert.NilError(t, err)
+
+	// Use tee to test both the good case and the bad case w/o recreating the archive
+	bufRdr := bytes.NewBuffer(nil)
+	tee := io.TeeReader(rdr, bufRdr)
+
+	err = UntarWithRoot(tee, safe, nil, root)
+	assert.Assert(t, err != nil)
+	assert.ErrorContains(t, err, "open /safe/host-file: no such file or directory")
+
+	// Make sure the "host" file is still intact
+	// Before the fix the host file would be overwritten
+	hostData, err := ioutil.ReadFile(filepath.Join(dir, "host-file"))
+	assert.NilError(t, err)
+	assert.Equal(t, string(hostData), "I am a host file")
+
+	// Now test by chrooting to an attacker-controlled path.
+	// This should succeed as-is and overwrite a "host" file.
+	// Note that this would be a misuse of this function.
+	err = UntarWithRoot(bufRdr, safe, nil, safe)
+	assert.NilError(t, err)
+
+	hostData, err = ioutil.ReadFile(filepath.Join(dir, "host-file"))
+	assert.NilError(t, err)
+	assert.Equal(t, string(hostData), "pwn3d")
+}
+
+// Test for CVE-2018-15664
+// Ensures that when an "attacker"-controlled path is a symlink to a path
+// outside of a container's rootfs, we do not unwittingly leak host data into
+// the archive.
+func TestTarWithMaliciousSymlinks(t *testing.T) {
+	dir, err := ioutil.TempDir("", t.Name())
+	assert.NilError(t, err)
+	defer os.RemoveAll(dir)
+
+	root := filepath.Join(dir, "root")
+
+	err = os.MkdirAll(root, 0755)
+	assert.NilError(t, err)
+
+	hostFileData := []byte("I am a host file")
+
+	// Add a file into a directory above root
+	// Ensure that we can't access this file while tarring.
+	err = ioutil.WriteFile(filepath.Join(dir, "host-file"), hostFileData, 0644)
+	assert.NilError(t, err)
+
+	safe := filepath.Join(root, "safe")
+	err = unix.Symlink(dir, safe)
+	assert.NilError(t, err)
+
+	data := filepath.Join(dir, "data")
+	err = os.MkdirAll(data, 0755)
+	assert.NilError(t, err)
+
+	type testCase struct {
+		p        string
+		includes []string
+	}
+
+	cases := []testCase{
+		{p: safe, includes: []string{"host-file"}},
+		{p: safe + "/", includes: []string{"host-file"}},
+		{p: safe, includes: nil},
+		{p: safe + "/", includes: nil},
+		{p: root, includes: []string{"safe/host-file"}},
+		{p: root, includes: []string{"/safe/host-file"}},
+		{p: root, includes: nil},
+	}
+
+	maxBytes := len(hostFileData)
+
+	for _, tc := range cases {
+		t.Run(path.Join(tc.p+"_"+strings.Join(tc.includes, "_")), func(t *testing.T) {
+			// If we used archive.TarWithOptions directly, or set the "root" parameter
+			// to the same value as "safe", data from the host would be leaked into the archive
+			var opts *archive.TarOptions
+			if tc.includes != nil {
+				opts = &archive.TarOptions{
+					IncludeFiles: tc.includes,
+				}
+			}
+			rdr, err := Tar(tc.p, opts, root)
+			assert.NilError(t, err)
+			defer rdr.Close()
+
+			tr := gotar.NewReader(rdr)
+			assert.Assert(t, !isDataInTar(t, tr, hostFileData, int64(maxBytes)), "host data leaked to archive")
+		})
+	}
+}
+
+func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool {
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		assert.NilError(t, err)
+
+		if h.Size == 0 {
+			continue
+		}
+		assert.Assert(t, h.Size <= maxBytes, "%s: file size exceeds max expected size %d: %d", h.Name, maxBytes, h.Size)
+
+		data := make([]byte, int(h.Size))
+		_, err = io.ReadFull(tr, data)
+		assert.NilError(t, err)
+		if bytes.Contains(data, compare) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/pkg/chrootarchive/archive_windows.go b/pkg/chrootarchive/archive_windows.go
index f297313..de87113 100644
--- a/pkg/chrootarchive/archive_windows.go
+++ b/pkg/chrootarchive/archive_windows.go
@@ -14,9 +14,16 @@
 
 func invokeUnpack(decompressedArchive io.ReadCloser,
 	dest string,
-	options *archive.TarOptions) error {
+	options *archive.TarOptions, root string) error {
 	// Windows is different to Linux here because Windows does not support
 	// chroot. Hence there is no point sandboxing a chrooted process to
 	// do the unpack. We call inline instead within the daemon process.
 	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
 }
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+	// Windows is different to Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the pack. We call inline instead within the daemon process.
+	return archive.TarWithOptions(srcPath, options)
+}
diff --git a/pkg/chrootarchive/init_unix.go b/pkg/chrootarchive/init_unix.go
index a15e4bb..c24fea7 100644
--- a/pkg/chrootarchive/init_unix.go
+++ b/pkg/chrootarchive/init_unix.go
@@ -14,6 +14,7 @@
 func init() {
 	reexec.Register("docker-applyLayer", applyLayer)
 	reexec.Register("docker-untar", untar)
+	reexec.Register("docker-tar", tar)
 }
 
 func fatal(err error) {