Merge pull request #23029 from mlaventure/bump-to-1.11.2

Bump version to 1.11.2-rc1
diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
index 1f75623..d5434b1 100644
--- a/Dockerfile.ppc64le
+++ b/Dockerfile.ppc64le
@@ -71,13 +71,11 @@
 	&& make install_device-mapper
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
-# TODO install Go, using gccgo as GOROOT_BOOTSTRAP (Go 1.5+ supports ppc64le properly)
-# possibly a ppc64le/golang image?
-
-## BUILD GOLANG
-ENV GO_VERSION 1.5.4
+## BUILD GOLANG 1.6
+# NOTE: ppc64le has compatibility issues with older versions of Go, so make sure the version is >= 1.6
+ENV GO_VERSION 1.6.2
 ENV GO_DOWNLOAD_URL https://golang.org/dl/go${GO_VERSION}.src.tar.gz
-ENV GO_DOWNLOAD_SHA256 002acabce7ddc140d0d55891f9d4fcfbdd806b9332fb8b110c91bc91afb0bc93
+ENV GO_DOWNLOAD_SHA256 787b0b750d037016a30c6ed05a8a70a91b2e9db4bd9b1a2453aa502a63f1bccc
 ENV GOROOT_BOOTSTRAP /usr/local
 
 RUN curl -fsSL "$GO_DOWNLOAD_URL" -o golang.tar.gz \
@@ -155,7 +153,7 @@
 
 VOLUME /var/lib/docker
 WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux
+ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
 
 # Let us use a .bashrc file
 RUN ln -sfv $PWD/.bashrc ~/.bashrc
diff --git a/builder/context.go b/builder/context.go
index 53a90f1..3b5cc2a 100644
--- a/builder/context.go
+++ b/builder/context.go
@@ -29,6 +29,16 @@
 		return err
 	}
 	return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
+		if err != nil {
+			if os.IsPermission(err) {
+				return fmt.Errorf("can't stat '%s'", filePath)
+			}
+			if os.IsNotExist(err) {
+				return nil
+			}
+			return err
+		}
+
 		// skip this directory/file if it's not in the path; it won't get added to the context
 		if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil {
 			return err
@@ -41,16 +51,6 @@
 			return nil
 		}
 
-		if err != nil {
-			if os.IsPermission(err) {
-				return fmt.Errorf("can't stat '%s'", filePath)
-			}
-			if os.IsNotExist(err) {
-				return nil
-			}
-			return err
-		}
-
 		// skip checking if symlinks point to non-existing files; such symlinks can be useful
 		// also skip named pipes, because they hang on open
 		if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
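
The hunk above moves the error handling to the top of the Walk callback: filepath.Walk passes a nil os.FileInfo together with a non-nil err when lstat on an entry fails, so the old placement (error check after the path-matching and mode checks) could dereference nil. A minimal sketch of the corrected ordering, reduced to the two checks that frame it:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func walkContext(contextRoot string) error {
        return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error {
            // Handle the incoming error first: when err != nil, f may be nil.
            if err != nil {
                if os.IsPermission(err) {
                    return fmt.Errorf("can't stat '%s'", filePath)
                }
                if os.IsNotExist(err) {
                    return nil // the entry vanished between readdir and lstat
                }
                return err
            }
            // Only past this point is it safe to dereference f.
            if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
                return nil
            }
            return nil
        })
    }

    func main() { _ = walkContext(".") }
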
diff --git a/container/container.go b/container/container.go
index ebafde7..8781b79 100644
--- a/container/container.go
+++ b/container/container.go
@@ -667,7 +667,8 @@
 	return pm, nil
 }
 
-func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
+// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox
+func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
 	pm := nat.PortMap{}
 	if sb == nil {
 		return pm
@@ -824,7 +825,7 @@
 	}
 
 	// Port-mapping rules belong to the container & applicable only to non-internal networks
-	portmaps := getSandboxPortMapInfo(sb)
+	portmaps := GetSandboxPortMapInfo(sb)
 	if n.Info().Internal() || len(portmaps) > 0 {
 		return createOptions, nil
 	}
diff --git a/container/history.go b/container/history.go
index afce1d4..c80c2aa 100644
--- a/container/history.go
+++ b/container/history.go
@@ -24,11 +24,6 @@
 	containers[i], containers[j] = containers[j], containers[i]
 }
 
-// Add the given container to history.
-func (history *History) Add(container *Container) {
-	*history = append(*history, container)
-}
-
 // sort orders the history by creation date in descending order.
 func (history *History) sort() {
 	sort.Sort(history)
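
With Add removed, a History is now built directly from a slice (History(c.all()) in the next file) and ordered via sort.Sort, which works because History implements sort.Interface. A minimal sketch of that pattern with hypothetical lowercase names; the newest-first ordering is inferred from the sort comment above:

    package main

    import (
        "sort"
        "time"
    )

    type container struct{ created time.Time }

    // history mirrors the real History type: a slice that satisfies sort.Interface.
    type history []*container

    func (h history) Len() int      { return len(h) }
    func (h history) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

    // Less orders by creation date, newest first (descending).
    func (h history) Less(i, j int) bool { return h[j].created.Before(h[i].created) }

    func main() {
        hs := history{{created: time.Unix(1, 0)}, {created: time.Unix(2, 0)}}
        sort.Sort(hs) // hs[0] is now the newest container
    }
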
diff --git a/container/memory_store.go b/container/memory_store.go
index 30c1f7a..9fa1165 100644
--- a/container/memory_store.go
+++ b/container/memory_store.go
@@ -41,14 +41,9 @@
 // List returns a sorted list of containers from the store.
 // The containers are ordered by creation date.
 func (c *memoryStore) List() []*Container {
-	containers := new(History)
-	c.RLock()
-	for _, cont := range c.s {
-		containers.Add(cont)
-	}
-	c.RUnlock()
+	containers := History(c.all())
 	containers.sort()
-	return *containers
+	return containers
 }
 
 // Size returns the number of containers in the store.
@@ -60,9 +55,7 @@
 
 // First returns the first container found in the store by a given filter.
 func (c *memoryStore) First(filter StoreFilter) *Container {
-	c.RLock()
-	defer c.RUnlock()
-	for _, cont := range c.s {
+	for _, cont := range c.all() {
 		if filter(cont) {
 			return cont
 		}
@@ -74,11 +67,8 @@
 // This operation is asynchronous in the memory store.
 // NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
 func (c *memoryStore) ApplyAll(apply StoreReducer) {
-	c.RLock()
-	defer c.RUnlock()
-
 	wg := new(sync.WaitGroup)
-	for _, cont := range c.s {
+	for _, cont := range c.all() {
 		wg.Add(1)
 		go func(container *Container) {
 			apply(container)
@@ -89,4 +79,14 @@
 	wg.Wait()
 }
 
+func (c *memoryStore) all() []*Container {
+	c.RLock()
+	containers := make([]*Container, 0, len(c.s))
+	for _, cont := range c.s {
+		containers = append(containers, cont)
+	}
+	c.RUnlock()
+	return containers
+}
+
 var _ Store = &memoryStore{}
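
The new all() helper centralizes the snapshot-under-read-lock idiom: copy the map's values while holding RLock, release the lock, and let callers iterate over the copy. That keeps user-supplied callbacks (First's filter, ApplyAll's reducer) from running with the store lock held. A self-contained sketch of the idiom, with hypothetical names:

    package main

    import "sync"

    type item struct{}

    type store struct {
        sync.RWMutex
        s map[string]*item
    }

    // all snapshots the map's values under the read lock so that iteration,
    // and anything the caller does per element, happens without the lock.
    func (c *store) all() []*item {
        c.RLock()
        items := make([]*item, 0, len(c.s))
        for _, it := range c.s {
            items = append(items, it)
        }
        c.RUnlock()
        return items
    }

    func main() {
        c := &store{s: map[string]*item{"a": {}}}
        for range c.all() { // no lock held during the loop body
        }
    }
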
diff --git a/daemon/container_operations.go b/daemon/container_operations.go
index 310f9bb..461439a 100644
--- a/daemon/container_operations.go
+++ b/daemon/container_operations.go
@@ -26,6 +26,7 @@
 	// ErrRootFSReadOnly is returned when a container
 	// rootfs is marked readonly.
 	ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
+	getPortMapInfo    = container.GetSandboxPortMapInfo
 )
 
 func (daemon *Daemon) buildSandboxOptions(container *container.Container, n libnetwork.Network) ([]libnetwork.SandboxOption, error) {
@@ -581,6 +582,8 @@
 		return fmt.Errorf("Updating join info failed: %v", err)
 	}
 
+	container.NetworkSettings.Ports = getPortMapInfo(sb)
+
 	daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID})
 	return nil
 }
@@ -633,6 +636,8 @@
 		return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
 	}
 
+	container.NetworkSettings.Ports = getPortMapInfo(sbox)
+
 	if err := ep.Delete(false); err != nil {
 		return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
 	}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index b7eadaa..2502596 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -367,6 +367,9 @@
 					}
 				}
 			}
+
+			// Make sure networks are available before starting
+			daemon.waitForNetworks(c)
 			if err := daemon.containerStart(c); err != nil {
 				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
 			}
@@ -410,6 +413,33 @@
 	return nil
 }
 
+// waitForNetworks is used during daemon initialization when starting up containers.
+// It ensures that all of a container's networks are available before the daemon tries to start the container.
+// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
+func (daemon *Daemon) waitForNetworks(c *container.Container) {
+	if daemon.discoveryWatcher == nil {
+		return
+	}
+	// If the container uses a network that requires discovery, make sure the discovery service is available before starting
+	for netName := range c.NetworkSettings.Networks {
+		// If we get `ErrNoSuchNetwork` here, it can be assumed that it is due to discovery not being ready
+		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
+		if _, err := daemon.netController.NetworkByName(netName); err != nil {
+			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
+				continue
+			}
+			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
+			// FIXME: why is this slow???
+			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
+			select {
+			case <-daemon.discoveryWatcher.ReadyCh():
+			case <-time.After(60 * time.Second):
+			}
+			return
+		}
+	}
+}
+
 func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error {
 	if img != nil && img.Config != nil {
 		if err := merge(config, img.Config); err != nil {
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index da6bc25..609ed95 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -381,6 +381,12 @@
 		&discovery.Entry{Host: "127.0.0.1", Port: "3333"},
 	}
 
+	select {
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	case <-daemon.discoveryWatcher.ReadyCh():
+	}
+
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
@@ -414,6 +420,13 @@
 	if err := daemon.Reload(newConfig); err != nil {
 		t.Fatal(err)
 	}
+
+	select {
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	case <-daemon.discoveryWatcher.ReadyCh():
+	}
+
 	ch, errCh = daemon.discoveryWatcher.Watch(stopCh)
 
 	select {
@@ -450,6 +463,13 @@
 	if err := daemon.Reload(newConfig); err != nil {
 		t.Fatal(err)
 	}
+
+	select {
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	case <-daemon.discoveryWatcher.ReadyCh():
+	}
+
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
@@ -488,6 +508,12 @@
 	if err := daemon.Reload(newConfig); err != nil {
 		t.Fatal(err)
 	}
+
+	select {
+	case <-daemon.discoveryWatcher.ReadyCh():
+	case <-time.After(10 * time.Second):
+		t.Fatal("timeout waiting for discovery")
+	}
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
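
The same select block now appears four times in this file; a hypothetical helper along these lines could factor it out (waitDiscovery is a sketch, not part of this change):

    // readyer is the slice of the discovery watcher the helper needs.
    type readyer interface{ ReadyCh() <-chan struct{} }

    // waitDiscovery fails the test if the watcher is not ready within timeout.
    func waitDiscovery(t *testing.T, w readyer, timeout time.Duration) {
        select {
        case <-w.ReadyCh():
        case <-time.After(timeout):
            t.Fatal("timeout waiting for discovery")
        }
    }
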
diff --git a/daemon/discovery.go b/daemon/discovery.go
index 1346946..30d2e02 100644
--- a/daemon/discovery.go
+++ b/daemon/discovery.go
@@ -27,18 +27,24 @@
 	discovery.Watcher
 	Stop()
 	Reload(backend, address string, clusterOpts map[string]string) error
+	ReadyCh() <-chan struct{}
 }
 
 type daemonDiscoveryReloader struct {
 	backend discovery.Backend
 	ticker  *time.Ticker
 	term    chan bool
+	readyCh chan struct{}
 }
 
 func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
 	return d.backend.Watch(stopCh)
 }
 
+func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} {
+	return d.readyCh
+}
+
 func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) {
 	var (
 		heartbeat = defaultDiscoveryHeartbeat
@@ -87,38 +93,64 @@
 		backend: backend,
 		ticker:  time.NewTicker(heartbeat),
 		term:    make(chan bool),
+		readyCh: make(chan struct{}),
 	}
 	// We call Register() on the discovery backend in a loop for the whole lifetime of the daemon,
 	// but we never actually Watch() for nodes appearing and disappearing for the moment.
-	reloader.advertise(advertiseAddress)
+	go reloader.advertiseHeartbeat(advertiseAddress)
 	return reloader, nil
 }
 
-func (d *daemonDiscoveryReloader) advertise(address string) {
-	d.registerAddr(address)
-	go d.advertiseHeartbeat(address)
-}
-
-func (d *daemonDiscoveryReloader) registerAddr(addr string) {
-	if err := d.backend.Register(addr); err != nil {
-		log.Warnf("Registering as %q in discovery failed: %v", addr, err)
-	}
-}
-
 // advertiseHeartbeat registers the current node against the discovery backend using the specified
 // address. Registration against the backend comes with a TTL and requires regular
 // heartbeats, so the function loops until the reloader is terminated.
 func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
+	var ready bool
+	if err := d.initHeartbeat(address); err == nil {
+		ready = true
+		close(d.readyCh)
+	}
+
 	for {
 		select {
 		case <-d.ticker.C:
-			d.registerAddr(address)
+			if err := d.backend.Register(address); err != nil {
+				log.Warnf("Registering as %q in discovery failed: %v", address, err)
+			} else {
+				if !ready {
+					close(d.readyCh)
+					ready = true
+				}
+			}
 		case <-d.term:
 			return
 		}
 	}
 }
 
+// initHeartbeat performs the first heartbeat. It retries in a tight loop until
+// either the timeout period is reached or the heartbeat succeeds, then returns.
+func (d *daemonDiscoveryReloader) initHeartbeat(address string) error {
+	// Set up a short ticker until the first heartbeat has succeeded
+	t := time.NewTicker(500 * time.Millisecond)
+	defer t.Stop()
+	// timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service
+	timeout := time.After(60 * time.Second)
+
+	for {
+		select {
+		case <-timeout:
+			return errors.New("timeout waiting for initial discovery")
+		case <-d.term:
+			return errors.New("terminated")
+		case <-t.C:
+			if err := d.backend.Register(address); err == nil {
+				return nil
+			}
+		}
+	}
+}
+
 // Reload stops the watcher from advertising and reconfigures it to advertise at a new address.
 func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
 	d.Stop()
@@ -130,8 +162,9 @@
 
 	d.backend = backend
 	d.ticker = time.NewTicker(heartbeat)
+	d.readyCh = make(chan struct{})
 
-	d.advertise(advertiseAddress)
+	go d.advertiseHeartbeat(advertiseAddress)
 	return nil
 }
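
The readiness signal is the close-once channel idiom: readyCh starts open, is closed exactly once after the first successful Register, and every past or future receive on ReadyCh() then unblocks. Consumers bound their wait with a select against time.After, as waitForNetworks and the daemon tests above do. A minimal self-contained sketch, with hypothetical names:

    package main

    import (
        "errors"
        "time"
    )

    type gate struct{ ready chan struct{} }

    func newGate() *gate { return &gate{ready: make(chan struct{})} }

    // markReady must be called at most once: closing the channel wakes
    // every current and future waiter.
    func (g *gate) markReady() { close(g.ready) }

    // wait blocks until markReady has been called or the timeout elapses.
    func (g *gate) wait(d time.Duration) error {
        select {
        case <-g.ready:
            return nil
        case <-time.After(d):
            return errors.New("timeout waiting for ready")
        }
    }

    func main() {
        g := newGate()
        go g.markReady()
        _ = g.wait(time.Second)
    }
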
 
diff --git a/daemon/graphdriver/counter.go b/daemon/graphdriver/counter.go
new file mode 100644
index 0000000..572fc9b
--- /dev/null
+++ b/daemon/graphdriver/counter.go
@@ -0,0 +1,32 @@
+package graphdriver
+
+import "sync"
+
+// RefCounter is a generic counter for use by graphdriver Get/Put calls
+type RefCounter struct {
+	counts map[string]int
+	mu     sync.Mutex
+}
+
+// NewRefCounter returns a new RefCounter
+func NewRefCounter() *RefCounter {
+	return &RefCounter{counts: make(map[string]int)}
+}
+
+// Increment increases the ref count for the given id and returns the current count
+func (c *RefCounter) Increment(id string) int {
+	c.mu.Lock()
+	c.counts[id]++
+	count := c.counts[id]
+	c.mu.Unlock()
+	return count
+}
+
+// Decrement decreases the ref count for the given id and returns the current count
+func (c *RefCounter) Decrement(id string) int {
+	c.mu.Lock()
+	c.counts[id]--
+	count := c.counts[id]
+	c.mu.Unlock()
+	return count
+}
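
The three driver hunks below all apply the same Get/Put discipline around this counter: Get increments first and returns early if the path is already mounted, every error path decrements before returning, and Put decrements and skips the unmount while other references remain. A condensed sketch of that discipline; RefCounter is the type defined above, everything else is a hypothetical stand-in:

    package main

    import "path/filepath"

    type driver struct{ ctr *RefCounter } // RefCounter as defined in counter.go above

    func (d *driver) mountPath(id string) string { return filepath.Join("/mnt", id) }

    func doMount(id, mp string) error { return nil } // stand-in for the real mount
    func doUnmount(mp string) error   { return nil } // stand-in for the real unmount

    func (d *driver) Get(id string) (string, error) {
        mp := d.mountPath(id)
        if count := d.ctr.Increment(id); count > 1 {
            return mp, nil // already mounted by an earlier Get
        }
        if err := doMount(id, mp); err != nil {
            d.ctr.Decrement(id) // roll the count back on every error path
            return "", err
        }
        return mp, nil
    }

    func (d *driver) Put(id string) error {
        if count := d.ctr.Decrement(id); count > 0 {
            return nil // other callers still hold the mount
        }
        return doUnmount(d.mountPath(id))
    }
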
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index 7de6907..8756f1f 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -28,6 +28,7 @@
 	home    string
 	uidMaps []idtools.IDMap
 	gidMaps []idtools.IDMap
+	ctr     *graphdriver.RefCounter
 }
 
 // Init creates a driver with the given home and the set of options.
@@ -46,6 +47,7 @@
 		home:      home,
 		uidMaps:   uidMaps,
 		gidMaps:   gidMaps,
+		ctr:       graphdriver.NewRefCounter(),
 	}
 
 	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
@@ -151,26 +153,35 @@
 // Get mounts a device with given id into the root filesystem
 func (d *Driver) Get(id, mountLabel string) (string, error) {
 	mp := path.Join(d.home, "mnt", id)
+	rootFs := path.Join(mp, "rootfs")
+	if count := d.ctr.Increment(id); count > 1 {
+		return rootFs, nil
+	}
 
 	uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
+		d.ctr.Decrement(id)
 		return "", err
 	}
+
 	// Create the target directories if they don't exist
 	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
+		d.ctr.Decrement(id)
 		return "", err
 	}
 	if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
+		d.ctr.Decrement(id)
 		return "", err
 	}
 
 	// Mount the device
 	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
+		d.ctr.Decrement(id)
 		return "", err
 	}
 
-	rootFs := path.Join(mp, "rootfs")
 	if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
+		d.ctr.Decrement(id)
 		d.DeviceSet.UnmountDevice(id, mp)
 		return "", err
 	}
@@ -180,6 +191,7 @@
 		// Create an "id" file with the container/image id in it to help reconstruct this in case
 		// of later problems
 		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+			d.ctr.Decrement(id)
 			d.DeviceSet.UnmountDevice(id, mp)
 			return "", err
 		}
@@ -190,6 +202,9 @@
 
 // Put unmounts a device and removes it.
 func (d *Driver) Put(id string) error {
+	if count := d.ctr.Decrement(id); count > 0 {
+		return nil
+	}
 	mp := path.Join(d.home, "mnt", id)
 	err := d.DeviceSet.UnmountDevice(id, mp)
 	if err != nil {
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index 476b789..33678f6 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -95,6 +95,7 @@
 	pathCache     map[string]string
 	uidMaps       []idtools.IDMap
 	gidMaps       []idtools.IDMap
+	ctr           *graphdriver.RefCounter
 }
 
 var backingFs = "<unknown>"
@@ -147,6 +148,7 @@
 		pathCache: make(map[string]string),
 		uidMaps:   uidMaps,
 		gidMaps:   gidMaps,
+		ctr:       graphdriver.NewRefCounter(),
 	}
 
 	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
@@ -348,28 +350,39 @@
 	workDir := path.Join(dir, "work")
 	mergedDir := path.Join(dir, "merged")
 
+	if count := d.ctr.Increment(id); count > 1 {
+		return mergedDir, nil
+	}
+
 	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
 
 	// if it's mounted already, just return
 	mounted, err := d.mounted(mergedDir)
 	if err != nil {
+		d.ctr.Decrement(id)
 		return "", err
 	}
 	if mounted {
+		d.ctr.Decrement(id)
 		return mergedDir, nil
 	}
 
 	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
+		d.ctr.Decrement(id)
 		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
 	}
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
 	// user namespace requires this to move a directory from lower to upper.
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
+		d.ctr.Decrement(id)
+		syscall.Unmount(mergedDir, 0)
 		return "", err
 	}
 
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
+		d.ctr.Decrement(id)
+		syscall.Unmount(mergedDir, 0)
 		return "", err
 	}
 
@@ -386,6 +399,9 @@
 
 // Put unmounts the mount path created for the given id.
 func (d *Driver) Put(id string) error {
+	if count := d.ctr.Decrement(id); count > 0 {
+		return nil
+	}
 	d.pathCacheLock.Lock()
 	mountpoint, exists := d.pathCache[id]
 	d.pathCacheLock.Unlock()
diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
index e92045b..2db187c 100644
--- a/daemon/graphdriver/zfs/zfs.go
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -105,6 +105,7 @@
 		filesystemsCache: filesystemsCache,
 		uidMaps:          uidMaps,
 		gidMaps:          gidMaps,
+		ctr:              graphdriver.NewRefCounter(),
 	}
 	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
 }
@@ -161,6 +162,7 @@
 	filesystemsCache map[string]bool
 	uidMaps          []idtools.IDMap
 	gidMaps          []idtools.IDMap
+	ctr              *graphdriver.RefCounter
 }
 
 func (d *Driver) String() string {
@@ -295,25 +297,35 @@
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
 func (d *Driver) Get(id, mountLabel string) (string, error) {
 	mountpoint := d.mountPath(id)
+	if count := d.ctr.Increment(id); count > 1 {
+		return mountpoint, nil
+	}
+
 	filesystem := d.zfsPath(id)
 	options := label.FormatMountLabel("", mountLabel)
 	logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options)
 
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
+		d.ctr.Decrement(id)
 		return "", err
 	}
 	// Create the target directories if they don't exist
 	if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
+		d.ctr.Decrement(id)
 		return "", err
 	}
 
 	if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
+		d.ctr.Decrement(id)
 		return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
 	}
+
 	// this could be our first mount after creation of the filesystem, and the root dir may still have root
 	// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
 	if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
+		mount.Unmount(mountpoint)
+		d.ctr.Decrement(id)
 		return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
 	}
 
@@ -322,6 +334,9 @@
 
 // Put removes the existing mountpoint for the given id if it exists.
 func (d *Driver) Put(id string) error {
+	if count := d.ctr.Decrement(id); count > 0 {
+		return nil
+	}
 	mountpoint := d.mountPath(id)
 	mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
 	if err != nil || !mounted {
diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go
index 2ee2092..902766d 100644
--- a/integration-cli/docker_cli_network_unix_test.go
+++ b/integration-cli/docker_cli_network_unix_test.go
@@ -1080,6 +1080,52 @@
 	dockerCmd(c, "network", "connect", "test1", "c1")
 }
 
+func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) {
+	chk := checker.Equals
+	if !mustBeEqual {
+		chk = checker.Not(checker.Equals)
+	}
+	currentMapping, _ := dockerCmd(c, "port", container, port)
+	c.Assert(currentMapping, chk, originalMapping)
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) {
+	// Connect and disconnect a container with explicit and implicit
+	// host port mappings to/from networks which do and do not cause
+	// the container's default gateway to change, and verify that the
+	// docker port command returns consistent information
+	testRequires(c, NotArm)
+	cnt := "c1"
+	dockerCmd(c, "network", "create", "aaa")
+	dockerCmd(c, "network", "create", "ccc")
+
+	dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top")
+	c.Assert(waitRun(cnt), check.IsNil)
+	curPortMap, _ := dockerCmd(c, "port", cnt, "70")
+	curExplPortMap, _ := dockerCmd(c, "port", cnt, "90")
+
+	// Connect to a network which causes the container's default gw switch
+	dockerCmd(c, "network", "connect", "aaa", cnt)
+	verifyPortMap(c, cnt, "70", curPortMap, false)
+	verifyPortMap(c, cnt, "90", curExplPortMap, true)
+
+	// Read current mapping
+	curPortMap, _ = dockerCmd(c, "port", cnt, "70")
+
+	// Disconnect from a network which causes the container's default gw switch
+	dockerCmd(c, "network", "disconnect", "aaa", cnt)
+	verifyPortMap(c, cnt, "70", curPortMap, false)
+	verifyPortMap(c, cnt, "90", curExplPortMap, true)
+
+	// Read current mapping
+	curPortMap, _ = dockerCmd(c, "port", cnt, "70")
+
+	// Connect to a network which does not cause the container's default gw switch
+	dockerCmd(c, "network", "connect", "ccc", cnt)
+	verifyPortMap(c, cnt, "70", curPortMap, true)
+	verifyPortMap(c, cnt, "90", curExplPortMap, true)
+}
+
 func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) {
 	macAddress := "02:42:ac:11:00:02"
 	dockerCmd(c, "network", "create", "mynetwork")
diff --git a/profiles/seccomp/default.json b/profiles/seccomp/default.json
index 28d564c..5c70f88 100755
--- a/profiles/seccomp/default.json
+++ b/profiles/seccomp/default.json
@@ -1380,6 +1380,11 @@
 			"args": []
 		},
 		{
+			"name": "socketcall",
+			"action": "SCMP_ACT_ALLOW",
+			"args": []
+		},
+		{
 			"name": "socketpair",
 			"action": "SCMP_ACT_ALLOW",
 			"args": []
diff --git a/profiles/seccomp/seccomp_default.go b/profiles/seccomp/seccomp_default.go
index be93d78..4fad7a6 100644
--- a/profiles/seccomp/seccomp_default.go
+++ b/profiles/seccomp/seccomp_default.go
@@ -1409,6 +1409,11 @@
 			Args:   []*types.Arg{},
 		},
 		{
+			Name:   "socketcall",
+			Action: types.ActAllow,
+			Args:   []*types.Arg{},
+		},
+		{
 			Name:   "socketpair",
 			Action: types.ActAllow,
 			Args:   []*types.Arg{},