Merge pull request #19355 from riyazdf/notary-revendor

notary revendor into docker
diff --git a/Dockerfile b/Dockerfile
index e3e0c0f..d498eb6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -87,6 +87,9 @@
 # see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
 
 # Install Go
+# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
+#            will need updating, to avoid errors. Ping #docker-maintainers on IRC 
+#            with a heads-up.
 ENV GO_VERSION 1.5.3
 RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \
 	| tar -xzC /usr/local
@@ -152,7 +155,7 @@
 # both. This allows integration-cli tests to cover push/pull with both schema1
 # and schema2 manifests.
 ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
-ENV REGISTRY_COMMIT a7ae88da459b98b481a245e5b1750134724ac67d
+ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
index 6ca4430..e0203b6 100644
--- a/Dockerfile.armhf
+++ b/Dockerfile.armhf
@@ -127,24 +127,33 @@
 	) \
 	&& rm -rf "$SECCOMP_PATH"
 
-# Install registry
-ENV REGISTRY_COMMIT ec87e9b6971d831f0eff752ddb54fb64693e51cd
+# Install two versions of the registry. The first is an older version that
+# only supports schema1 manifests. The second is a newer version that supports
+# both. This allows integration-cli tests to cover push/pull with both schema1
+# and schema2 manifests.
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
+ENV REGISTRY_COMMIT a7ae88da459b98b481a245e5b1750134724ac67d
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \
 	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \
 	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
 		go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \
+	&& (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \
+	&& GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \
 	&& rm -rf "$GOPATH"
 
 # Install notary server
-ENV NOTARY_COMMIT f211b1826dde5fc8c117ccff9bb04ae458a8e3d0
+ENV NOTARY_VERSION docker-v1.10-2
 RUN set -x \
 	&& export GOPATH="$(mktemp -d)" \
 	&& git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \
-	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_COMMIT") \
+	&& (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \
 	&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
 		go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \
+	&& GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \
+		go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
diff --git a/MAINTAINERS b/MAINTAINERS
index 8132bee..43a2f87 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -27,6 +27,7 @@
 
 		people = [
 			"calavera",
+			"coolljt0725",
 			"cpuguy83",
 			"crosbymichael",
 			"duglin",
@@ -115,6 +116,11 @@
 	Email = "david.calavera@gmail.com"
 	GitHub = "calavera"
 
+	[people.coolljt0725]
+	Name = "Lei Jitang"
+	Email = "leijitang@huawei.com"
+	GitHub = "coolljt0725"
+
 	[people.cpuguy83]
 	Name = "Brian Goff"
 	Email = "cpuguy83@gmail.com"
diff --git a/api/client/network.go b/api/client/network.go
index d0422e4..56adabc 100644
--- a/api/client/network.go
+++ b/api/client/network.go
@@ -40,12 +40,14 @@
 	flIpamIPRange := opts.NewListOpts(nil)
 	flIpamGateway := opts.NewListOpts(nil)
 	flIpamAux := opts.NewMapOpts(nil, nil)
+	flIpamOpt := opts.NewMapOpts(nil, nil)
 
 	cmd.Var(&flIpamSubnet, []string{"-subnet"}, "subnet in CIDR format that represents a network segment")
 	cmd.Var(&flIpamIPRange, []string{"-ip-range"}, "allocate container ip from a sub-range")
 	cmd.Var(&flIpamGateway, []string{"-gateway"}, "ipv4 or ipv6 Gateway for the master subnet")
 	cmd.Var(flIpamAux, []string{"-aux-address"}, "auxiliary ipv4 or ipv6 addresses used by Network driver")
 	cmd.Var(flOpts, []string{"o", "-opt"}, "set driver specific options")
+	cmd.Var(flIpamOpt, []string{"-ipam-opt"}, "set IPAM driver specific options")
 
 	flInternal := cmd.Bool([]string{"-internal"}, false, "restricts external access to the network")
 
@@ -71,7 +73,7 @@
 	nc := types.NetworkCreate{
 		Name:           cmd.Arg(0),
 		Driver:         driver,
-		IPAM:           network.IPAM{Driver: *flIpamDriver, Config: ipamCfg},
+		IPAM:           network.IPAM{Driver: *flIpamDriver, Config: ipamCfg, Options: flIpamOpt.GetAll()},
 		Options:        flOpts.GetAll(),
 		CheckDuplicate: true,
 		Internal:       *flInternal,
diff --git a/api/client/run.go b/api/client/run.go
index 1fd392c..3b3a1a2 100644
--- a/api/client/run.go
+++ b/api/client/run.go
@@ -91,7 +91,7 @@
 	}
 
 	if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
-		fmt.Fprintf(cli.err, "WARNING: Dangerous only disable the OOM Killer on containers but not set the '-m/--memory' option\n")
+		fmt.Fprintf(cli.err, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n")
 	}
 
 	if len(hostConfig.DNS) > 0 {
diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go
index ab7427b..85fa88d 100644
--- a/api/server/router/network/network_routes.go
+++ b/api/server/router/network/network_routes.go
@@ -182,12 +182,19 @@
 }
 
 func buildIpamResources(r *types.NetworkResource, nw libnetwork.Network) {
-	id, _, ipv4conf, ipv6conf := nw.Info().IpamConfig()
+	id, opts, ipv4conf, ipv6conf := nw.Info().IpamConfig()
+
+	ipv4Info, ipv6Info := nw.Info().IpamInfo()
 
 	r.IPAM.Driver = id
 
+	r.IPAM.Options = opts
+
 	r.IPAM.Config = []network.IPAMConfig{}
 	for _, ip4 := range ipv4conf {
+		if ip4.PreferredPool == "" {
+			continue
+		}
 		iData := network.IPAMConfig{}
 		iData.Subnet = ip4.PreferredPool
 		iData.IPRange = ip4.SubPool
@@ -196,7 +203,21 @@
 		r.IPAM.Config = append(r.IPAM.Config, iData)
 	}
 
+	if len(r.IPAM.Config) == 0 {
+		for _, ip4Info := range ipv4Info {
+			iData := network.IPAMConfig{}
+			iData.Subnet = ip4Info.IPAMData.Pool.String()
+			iData.Gateway = ip4Info.IPAMData.Gateway.String()
+			r.IPAM.Config = append(r.IPAM.Config, iData)
+		}
+	}
+
+	hasIpv6Conf := false
 	for _, ip6 := range ipv6conf {
+		if ip6.PreferredPool == "" {
+			continue
+		}
+		hasIpv6Conf = true
 		iData := network.IPAMConfig{}
 		iData.Subnet = ip6.PreferredPool
 		iData.IPRange = ip6.SubPool
@@ -204,6 +225,15 @@
 		iData.AuxAddress = ip6.AuxAddresses
 		r.IPAM.Config = append(r.IPAM.Config, iData)
 	}
+
+	if !hasIpv6Conf {
+		for _, ip6Info := range ipv6Info {
+			iData := network.IPAMConfig{}
+			iData.Subnet = ip6Info.IPAMData.Pool.String()
+			iData.Gateway = ip6Info.IPAMData.Gateway.String()
+			r.IPAM.Config = append(r.IPAM.Config, iData)
+		}
+	}
 }
 
 func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource {
diff --git a/api/server/router_swapper.go b/api/server/router_swapper.go
new file mode 100644
index 0000000..b5f1d06
--- /dev/null
+++ b/api/server/router_swapper.go
@@ -0,0 +1,30 @@
+package server
+
+import (
+	"net/http"
+	"sync"
+
+	"github.com/gorilla/mux"
+)
+
+// routerSwapper is an http.Handler that allows you to swap
+// mux routers.
+type routerSwapper struct {
+	mu     sync.Mutex
+	router *mux.Router
+}
+
+// Swap changes the old router with the new one.
+func (rs *routerSwapper) Swap(newRouter *mux.Router) {
+	rs.mu.Lock()
+	rs.router = newRouter
+	rs.mu.Unlock()
+}
+
+// ServeHTTP makes the routerSwapper implement the http.Handler interface.
+func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	rs.mu.Lock()
+	router := rs.router
+	rs.mu.Unlock()
+	router.ServeHTTP(w, r)
+}
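
A minimal, self-contained sketch (not from this patch) of why the swapper matters: an http.Server keeps a single Handler for its whole lifetime, so swapping the inner mux lets the route table change without rebinding the listener. buildRouter below is a hypothetical stand-in for createMux.

package main

import (
	"fmt"
	"net/http"
	"sync"

	"github.com/gorilla/mux"
)

type routerSwapper struct {
	mu     sync.Mutex
	router *mux.Router
}

func (rs *routerSwapper) Swap(newRouter *mux.Router) {
	rs.mu.Lock()
	rs.router = newRouter
	rs.mu.Unlock()
}

func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	rs.mu.Lock()
	router := rs.router
	rs.mu.Unlock()
	router.ServeHTTP(w, r)
}

// buildRouter stands in for createMux: it rebuilds the whole route table,
// optionally mounting a debug endpoint.
func buildRouter(debug bool) *mux.Router {
	m := mux.NewRouter()
	m.HandleFunc("/ping", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "OK")
	})
	if debug {
		m.HandleFunc("/debug/vars", func(w http.ResponseWriter, _ *http.Request) {
			fmt.Fprint(w, "{}")
		})
	}
	return m
}

func main() {
	rs := &routerSwapper{router: buildRouter(false)}
	go http.ListenAndServe("127.0.0.1:8080", rs) // the handler is fixed for the server's lifetime
	rs.Swap(buildRouter(true))                   // but its routes just changed underneath it
	select {}
}
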
diff --git a/api/server/server.go b/api/server/server.go
index 03200c4..f312f23 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -4,7 +4,6 @@
 	"crypto/tls"
 	"net"
 	"net/http"
-	"os"
 	"strings"
 
 	"github.com/Sirupsen/logrus"
@@ -42,10 +41,11 @@
 
 // Server contains instance details for the server
 type Server struct {
-	cfg          *Config
-	servers      []*HTTPServer
-	routers      []router.Router
-	authZPlugins []authorization.Plugin
+	cfg           *Config
+	servers       []*HTTPServer
+	routers       []router.Router
+	authZPlugins  []authorization.Plugin
+	routerSwapper *routerSwapper
 }
 
 // Addr contains string representation of address and its protocol (tcp, unix...).
@@ -80,12 +80,14 @@
 	}
 }
 
-// ServeAPI loops through all initialized servers and spawns goroutine
-// with Server method for each. It sets CreateMux() as Handler also.
-func (s *Server) ServeAPI() error {
+// serveAPI loops through all initialized servers and spawns a goroutine for
+// each with the Serve method. It sets the routerSwapper as the Handler.
+func (s *Server) serveAPI() error {
+	s.initRouterSwapper()
+
 	var chErrors = make(chan error, len(s.servers))
 	for _, srv := range s.servers {
-		srv.srv.Handler = s.CreateMux()
+		srv.srv.Handler = s.routerSwapper
 		go func(srv *HTTPServer) {
 			var err error
 			logrus.Infof("API listen on %s", srv.l.Addr())
@@ -186,11 +188,11 @@
 	s.routers = append(s.routers, r)
 }
 
-// CreateMux initializes the main router the server uses.
+// createMux initializes the main router the server uses.
 // we keep enableCors just for legacy usage, need to be removed in the future
-func (s *Server) CreateMux() *mux.Router {
+func (s *Server) createMux() *mux.Router {
 	m := mux.NewRouter()
-	if os.Getenv("DEBUG") != "" {
+	if utils.IsDebugEnabled() {
 		profilerSetup(m, "/debug/")
 	}
 
@@ -207,3 +209,36 @@
 
 	return m
 }
+
+// Wait blocks the server goroutine until it exits.
+// It sends an error down the channel if there is any error during
+// the API execution.
+func (s *Server) Wait(waitChan chan error) {
+	if err := s.serveAPI(); err != nil {
+		logrus.Errorf("ServeAPI error: %v", err)
+		waitChan <- err
+		return
+	}
+	waitChan <- nil
+}
+
+func (s *Server) initRouterSwapper() {
+	s.routerSwapper = &routerSwapper{
+		router: s.createMux(),
+	}
+}
+
+// Reload reads configuration changes and modifies the
+// server according to those changes.
+// Currently, only the --debug configuration is taken into account.
+func (s *Server) Reload(config *daemon.Config) {
+	debugEnabled := utils.IsDebugEnabled()
+	switch {
+	case debugEnabled && !config.Debug: // disable debug
+		utils.DisableDebug()
+		s.routerSwapper.Swap(s.createMux())
+	case config.Debug && !debugEnabled: // enable debug
+		utils.EnableDebug()
+		s.routerSwapper.Swap(s.createMux())
+	}
+}
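
A self-contained sketch (not from this patch) of the edge-triggered toggle that Reload implements above: the router is only rebuilt when the debug bit actually flips, so repeated reloads with an unchanged config are no-ops. The atomic flag and function names below are stand-ins for the utils debug helpers.

package main

import (
	"fmt"
	"sync/atomic"
)

var debugFlag int32 // stands in for the shared debug state in utils

func isDebugEnabled() bool { return atomic.LoadInt32(&debugFlag) == 1 }
func enableDebug()         { atomic.StoreInt32(&debugFlag, 1) }
func disableDebug()        { atomic.StoreInt32(&debugFlag, 0) }

// reload mirrors Server.Reload: rebuild the router only on a transition.
func reload(wantDebug bool, rebuild func()) {
	switch {
	case isDebugEnabled() && !wantDebug: // disable debug
		disableDebug()
		rebuild()
	case wantDebug && !isDebugEnabled(): // enable debug
		enableDebug()
		rebuild()
	}
}

func main() {
	rebuilds := 0
	rebuild := func() { rebuilds++ }

	reload(true, rebuild)  // flips on: rebuild
	reload(true, rebuild)  // unchanged: no-op
	reload(false, rebuild) // flips off: rebuild
	fmt.Println("rebuilds:", rebuilds) // 2
}
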
diff --git a/container/monitor.go b/container/monitor.go
index 0010a76..2f3368f 100644
--- a/container/monitor.go
+++ b/container/monitor.go
@@ -80,7 +80,6 @@
 // StartMonitor initializes a containerMonitor for this container with the provided supervisor and restart policy
 // and starts the container's process.
 func (container *Container) StartMonitor(s supervisor, policy container.RestartPolicy) error {
-	container.Lock()
 	container.monitor = &containerMonitor{
 		supervisor:    s,
 		container:     container,
@@ -89,7 +88,6 @@
 		stopChan:      make(chan struct{}),
 		startSignal:   make(chan struct{}),
 	}
-	container.Unlock()
 
 	return container.monitor.wait()
 }
@@ -159,8 +157,6 @@
 		}
 		m.Close()
 	}()
-
-	m.container.Lock()
 	// reset stopped flag
 	if m.container.HasBeenManuallyStopped {
 		m.container.HasBeenManuallyStopped = false
@@ -175,20 +171,16 @@
 		if err := m.supervisor.StartLogging(m.container); err != nil {
 			m.resetContainer(false)
 
-			m.container.Unlock()
 			return err
 		}
 
 		pipes := execdriver.NewPipes(m.container.Stdin(), m.container.Stdout(), m.container.Stderr(), m.container.Config.OpenStdin)
-		m.container.Unlock()
 
 		m.logEvent("start")
 
 		m.lastStartTime = time.Now()
 
-		// don't lock Run because m.callback has own lock
 		if exitStatus, err = m.supervisor.Run(m.container, pipes, m.callback); err != nil {
-			m.container.Lock()
 			// if we receive an internal error from the initial start of a container then lets
 			// return it instead of entering the restart loop
 			// set to 127 for container cmd not found/does not exist)
@@ -198,7 +190,6 @@
 				if m.container.RestartCount == 0 {
 					m.container.ExitCode = 127
 					m.resetContainer(false)
-					m.container.Unlock()
 					return derr.ErrorCodeCmdNotFound
 				}
 			}
@@ -207,7 +198,6 @@
 				if m.container.RestartCount == 0 {
 					m.container.ExitCode = 126
 					m.resetContainer(false)
-					m.container.Unlock()
 					return derr.ErrorCodeCmdCouldNotBeInvoked
 				}
 			}
@@ -216,13 +206,11 @@
 				m.container.ExitCode = -1
 				m.resetContainer(false)
 
-				m.container.Unlock()
 				return derr.ErrorCodeCantStart.WithArgs(m.container.ID, utils.GetErrorMessage(err))
 			}
 
-			m.container.Unlock()
 			logrus.Errorf("Error running container: %s", err)
-		} // end if
+		}
 
 		// here container.Lock is already lost
 		afterRun = true
@@ -243,14 +231,13 @@
 			if m.shouldStop {
 				return err
 			}
-			m.container.Lock()
 			continue
 		}
 
 		m.logEvent("die")
 		m.resetContainer(true)
 		return err
-	} // end for
+	}
 }
 
 // resetMonitor resets the stateful fields on the containerMonitor based on the
@@ -331,7 +318,7 @@
 		}
 	}
 
-	m.container.SetRunningLocking(pid)
+	m.container.SetRunning(pid)
 
 	// signal that the process has started
 	// close channel only if not closed
diff --git a/container/state.go b/container/state.go
index d36ade9..138d798 100644
--- a/container/state.go
+++ b/container/state.go
@@ -179,13 +179,6 @@
 	return res
 }
 
-// SetRunningLocking locks container and sets it to "running"
-func (s *State) SetRunningLocking(pid int) {
-	s.Lock()
-	s.SetRunning(pid)
-	s.Unlock()
-}
-
 // SetRunning sets the state of the container to "running".
 func (s *State) SetRunning(pid int) {
 	s.Error = ""
@@ -199,7 +192,7 @@
 	s.waitChan = make(chan struct{})
 }
 
-// SetStoppedLocking locks the container state is sets it to "stopped".
+// SetStoppedLocking locks the container state and sets it to "stopped".
 func (s *State) SetStoppedLocking(exitStatus *execdriver.ExitStatus) {
 	s.Lock()
 	s.SetStopped(exitStatus)
diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker
index 53c58a9..4b12ce6 100644
--- a/contrib/completion/bash/docker
+++ b/contrib/completion/bash/docker
@@ -340,6 +340,25 @@
 	" -- "$cur" ) )
 }
 
+__docker_complete_detach-keys() {
+	case "$prev" in
+		--detach-keys)
+			case "$cur" in
+				*,)
+					COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) )
+					;;
+				*)
+					COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) )
+					;;
+			esac
+
+			__docker_nospace
+			return
+			;;
+	esac
+	return 1
+}
+
 __docker_complete_isolation() {
 	COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) )
 }
@@ -513,12 +532,14 @@
 }
 
 _docker_attach() {
-	case "$cur" in
+	__docker_complete_detach-keys && return
+
+	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--help --no-stdin --sig-proxy" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy" -- "$cur" ) )
 			;;
 		*)
-			local counter="$(__docker_pos_first_nonflag)"
+			local counter=$(__docker_pos_first_nonflag '--detach-keys')
 			if [ $cword -eq $counter ]; then
 				__docker_complete_containers_running
 			fi
@@ -901,6 +922,8 @@
 }
 
 _docker_exec() {
+	__docker_complete_detach-keys && return
+
 	case "$prev" in
 		--user|-u)
 			return
@@ -909,7 +932,7 @@
 
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--detach -d --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--detach -d --detach-keys --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) )
 			;;
 		*)
 			__docker_complete_containers_running
@@ -1508,13 +1531,20 @@
 		--tty -t
 	"
 
+	if [ "$command" = "run" ] ; then
+		options_with_args="$options_with_args
+			--detach-keys
+		"
+		boolean_options="$boolean_options
+			--detach -d
+			--rm
+			--sig-proxy=false
+		"
+		__docker_complete_detach-keys && return
+	fi
+
 	local all_options="$options_with_args $boolean_options"
 
-	[ "$command" = "run" ] && all_options="$all_options
-		--detach -d
-		--rm
-		--sig-proxy=false
-	"
 
 	case "$prev" in
 		--add-host)
@@ -1701,9 +1731,11 @@
 }
 
 _docker_start() {
+	__docker_complete_detach-keys && return
+
 	case "$cur" in
 		-*)
-			COMPREPLY=( $( compgen -W "--attach -a --help --interactive -i" -- "$cur" ) )
+			COMPREPLY=( $( compgen -W "--attach -a --detach-keys --help --interactive -i" -- "$cur" ) )
 			;;
 		*)
 			__docker_complete_containers_stopped
diff --git a/daemon/config.go b/daemon/config.go
index 8356df8..a75178f 100644
--- a/daemon/config.go
+++ b/daemon/config.go
@@ -1,9 +1,19 @@
 package daemon
 
 import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+
+	"github.com/Sirupsen/logrus"
 	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/discovery"
 	flag "github.com/docker/docker/pkg/mflag"
-	"github.com/docker/engine-api/types/container"
+	"github.com/imdario/mergo"
 )
 
 const (
@@ -11,42 +21,69 @@
 	disableNetworkBridge = "none"
 )
 
+// LogConfig represents the default log configuration.
+// It includes json tags to deserialize configuration from a file
+// using the same names that the command-line flags use.
+type LogConfig struct {
+	Type   string            `json:"log-driver,omitempty"`
+	Config map[string]string `json:"log-opts,omitempty"`
+}
+
+// CommonTLSOptions defines TLS configuration for the daemon server.
+// It includes json tags to deserialize configuration from a file
+// using the same names that the command-line flags use.
+type CommonTLSOptions struct {
+	CAFile   string `json:"tlscacert,omitempty"`
+	CertFile string `json:"tlscert,omitempty"`
+	KeyFile  string `json:"tlskey,omitempty"`
+}
+
 // CommonConfig defines the configuration of a docker daemon which are
 // common across platforms.
+// It includes json tags to deserialize configuration from a file
+// using the same names that the command-line flags use.
 type CommonConfig struct {
-	AuthorizationPlugins []string // AuthorizationPlugins holds list of authorization plugins
-	AutoRestart          bool
-	Bridge               bridgeConfig // Bridge holds bridge network specific configuration.
-	Context              map[string][]string
-	DisableBridge        bool
-	DNS                  []string
-	DNSOptions           []string
-	DNSSearch            []string
-	ExecOptions          []string
-	ExecRoot             string
-	GraphDriver          string
-	GraphOptions         []string
-	Labels               []string
-	LogConfig            container.LogConfig
-	Mtu                  int
-	Pidfile              string
-	RemappedRoot         string
-	Root                 string
-	TrustKeyPath         string
+	AuthorizationPlugins []string            `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
+	AutoRestart          bool                `json:"-"`
+	Bridge               bridgeConfig        `json:"-"` // Bridge holds bridge network specific configuration.
+	Context              map[string][]string `json:"-"`
+	DisableBridge        bool                `json:"-"`
+	DNS                  []string            `json:"dns,omitempty"`
+	DNSOptions           []string            `json:"dns-opts,omitempty"`
+	DNSSearch            []string            `json:"dns-search,omitempty"`
+	ExecOptions          []string            `json:"exec-opts,omitempty"`
+	ExecRoot             string              `json:"exec-root,omitempty"`
+	GraphDriver          string              `json:"storage-driver,omitempty"`
+	GraphOptions         []string            `json:"storage-opts,omitempty"`
+	Labels               []string            `json:"labels,omitempty"`
+	LogConfig            LogConfig           `json:"log-config,omitempty"`
+	Mtu                  int                 `json:"mtu,omitempty"`
+	Pidfile              string              `json:"pidfile,omitempty"`
+	Root                 string              `json:"graph,omitempty"`
+	TrustKeyPath         string              `json:"-"`
 
 	// ClusterStore is the storage backend used for the cluster information. It is used by both
 	// multihost networking (to store networks and endpoints information) and by the node discovery
 	// mechanism.
-	ClusterStore string
+	ClusterStore string `json:"cluster-store,omitempty"`
 
 	// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
 	// as TLS configuration settings.
-	ClusterOpts map[string]string
+	ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
 
 	// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
 	// discovery. This should be a 'host:port' combination on which that daemon instance is
 	// reachable by other hosts.
-	ClusterAdvertise string
+	ClusterAdvertise string `json:"cluster-advertise,omitempty"`
+
+	Debug      bool             `json:"debug,omitempty"`
+	Hosts      []string         `json:"hosts,omitempty"`
+	LogLevel   string           `json:"log-level,omitempty"`
+	TLS        bool             `json:"tls,omitempty"`
+	TLSVerify  bool             `json:"tls-verify,omitempty"`
+	TLSOptions CommonTLSOptions `json:"tls-opts,omitempty"`
+
+	reloadLock sync.Mutex
 }
 
 // InstallCommonFlags adds command-line options to the top-level flag parser for
@@ -54,9 +91,9 @@
 // Subsequent calls to `flag.Parse` will populate config with values parsed
 // from the command-line.
 func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) {
-	cmd.Var(opts.NewListOptsRef(&config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options"))
-	cmd.Var(opts.NewListOptsRef(&config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last"))
-	cmd.Var(opts.NewListOptsRef(&config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options"))
+	cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Set storage driver options"))
+	cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("List authorization plugins in order from first evaluator to last"))
+	cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Set exec driver options"))
 	cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file"))
 	cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime"))
 	cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, "/var/run/docker", usageFn("Root of the Docker execdriver"))
@@ -65,12 +102,131 @@
 	cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU"))
 	// FIXME: why the inconsistency between "hosts" and "sockets"?
 	cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use"))
-	cmd.Var(opts.NewListOptsRef(&config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use"))
+	cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use"))
 	cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use"))
-	cmd.Var(opts.NewListOptsRef(&config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
+	cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon"))
 	cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs"))
-	cmd.Var(opts.NewMapOpts(config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))
+	cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Set log driver options"))
 	cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise"))
 	cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("Set the cluster store"))
-	cmd.Var(opts.NewMapOpts(config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options"))
+	cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options"))
+}
+
+func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
+	if clusterAdvertise == "" {
+		return "", errDiscoveryDisabled
+	}
+	if clusterStore == "" {
+		return "", fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
+	}
+
+	advertise, err := discovery.ParseAdvertise(clusterAdvertise)
+	if err != nil {
+		return "", fmt.Errorf("discovery advertise parsing failed (%v)", err)
+	}
+	return advertise, nil
+}
+
+// ReloadConfiguration reads the configuration file from the host and reloads the daemon and server.
+func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) {
+	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
+	newConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		logrus.Error(err)
+	} else {
+		reload(newConfig)
+	}
+}
+
+// MergeDaemonConfigurations reads a configuration file,
+// loads the file configuration into an isolated structure,
+// and merges the configuration provided from flags on top
+// if there are no conflicts.
+func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) {
+	fileConfig, err := getConflictFreeConfiguration(configFile, flags)
+	if err != nil {
+		return nil, err
+	}
+
+	// merge flags configuration on top of the file configuration
+	if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
+		return nil, err
+	}
+
+	return fileConfig, nil
+}
+
+// getConflictFreeConfiguration loads the configuration from a JSON file.
+// It compares that configuration with the one provided by the flags,
+// and returns an error if there are conflicts.
+func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) {
+	b, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		return nil, err
+	}
+
+	var reader io.Reader
+	if flags != nil {
+		var jsonConfig map[string]interface{}
+		reader = bytes.NewReader(b)
+		if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
+			return nil, err
+		}
+
+		if err := findConfigurationConflicts(jsonConfig, flags); err != nil {
+			return nil, err
+		}
+	}
+
+	var config Config
+	reader = bytes.NewReader(b)
+	err = json.NewDecoder(reader).Decode(&config)
+	return &config, err
+}
+
+// findConfigurationConflicts iterates over the provided flags searching for
+// duplicated configurations. It returns an error with all the conflicts if
+// it finds any.
+func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error {
+	var conflicts []string
+	flatten := make(map[string]interface{})
+	for k, v := range config {
+		if m, ok := v.(map[string]interface{}); ok {
+			for km, vm := range m {
+				flatten[km] = vm
+			}
+		} else {
+			flatten[k] = v
+		}
+	}
+
+	printConflict := func(name string, flagValue, fileValue interface{}) string {
+		return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
+	}
+
+	collectConflicts := func(f *flag.Flag) {
+		// search option name in the json configuration payload if the value is a named option
+		if namedOption, ok := f.Value.(opts.NamedOption); ok {
+			if optsValue, ok := flatten[namedOption.Name()]; ok {
+				conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
+			}
+		} else {
+			// search flag name in the json configuration payload without trailing dashes
+			for _, name := range f.Names {
+				name = strings.TrimLeft(name, "-")
+
+				if value, ok := flatten[name]; ok {
+					conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
+					break
+				}
+			}
+		}
+	}
+
+	flags.Visit(collectConflicts)
+
+	if len(conflicts) > 0 {
+		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
+	}
+	return nil
 }
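
A note on the merge direction, with a sketch (not from this patch): MergeDaemonConfigurations calls mergo.Merge(fileConfig, flagsConfig), and mergo only copies a src field into dst when the dst field is still its zero value. The file's settings therefore survive, flags fill in whatever the file left unset, and genuine overlaps never reach the merge because getConflictFreeConfiguration rejects them first. The field names below are illustrative.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type demoConfig struct {
	LogLevel string
	Labels   []string
}

func main() {
	// Pretend this was decoded from the JSON configuration file...
	fileConfig := demoConfig{LogLevel: "debug"}
	// ...and this was populated from command-line flags.
	flagsConfig := demoConfig{Labels: []string{"env=test"}}

	// Merge flags on top: only zero-valued fields of fileConfig are filled.
	if err := mergo.Merge(&fileConfig, flagsConfig); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", fileConfig) // {LogLevel:debug Labels:[env=test]}
}
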
diff --git a/daemon/config_test.go b/daemon/config_test.go
new file mode 100644
index 0000000..69a199e
--- /dev/null
+++ b/daemon/config_test.go
@@ -0,0 +1,177 @@
+package daemon
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/mflag"
+)
+
+func TestDaemonConfigurationMerge(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"debug": true}`))
+	f.Close()
+
+	c := &Config{
+		CommonConfig: CommonConfig{
+			AutoRestart: true,
+			LogConfig: LogConfig{
+				Type:   "syslog",
+				Config: map[string]string{"tag": "test"},
+			},
+		},
+	}
+
+	cc, err := MergeDaemonConfigurations(c, nil, configFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cc.Debug {
+		t.Fatalf("expected %v, got %v\n", true, cc.Debug)
+	}
+	if !cc.AutoRestart {
+		t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart)
+	}
+	if cc.LogConfig.Type != "syslog" {
+		t.Fatalf("expected syslog config, got %q\n", cc.LogConfig)
+	}
+}
+
+func TestDaemonConfigurationNotFound(t *testing.T) {
+	_, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker")
+	if err == nil || !os.IsNotExist(err) {
+		t.Fatalf("expected does not exist error, got %v", err)
+	}
+}
+
+func TestDaemonBrokenConfiguration(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"Debug": tru`))
+	f.Close()
+
+	_, err = MergeDaemonConfigurations(&Config{}, nil, configFile)
+	if err == nil {
+		t.Fatalf("expected error, got %v", err)
+	}
+}
+
+func TestParseClusterAdvertiseSettings(t *testing.T) {
+	_, err := parseClusterAdvertiseSettings("something", "")
+	if err != errDiscoveryDisabled {
+		t.Fatalf("expected discovery disabled error, got %v\n", err)
+	}
+
+	_, err = parseClusterAdvertiseSettings("", "something")
+	if err == nil {
+		t.Fatalf("expected discovery store error, got %v\n", err)
+	}
+
+	_, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestFindConfigurationConflicts(t *testing.T) {
+	config := map[string]interface{}{"authorization-plugins": "foobar"}
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+
+	err := findConfigurationConflicts(config, flags)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	flags.String([]string{"authorization-plugins"}, "", "")
+	if err := flags.Set("authorization-plugins", "asdf"); err != nil {
+		t.Fatal(err)
+	}
+
+	err = findConfigurationConflicts(config, flags)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "authorization-plugins") {
+		t.Fatalf("expected authorization-plugins conflict, got %v", err)
+	}
+}
+
+func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) {
+	config := map[string]interface{}{"hosts": []string{"qwer"}}
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+
+	var hosts []string
+	flags.Var(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to")
+	if err := flags.Set("-host", "tcp://127.0.0.1:4444"); err != nil {
+		t.Fatal(err)
+	}
+	if err := flags.Set("H", "unix:///var/run/docker.sock"); err != nil {
+		t.Fatal(err)
+	}
+
+	err := findConfigurationConflicts(config, flags)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "hosts") {
+		t.Fatalf("expected hosts conflict, got %v", err)
+	}
+}
+
+func TestDaemonConfigurationMergeConflicts(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"debug": true}`))
+	f.Close()
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	flags.Bool([]string{"debug"}, false, "")
+	flags.Set("debug", "false")
+
+	_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "debug") {
+		t.Fatalf("expected debug conflict, got %v", err)
+	}
+}
+
+func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`))
+	f.Close()
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	flags.String([]string{"tlscacert"}, "", "")
+	flags.Set("tlscacert", "~/.docker/ca.pem")
+
+	_, err = MergeDaemonConfigurations(&Config{}, flags, configFile)
+	if err == nil {
+		t.Fatal("expected error, got nil")
+	}
+	if !strings.Contains(err.Error(), "tlscacert") {
+		t.Fatalf("expected tlscacert conflict, got %v", err)
+	}
+}
diff --git a/daemon/config_unix.go b/daemon/config_unix.go
index a25df90..60fb3a9 100644
--- a/daemon/config_unix.go
+++ b/daemon/config_unix.go
@@ -18,18 +18,20 @@
 )
 
 // Config defines the configuration of a docker daemon.
+// It includes json tags to deserialize configuration from a file
+// using the same names that the command-line flags use.
 type Config struct {
 	CommonConfig
 
 	// Fields below here are platform specific.
 
-	CorsHeaders          string
-	EnableCors           bool
-	EnableSelinuxSupport bool
-	RemappedRoot         string
-	SocketGroup          string
-	CgroupParent         string
-	Ulimits              map[string]*units.Ulimit
+	CorsHeaders          string                   `json:"api-cors-headers,omitempty"`
+	EnableCors           bool                     `json:"api-enable-cors,omitempty"`
+	EnableSelinuxSupport bool                     `json:"selinux-enabled,omitempty"`
+	RemappedRoot         string                   `json:"userns-remap,omitempty"`
+	SocketGroup          string                   `json:"group,omitempty"`
+	CgroupParent         string                   `json:"cgroup-parent,omitempty"`
+	Ulimits              map[string]*units.Ulimit `json:"default-ulimits,omitempty"`
 }
 
 // bridgeConfig stores all the bridge driver specific
diff --git a/daemon/daemon.go b/daemon/daemon.go
index bbecc67..9e0e77e 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -46,7 +46,6 @@
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/migrate/v1"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/discovery"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/graphdb"
 	"github.com/docker/docker/pkg/idtools"
@@ -155,7 +154,7 @@
 	EventsService             *events.Events
 	netController             libnetwork.NetworkController
 	volumes                   *store.VolumeStore
-	discoveryWatcher          discovery.Watcher
+	discoveryWatcher          discoveryReloader
 	root                      string
 	seccompEnabled            bool
 	shutdown                  bool
@@ -292,7 +291,7 @@
 
 func (daemon *Daemon) restore() error {
 	var (
-		debug         = os.Getenv("DEBUG") != ""
+		debug         = utils.IsDebugEnabled()
 		currentDriver = daemon.GraphDriverName()
 		containers    = make(map[string]*container.Container)
 	)
@@ -772,19 +771,8 @@
 
 	// Discovery is only enabled when the daemon is launched with an address to advertise.  When
 	// initialized, the daemon is registered and we can store the discovery backend as its read-only
-	// DiscoveryWatcher version.
-	if config.ClusterStore != "" && config.ClusterAdvertise != "" {
-		advertise, err := discovery.ParseAdvertise(config.ClusterStore, config.ClusterAdvertise)
-		if err != nil {
-			return nil, fmt.Errorf("discovery advertise parsing failed (%v)", err)
-		}
-		config.ClusterAdvertise = advertise
-		d.discoveryWatcher, err = initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
-		if err != nil {
-			return nil, fmt.Errorf("discovery initialization failed (%v)", err)
-		}
-	} else if config.ClusterAdvertise != "" {
-		return nil, fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
+	if err := d.initDiscovery(config); err != nil {
+		return nil, err
 	}
 
 	d.netController, err = d.initNetworkController(config)
@@ -815,7 +803,10 @@
 	d.configStore = config
 	d.execDriver = ed
 	d.statsCollector = d.newStatsCollector(1 * time.Second)
-	d.defaultLogConfig = config.LogConfig
+	d.defaultLogConfig = containertypes.LogConfig{
+		Type:   config.LogConfig.Type,
+		Config: config.LogConfig.Config,
+	}
 	d.RegistryService = registryService
 	d.EventsService = eventsService
 	d.volumes = volStore
@@ -1521,6 +1512,76 @@
 	return container.NewBaseContainer(id, daemon.containerRoot(id))
 }
 
+// initDiscovery initializes the discovery watcher for this daemon.
+func (daemon *Daemon) initDiscovery(config *Config) error {
+	advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
+	if err != nil {
+		if err == errDiscoveryDisabled {
+			return nil
+		}
+		return err
+	}
+
+	config.ClusterAdvertise = advertise
+	discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
+	if err != nil {
+		return fmt.Errorf("discovery initialization failed (%v)", err)
+	}
+
+	daemon.discoveryWatcher = discoveryWatcher
+	return nil
+}
+
+// Reload reads configuration changes and modifies the
+// daemon according to those changes.
+// These are the settings that Reload changes:
+// - Daemon labels.
+// - Cluster discovery (reconfigure and restart).
+func (daemon *Daemon) Reload(config *Config) error {
+	daemon.configStore.reloadLock.Lock()
+	defer daemon.configStore.reloadLock.Unlock()
+
+	daemon.configStore.Labels = config.Labels
+	return daemon.reloadClusterDiscovery(config)
+}
+
+func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
+	newAdvertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
+	if err != nil && err != errDiscoveryDisabled {
+		return err
+	}
+
+	// check discovery modifications
+	if !modifiedDiscoverySettings(daemon.configStore, config.ClusterStore, newAdvertise, config.ClusterOpts) {
+		return nil
+	}
+
+	// enable discovery for the first time if it was not previously enabled
+	if daemon.discoveryWatcher == nil {
+		discoveryWatcher, err := initDiscovery(config.ClusterStore, newAdvertise, config.ClusterOpts)
+		if err != nil {
+			return fmt.Errorf("discovery initialization failed (%v)", err)
+		}
+		daemon.discoveryWatcher = discoveryWatcher
+	} else {
+		if err == errDiscoveryDisabled {
+			// disable discovery if it was previously enabled and it's disabled now
+			daemon.discoveryWatcher.Stop()
+		} else {
+			// reload discovery
+			if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
+				return err
+			}
+		}
+	}
+
+	daemon.configStore.ClusterStore = config.ClusterStore
+	daemon.configStore.ClusterOpts = config.ClusterOpts
+	daemon.configStore.ClusterAdvertise = newAdvertise
+
+	return nil
+}
+
 func convertLnNetworkStats(name string, stats *lntypes.InterfaceStatistics) *libcontainer.NetworkInterface {
 	n := &libcontainer.NetworkInterface{Name: name}
 	n.RxBytes = stats.RxBytes
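
The signal wiring that drives these Reload paths is not part of this excerpt; below is a self-contained sketch of the likely shape, assuming a SIGHUP handler ends up calling ReloadConfiguration with the daemon's config path. The path and the reload body are placeholders, not taken from the patch.

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// reload stands in for daemon.ReloadConfiguration followed by Daemon.Reload
// and Server.Reload; the patch passes the parsed *Config through a callback.
func reload(configFile string) {
	log.Printf("got SIGHUP, reloading configuration from %s", configFile)
}

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for range c {
			reload("/etc/docker/daemon.json") // placeholder path
		}
	}()
	select {} // block, as a daemon would
}
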
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index e6550a4..26e9c2f 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -4,9 +4,13 @@
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"reflect"
 	"testing"
+	"time"
 
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/pkg/discovery"
+	_ "github.com/docker/docker/pkg/discovery/memory"
 	"github.com/docker/docker/pkg/registrar"
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/volume"
@@ -371,3 +375,118 @@
 		}
 	}
 }
+
+func TestDaemonReloadLabels(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &Config{
+		CommonConfig: CommonConfig{
+			Labels: []string{"foo:bar"},
+		},
+	}
+
+	newConfig := &Config{
+		CommonConfig: CommonConfig{
+			Labels: []string{"foo:baz"},
+		},
+	}
+
+	daemon.Reload(newConfig)
+	label := daemon.configStore.Labels[0]
+	if label != "foo:baz" {
+		t.Fatalf("Expected daemon label `foo:baz`, got %s", label)
+	}
+}
+
+func TestDaemonDiscoveryReload(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     "memory://127.0.0.1",
+			ClusterAdvertise: "127.0.0.1:3333",
+		},
+	}
+
+	if err := daemon.initDiscovery(daemon.configStore); err != nil {
+		t.Fatal(err)
+	}
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "3333"},
+	}
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+
+	newConfig := &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     "memory://127.0.0.1:2222",
+			ClusterAdvertise: "127.0.0.1:5555",
+		},
+	}
+
+	expected = discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+	ch, errCh = daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
+
+func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) {
+	daemon := &Daemon{}
+	daemon.configStore = &Config{}
+
+	newConfig := &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     "memory://127.0.0.1:2222",
+			ClusterAdvertise: "127.0.0.1:5555",
+		},
+	}
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "127.0.0.1", Port: "5555"},
+	}
+
+	if err := daemon.Reload(newConfig); err != nil {
+		t.Fatal(err)
+	}
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	ch, errCh := daemon.discoveryWatcher.Watch(stopCh)
+
+	select {
+	case <-time.After(1 * time.Second):
+		t.Fatal("failed to get discovery advertisements in time")
+	case e := <-ch:
+		if !reflect.DeepEqual(e, expected) {
+			t.Fatalf("expected %v, got %v\n", expected, e)
+		}
+	case e := <-errCh:
+		t.Fatal(e)
+	}
+}
diff --git a/daemon/discovery.go b/daemon/discovery.go
index ef9307d..6c4bcc4 100644
--- a/daemon/discovery.go
+++ b/daemon/discovery.go
@@ -1,7 +1,9 @@
 package daemon
 
 import (
+	"errors"
 	"fmt"
+	"reflect"
 	"strconv"
 	"time"
 
@@ -19,6 +21,24 @@
 	defaultDiscoveryTTLFactor = 3
 )
 
+var errDiscoveryDisabled = errors.New("discovery is disabled")
+
+type discoveryReloader interface {
+	discovery.Watcher
+	Stop()
+	Reload(backend, address string, clusterOpts map[string]string) error
+}
+
+type daemonDiscoveryReloader struct {
+	backend discovery.Backend
+	ticker  *time.Ticker
+	term    chan bool
+}
+
+func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+	return d.backend.Watch(stopCh)
+}
+
 func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) {
 	var (
 		heartbeat = defaultDiscoveryHeartbeat
@@ -57,36 +77,94 @@
 
 // initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend
 // and starts a registration loop to advertise the current node under the specified address.
-func initDiscovery(backend, address string, clusterOpts map[string]string) (discovery.Backend, error) {
-
-	heartbeat, ttl, err := discoveryOpts(clusterOpts)
+func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) {
+	heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
 	if err != nil {
 		return nil, err
 	}
 
-	discoveryBackend, err := discovery.New(backend, heartbeat, ttl, clusterOpts)
-	if err != nil {
-		return nil, err
+	reloader := &daemonDiscoveryReloader{
+		backend: backend,
+		ticker:  time.NewTicker(heartbeat),
+		term:    make(chan bool),
 	}
-
 	// We call Register() on the discovery backend in a loop for the whole lifetime of the daemon,
 	// but we never actually Watch() for nodes appearing and disappearing for the moment.
-	go registrationLoop(discoveryBackend, address, heartbeat)
-	return discoveryBackend, nil
+	reloader.advertise(advertiseAddress)
+	return reloader, nil
 }
 
-func registerAddr(backend discovery.Backend, addr string) {
-	if err := backend.Register(addr); err != nil {
+func (d *daemonDiscoveryReloader) advertise(address string) {
+	d.registerAddr(address)
+	go d.advertiseHeartbeat(address)
+}
+
+func (d *daemonDiscoveryReloader) registerAddr(addr string) {
+	if err := d.backend.Register(addr); err != nil {
 		log.Warnf("Registering as %q in discovery failed: %v", addr, err)
 	}
 }
 
-// registrationLoop registers the current node against the discovery backend using the specified
+// advertiseHeartbeat registers the current node against the discovery backend using the specified
 // address. The function never returns, as registration against the backend comes with a TTL and
 // requires regular heartbeats.
-func registrationLoop(discoveryBackend discovery.Backend, address string, heartbeat time.Duration) {
-	registerAddr(discoveryBackend, address)
-	for range time.Tick(heartbeat) {
-		registerAddr(discoveryBackend, address)
+func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
+	for {
+		select {
+		case <-d.ticker.C:
+			d.registerAddr(address)
+		case <-d.term:
+			return
+		}
 	}
 }
+
+// Reload stops the watcher from advertising and reconfigures it to advertise at a new address.
+func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
+	d.Stop()
+
+	heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
+	if err != nil {
+		return err
+	}
+
+	d.backend = backend
+	d.ticker = time.NewTicker(heartbeat)
+
+	d.advertise(advertiseAddress)
+	return nil
+}
+
+// Stop terminates the discovery advertising.
+func (d *daemonDiscoveryReloader) Stop() {
+	d.ticker.Stop()
+	d.term <- true
+}
+
+func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) {
+	heartbeat, ttl, err := discoveryOpts(clusterOpts)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts)
+	if err != nil {
+		return 0, nil, err
+	}
+	return heartbeat, backend, nil
+}
+
+// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
+func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
+	if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
+		return true
+	}
+
+	if (config.ClusterOpts == nil && clusterOpts == nil) ||
+		(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
+		(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
+		return false
+	}
+
+	return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
+}
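
The advertise/Stop pair above is a ticker-plus-terminate-channel loop; here is a self-contained sketch (not from this patch) of the same shape, with the backend Register call replaced by a print.

package main

import (
	"fmt"
	"time"
)

type heartbeater struct {
	ticker *time.Ticker
	term   chan bool
}

func newHeartbeater(interval time.Duration) *heartbeater {
	h := &heartbeater{ticker: time.NewTicker(interval), term: make(chan bool)}
	go h.loop()
	return h
}

func (h *heartbeater) loop() {
	for {
		select {
		case <-h.ticker.C:
			fmt.Println("register heartbeat") // stands in for backend.Register(addr)
		case <-h.term:
			return
		}
	}
}

// Stop mirrors daemonDiscoveryReloader.Stop: stop the ticker, then signal the
// loop. The unbuffered send blocks until the loop receives, so Stop must not
// be called again unless the loop has been restarted.
func (h *heartbeater) Stop() {
	h.ticker.Stop()
	h.term <- true
}

func main() {
	h := newHeartbeater(100 * time.Millisecond)
	time.Sleep(350 * time.Millisecond)
	h.Stop()
	fmt.Println("stopped")
}
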
diff --git a/daemon/discovery_test.go b/daemon/discovery_test.go
index e65aecb..c761a69 100644
--- a/daemon/discovery_test.go
+++ b/daemon/discovery_test.go
@@ -89,3 +89,64 @@
 		t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl)
 	}
 }
+
+func TestModifiedDiscoverySettings(t *testing.T) {
+	cases := []struct {
+		current  *Config
+		modified *Config
+		expected bool
+	}{
+		{
+			current:  discoveryConfig("foo", "bar", map[string]string{}),
+			modified: discoveryConfig("foo", "bar", map[string]string{}),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
+			modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", map[string]string{}),
+			modified: discoveryConfig("foo", "bar", nil),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("foo", "bar", map[string]string{}),
+			expected: false,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("baz", "bar", nil),
+			expected: true,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("foo", "baz", nil),
+			expected: true,
+		},
+		{
+			current:  discoveryConfig("foo", "bar", nil),
+			modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}),
+			expected: true,
+		},
+	}
+
+	for _, c := range cases {
+		got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts)
+		if c.expected != got {
+			t.Fatalf("expected %v, got %v: current config %q, new config %q", c.expected, got, c.current, c.modified)
+		}
+	}
+}
+
+func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config {
+	return &Config{
+		CommonConfig: CommonConfig{
+			ClusterStore:     backendAddr,
+			ClusterAdvertise: advertiseAddr,
+			ClusterOpts:      opts,
+		},
+	}
+}
diff --git a/daemon/info.go b/daemon/info.go
index f5f6f96..804d6e4 100644
--- a/daemon/info.go
+++ b/daemon/info.go
@@ -79,7 +79,7 @@
 		IPv4Forwarding:     !sysInfo.IPv4ForwardingDisabled,
 		BridgeNfIptables:   !sysInfo.BridgeNfCallIptablesDisabled,
 		BridgeNfIP6tables:  !sysInfo.BridgeNfCallIP6tablesDisabled,
-		Debug:              os.Getenv("DEBUG") != "",
+		Debug:              utils.IsDebugEnabled(),
 		NFd:                fileutils.GetTotalUsedFds(),
 		NGoroutines:        runtime.NumGoroutine(),
 		SystemTime:         time.Now().Format(time.RFC3339Nano),
diff --git a/daemon/network.go b/daemon/network.go
index be8bc33..f6a2515 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -114,7 +114,7 @@
 		return nil, err
 	}
 
-	nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, nil))
+	nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options))
 	nwOptions = append(nwOptions, libnetwork.NetworkOptionDriverOpts(options))
 	if internal {
 		nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
diff --git a/daemon/start.go b/daemon/start.go
index 55262d2..418dace 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -142,15 +142,9 @@
 	mounts = append(mounts, container.TmpfsMounts()...)
 
 	container.Command.Mounts = mounts
-	container.Unlock()
-
-	// don't lock waitForStart because it has potential risk of blocking
-	// which will lead to dead lock, forever.
 	if err := daemon.waitForStart(container); err != nil {
-		container.Lock()
 		return err
 	}
-	container.Lock()
 	container.HasBeenStartedBefore = true
 	return nil
 }
diff --git a/distribution/metadata/blobsum_service.go b/distribution/metadata/blobsum_service.go
deleted file mode 100644
index 88ed7bb..0000000
--- a/distribution/metadata/blobsum_service.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metadata
-
-import (
-	"encoding/json"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-// BlobSumService maps layer IDs to a set of known blobsums for
-// the layer.
-type BlobSumService struct {
-	store Store
-}
-
-// maxBlobSums is the number of blobsums to keep per layer DiffID.
-const maxBlobSums = 5
-
-// NewBlobSumService creates a new blobsum mapping service.
-func NewBlobSumService(store Store) *BlobSumService {
-	return &BlobSumService{
-		store: store,
-	}
-}
-
-func (blobserv *BlobSumService) diffIDNamespace() string {
-	return "blobsum-storage"
-}
-
-func (blobserv *BlobSumService) blobSumNamespace() string {
-	return "blobsum-lookup"
-}
-
-func (blobserv *BlobSumService) diffIDKey(diffID layer.DiffID) string {
-	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
-}
-
-func (blobserv *BlobSumService) blobSumKey(blobsum digest.Digest) string {
-	return string(blobsum.Algorithm()) + "/" + blobsum.Hex()
-}
-
-// GetBlobSums finds the blobsums associated with a layer DiffID.
-func (blobserv *BlobSumService) GetBlobSums(diffID layer.DiffID) ([]digest.Digest, error) {
-	jsonBytes, err := blobserv.store.Get(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID))
-	if err != nil {
-		return nil, err
-	}
-
-	var blobsums []digest.Digest
-	if err := json.Unmarshal(jsonBytes, &blobsums); err != nil {
-		return nil, err
-	}
-
-	return blobsums, nil
-}
-
-// GetDiffID finds a layer DiffID from a blobsum hash.
-func (blobserv *BlobSumService) GetDiffID(blobsum digest.Digest) (layer.DiffID, error) {
-	diffIDBytes, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum))
-	if err != nil {
-		return layer.DiffID(""), err
-	}
-
-	return layer.DiffID(diffIDBytes), nil
-}
-
-// Add associates a blobsum with a layer DiffID. If too many blobsums are
-// present, the oldest one is dropped.
-func (blobserv *BlobSumService) Add(diffID layer.DiffID, blobsum digest.Digest) error {
-	oldBlobSums, err := blobserv.GetBlobSums(diffID)
-	if err != nil {
-		oldBlobSums = nil
-	}
-	newBlobSums := make([]digest.Digest, 0, len(oldBlobSums)+1)
-
-	// Copy all other blobsums to new slice
-	for _, oldSum := range oldBlobSums {
-		if oldSum != blobsum {
-			newBlobSums = append(newBlobSums, oldSum)
-		}
-	}
-
-	newBlobSums = append(newBlobSums, blobsum)
-
-	if len(newBlobSums) > maxBlobSums {
-		newBlobSums = newBlobSums[len(newBlobSums)-maxBlobSums:]
-	}
-
-	jsonBytes, err := json.Marshal(newBlobSums)
-	if err != nil {
-		return err
-	}
-
-	err = blobserv.store.Set(blobserv.diffIDNamespace(), blobserv.diffIDKey(diffID), jsonBytes)
-	if err != nil {
-		return err
-	}
-
-	return blobserv.store.Set(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum), []byte(diffID))
-}
diff --git a/distribution/metadata/blobsum_service_test.go b/distribution/metadata/blobsum_service_test.go
deleted file mode 100644
index dee64df..0000000
--- a/distribution/metadata/blobsum_service_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package metadata
-
-import (
-	"io/ioutil"
-	"os"
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-func TestBlobSumService(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
-	if err != nil {
-		t.Fatalf("could not create temp dir: %v", err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	metadataStore, err := NewFSMetadataStore(tmpDir)
-	if err != nil {
-		t.Fatalf("could not create metadata store: %v", err)
-	}
-	blobSumService := NewBlobSumService(metadataStore)
-
-	testVectors := []struct {
-		diffID   layer.DiffID
-		blobsums []digest.Digest
-	}{
-		{
-			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
-			blobsums: []digest.Digest{
-				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
-				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
-				digest.Digest("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"),
-				digest.Digest("sha256:8902a7ca89aabbb868835260912159026637634090dd8899eee969523252236e"),
-				digest.Digest("sha256:c84364306344ccc48532c52ff5209236273525231dddaaab53262322352883aa"),
-				digest.Digest("sha256:aa7583bbc87532a8352bbb72520a821b3623523523a8352523a52352aaa888fe"),
-			},
-		},
-	}
-
-	// Set some associations
-	for _, vec := range testVectors {
-		for _, blobsum := range vec.blobsums {
-			err := blobSumService.Add(vec.diffID, blobsum)
-			if err != nil {
-				t.Fatalf("error calling Set: %v", err)
-			}
-		}
-	}
-
-	// Check the correct values are read back
-	for _, vec := range testVectors {
-		blobsums, err := blobSumService.GetBlobSums(vec.diffID)
-		if err != nil {
-			t.Fatalf("error calling Get: %v", err)
-		}
-		expectedBlobsums := len(vec.blobsums)
-		if expectedBlobsums > 5 {
-			expectedBlobsums = 5
-		}
-		if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) {
-			t.Fatal("Get returned incorrect layer ID")
-		}
-	}
-
-	// Test GetBlobSums on a nonexistent entry
-	_, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Test GetDiffID on a nonexistent entry
-	_, err = blobSumService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Overwrite one of the entries and read it back
-	err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0])
-	if err != nil {
-		t.Fatalf("error calling Add: %v", err)
-	}
-	diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0])
-	if err != nil {
-		t.Fatalf("error calling GetDiffID: %v", err)
-	}
-	if diffID != testVectors[1].diffID {
-		t.Fatal("GetDiffID returned incorrect diffID")
-	}
-}
diff --git a/distribution/metadata/metadata.go b/distribution/metadata/metadata.go
index ab9cc5b..9f744d4 100644
--- a/distribution/metadata/metadata.go
+++ b/distribution/metadata/metadata.go
@@ -15,6 +15,8 @@
 	Get(namespace string, key string) ([]byte, error)
 	// Set writes data indexed by namespace and key.
 	Set(namespace, key string, value []byte) error
+	// Delete removes data indexed by namespace and key.
+	Delete(namespace, key string) error
 }
 
 // FSMetadataStore uses the filesystem to associate metadata with layer and
@@ -63,3 +65,13 @@
 	}
 	return os.Rename(tempFilePath, path)
 }
+
+// Delete removes data indexed by namespace and key. The data file named after
+// the key, stored in the namespace's directory, is deleted.
+func (store *FSMetadataStore) Delete(namespace, key string) error {
+	store.Lock()
+	defer store.Unlock()
+
+	path := store.path(namespace, key)
+	return os.Remove(path)
+}
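
The `Store` interface now carries Get, Set, and Delete. For a sense of the contract the FS store satisfies, here is a hypothetical in-memory implementation (illustration only, not part of this change; unlike `FSMetadataStore` it does no locking):

```go
package metadata

import "fmt"

// memStore is a hypothetical in-memory Store, handy for sketches or tests.
// It is not safe for concurrent use.
type memStore struct {
	data map[string][]byte
}

func newMemStore() *memStore {
	return &memStore{data: make(map[string][]byte)}
}

// Get reads data indexed by namespace and key.
func (s *memStore) Get(namespace string, key string) ([]byte, error) {
	v, ok := s.data[namespace+"/"+key]
	if !ok {
		return nil, fmt.Errorf("no metadata for %s/%s", namespace, key)
	}
	return v, nil
}

// Set writes data indexed by namespace and key.
func (s *memStore) Set(namespace, key string, value []byte) error {
	s.data[namespace+"/"+key] = value
	return nil
}

// Delete removes data indexed by namespace and key.
func (s *memStore) Delete(namespace, key string) error {
	delete(s.data, namespace+"/"+key)
	return nil
}
```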
diff --git a/distribution/metadata/v2_metadata_service.go b/distribution/metadata/v2_metadata_service.go
new file mode 100644
index 0000000..239cd1f
--- /dev/null
+++ b/distribution/metadata/v2_metadata_service.go
@@ -0,0 +1,137 @@
+package metadata
+
+import (
+	"encoding/json"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+// V2MetadataService maps layer DiffIDs to a set of known metadata for
+// the layer.
+type V2MetadataService struct {
+	store Store
+}
+
+// V2Metadata contains the digest and source repository information for a layer.
+type V2Metadata struct {
+	Digest           digest.Digest
+	SourceRepository string
+}
+
+// maxMetadata is the number of metadata entries to keep per layer DiffID.
+const maxMetadata = 50
+
+// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
+func NewV2MetadataService(store Store) *V2MetadataService {
+	return &V2MetadataService{
+		store: store,
+	}
+}
+
+func (serv *V2MetadataService) diffIDNamespace() string {
+	return "v2metadata-by-diffid"
+}
+
+func (serv *V2MetadataService) digestNamespace() string {
+	return "diffid-by-digest"
+}
+
+func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string {
+	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
+}
+
+func (serv *V2MetadataService) digestKey(dgst digest.Digest) string {
+	return string(dgst.Algorithm()) + "/" + dgst.Hex()
+}
+
+// GetMetadata finds the metadata associated with a layer DiffID.
+func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
+	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	if err != nil {
+		return nil, err
+	}
+
+	var metadata []V2Metadata
+	if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
+		return nil, err
+	}
+
+	return metadata, nil
+}
+
+// GetDiffID finds a layer DiffID from a digest.
+func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
+	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
+	if err != nil {
+		return layer.DiffID(""), err
+	}
+
+	return layer.DiffID(diffIDBytes), nil
+}
+
+// Add associates metadata with a layer DiffID. If too many metadata entries are
+// present, the oldest one is dropped.
+func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	newMetadata = append(newMetadata, metadata)
+
+	if len(newMetadata) > maxMetadata {
+		newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
+}
+
+// Remove dissociates a metadata entry from a layer DiffID.
+func (serv *V2MetadataService) Remove(metadata V2Metadata) error {
+	diffID, err := serv.GetDiffID(metadata.Digest)
+	if err != nil {
+		return err
+	}
+	oldMetadata, err := serv.GetMetadata(diffID)
+	if err != nil {
+		oldMetadata = nil
+	}
+	newMetadata := make([]V2Metadata, 0, len(oldMetadata))
+
+	// Copy all other metadata to new slice
+	for _, oldMeta := range oldMetadata {
+		if oldMeta != metadata {
+			newMetadata = append(newMetadata, oldMeta)
+		}
+	}
+
+	if len(newMetadata) == 0 {
+		return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
+	}
+
+	jsonBytes, err := json.Marshal(newMetadata)
+	if err != nil {
+		return err
+	}
+
+	return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
+}
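
Taken together, the service maintains a two-way mapping: DiffID to a bounded list of `V2Metadata` (digest plus source repository), and digest back to DiffID. A minimal usage sketch; the store path, digest values, and repository name are illustrative assumptions, not taken from this change:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/layer"
)

func main() {
	// Illustrative location; the daemon derives the real path from its root dir.
	store, err := metadata.NewFSMetadataStore("/tmp/distribution-metadata")
	if err != nil {
		panic(err)
	}
	svc := metadata.NewV2MetadataService(store)

	diffID := layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
	meta := metadata.V2Metadata{
		Digest:           digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
		SourceRepository: "docker.io/library/busybox",
	}

	// Record that this layer is known to that repository under that digest.
	if err := svc.Add(diffID, meta); err != nil {
		panic(err)
	}

	// A later push can ask which digests/repositories already hold this layer.
	known, err := svc.GetMetadata(diffID)
	if err != nil {
		panic(err)
	}
	fmt.Println(known)
}
```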
diff --git a/distribution/metadata/v2_metadata_service_test.go b/distribution/metadata/v2_metadata_service_test.go
new file mode 100644
index 0000000..7b0ecb1
--- /dev/null
+++ b/distribution/metadata/v2_metadata_service_test.go
@@ -0,0 +1,115 @@
+package metadata
+
+import (
+	"encoding/hex"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"reflect"
+	"testing"
+
+	"github.com/docker/distribution/digest"
+	"github.com/docker/docker/layer"
+)
+
+func TestV2MetadataService(t *testing.T) {
+	tmpDir, err := ioutil.TempDir("", "v2-metadata-service-test")
+	if err != nil {
+		t.Fatalf("could not create temp dir: %v", err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	metadataStore, err := NewFSMetadataStore(tmpDir)
+	if err != nil {
+		t.Fatalf("could not create metadata store: %v", err)
+	}
+	V2MetadataService := NewV2MetadataService(metadataStore)
+
+	tooManyBlobSums := make([]V2Metadata, 100)
+	for i := range tooManyBlobSums {
+		randDigest := randomDigest()
+		tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
+	}
+
+	testVectors := []struct {
+		diffID   layer.DiffID
+		metadata []V2Metadata
+	}{
+		{
+			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
+			metadata: []V2Metadata{
+				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
+			},
+		},
+		{
+			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
+			metadata: []V2Metadata{
+				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
+				{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
+			},
+		},
+		{
+			diffID:   layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
+			metadata: tooManyBlobSums,
+		},
+	}
+
+	// Set some associations
+	for _, vec := range testVectors {
+		for _, blobsum := range vec.metadata {
+			err := V2MetadataService.Add(vec.diffID, blobsum)
+			if err != nil {
+				t.Fatalf("error calling Add: %v", err)
+			}
+		}
+	}
+
+	// Check the correct values are read back
+	for _, vec := range testVectors {
+		metadata, err := V2MetadataService.GetMetadata(vec.diffID)
+		if err != nil {
+			t.Fatalf("error calling GetMetadata: %v", err)
+		}
+		expectedMetadataEntries := len(vec.metadata)
+		if expectedMetadataEntries > 50 {
+			expectedMetadataEntries = 50
+		}
+		if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
+			t.Fatal("GetMetadata returned incorrect metadata")
+		}
+	}
+
+	// Test GetMetadata on a nonexistent entry
+	_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Test GetDiffID on a nonexistent entry
+	_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
+	if err == nil {
+		t.Fatal("expected error looking up nonexistent entry")
+	}
+
+	// Overwrite one of the entries and read it back
+	err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
+	if err != nil {
+		t.Fatalf("error calling Add: %v", err)
+	}
+	diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
+	if err != nil {
+		t.Fatalf("error calling GetDiffID: %v", err)
+	}
+	if diffID != testVectors[1].diffID {
+		t.Fatal("GetDiffID returned incorrect diffID")
+	}
+}
+
+func randomDigest() digest.Digest {
+	b := [32]byte{}
+	for i := 0; i < len(b); i++ {
+		b[i] = byte(rand.Intn(256))
+	}
+	d := hex.EncodeToString(b[:])
+	return digest.Digest("sha256:" + d)
+}
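
The third test vector exercises the pruning path: `Add` keeps only the newest `maxMetadata` (50) entries per DiffID. The invariant, isolated as a hypothetical helper:

```go
package example

// pruneOldest mirrors the pruning rule in V2MetadataService.Add: when the
// slice grows past max, the oldest entries (the front of the slice) are
// dropped and only the newest max entries survive.
func pruneOldest(entries []string, max int) []string {
	if len(entries) > max {
		return entries[len(entries)-max:]
	}
	return entries
}
```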
diff --git a/distribution/pull.go b/distribution/pull.go
index db6e29d..5f38a67 100644
--- a/distribution/pull.go
+++ b/distribution/pull.go
@@ -61,10 +61,10 @@
 	switch endpoint.Version {
 	case registry.APIVersion2:
 		return &v2Puller{
-			blobSumService: metadata.NewBlobSumService(imagePullConfig.MetadataStore),
-			endpoint:       endpoint,
-			config:         imagePullConfig,
-			repoInfo:       repoInfo,
+			V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore),
+			endpoint:          endpoint,
+			config:            imagePullConfig,
+			repoInfo:          repoInfo,
 		}, nil
 	case registry.APIVersion1:
 		return &v1Puller{
diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go
index 7277d07..7bb1710 100644
--- a/distribution/pull_v2.go
+++ b/distribution/pull_v2.go
@@ -33,11 +33,11 @@
 var errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
 
 type v2Puller struct {
-	blobSumService *metadata.BlobSumService
-	endpoint       registry.APIEndpoint
-	config         *ImagePullConfig
-	repoInfo       *registry.RepositoryInfo
-	repo           distribution.Repository
+	V2MetadataService *metadata.V2MetadataService
+	endpoint          registry.APIEndpoint
+	config            *ImagePullConfig
+	repoInfo          *registry.RepositoryInfo
+	repo              distribution.Repository
 	// confirmedV2 is set to true if we confirm we're talking to a v2
 	// registry. This is used to limit fallbacks to the v1 protocol.
 	confirmedV2 bool
@@ -110,9 +110,10 @@
 }
 
 type v2LayerDescriptor struct {
-	digest         digest.Digest
-	repo           distribution.Repository
-	blobSumService *metadata.BlobSumService
+	digest            digest.Digest
+	repoInfo          *registry.RepositoryInfo
+	repo              distribution.Repository
+	V2MetadataService *metadata.V2MetadataService
 }
 
 func (ld *v2LayerDescriptor) Key() string {
@@ -124,7 +125,7 @@
 }
 
 func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
-	return ld.blobSumService.GetDiffID(ld.digest)
+	return ld.V2MetadataService.GetDiffID(ld.digest)
 }
 
 func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
@@ -196,7 +197,7 @@
 
 func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
 	// Cache mapping from this layer's DiffID to the blobsum
-	ld.blobSumService.Add(diffID, ld.digest)
+	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()})
 }
 
 func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
@@ -333,9 +334,10 @@
 		}
 
 		layerDescriptor := &v2LayerDescriptor{
-			digest:         blobSum,
-			repo:           p.repo,
-			blobSumService: p.blobSumService,
+			digest:            blobSum,
+			repoInfo:          p.repoInfo,
+			repo:              p.repo,
+			V2MetadataService: p.V2MetadataService,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)
@@ -398,9 +400,10 @@
 	// to top-most, so that the downloads slice gets ordered correctly.
 	for _, d := range mfst.References() {
 		layerDescriptor := &v2LayerDescriptor{
-			digest:         d.Digest,
-			repo:           p.repo,
-			blobSumService: p.blobSumService,
+			digest:            d.Digest,
+			repo:              p.repo,
+			repoInfo:          p.repoInfo,
+			V2MetadataService: p.V2MetadataService,
 		}
 
 		descriptors = append(descriptors, layerDescriptor)
diff --git a/distribution/push.go b/distribution/push.go
index 092b07f..445f6bb 100644
--- a/distribution/push.go
+++ b/distribution/push.go
@@ -71,11 +71,11 @@
 	switch endpoint.Version {
 	case registry.APIVersion2:
 		return &v2Pusher{
-			blobSumService: metadata.NewBlobSumService(imagePushConfig.MetadataStore),
-			ref:            ref,
-			endpoint:       endpoint,
-			repoInfo:       repoInfo,
-			config:         imagePushConfig,
+			v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore),
+			ref:               ref,
+			endpoint:          endpoint,
+			repoInfo:          repoInfo,
+			config:            imagePushConfig,
 		}, nil
 	case registry.APIVersion1:
 		return &v1Pusher{
diff --git a/distribution/push_v2.go b/distribution/push_v2.go
index 98fb13e..68c8f69 100644
--- a/distribution/push_v2.go
+++ b/distribution/push_v2.go
@@ -11,6 +11,7 @@
 	"github.com/docker/distribution/digest"
 	"github.com/docker/distribution/manifest/schema1"
 	"github.com/docker/distribution/manifest/schema2"
+	distreference "github.com/docker/distribution/reference"
 	"github.com/docker/distribution/registry/client"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
@@ -34,12 +35,12 @@
 }
 
 type v2Pusher struct {
-	blobSumService *metadata.BlobSumService
-	ref            reference.Named
-	endpoint       registry.APIEndpoint
-	repoInfo       *registry.RepositoryInfo
-	config         *ImagePushConfig
-	repo           distribution.Repository
+	v2MetadataService *metadata.V2MetadataService
+	ref               reference.Named
+	endpoint          registry.APIEndpoint
+	repoInfo          *registry.RepositoryInfo
+	config            *ImagePushConfig
+	repo              distribution.Repository
 
 	// pushState is state built by the Download functions.
 	pushState pushState
@@ -130,9 +131,10 @@
 	var descriptors []xfer.UploadDescriptor
 
 	descriptorTemplate := v2PushDescriptor{
-		blobSumService: p.blobSumService,
-		repo:           p.repo,
-		pushState:      &p.pushState,
+		v2MetadataService: p.v2MetadataService,
+		repoInfo:          p.repoInfo,
+		repo:              p.repo,
+		pushState:         &p.pushState,
 	}
 
 	// Loop bounds condition is to avoid pushing the base layer on Windows.
@@ -209,10 +211,11 @@
 }
 
 type v2PushDescriptor struct {
-	layer          layer.Layer
-	blobSumService *metadata.BlobSumService
-	repo           distribution.Repository
-	pushState      *pushState
+	layer             layer.Layer
+	v2MetadataService *metadata.V2MetadataService
+	repoInfo          reference.Named
+	repo              distribution.Repository
+	pushState         *pushState
 }
 
 func (pd *v2PushDescriptor) Key() string {
@@ -240,10 +243,10 @@
 	}
 	pd.pushState.Unlock()
 
-	// Do we have any blobsums associated with this layer's DiffID?
-	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
+	// Do we have any metadata associated with this layer's DiffID?
+	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
 	if err == nil {
-		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
+		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
 		if err != nil {
 			progress.Update(progressOutput, pd.ID(), "Image push failed")
 			return retryOnError(err)
@@ -263,8 +266,69 @@
 	// then push the blob.
 	bs := pd.repo.Blobs(ctx)
 
+	var mountFrom metadata.V2Metadata
+
+	// Attempt to find another repository in the same registry to mount the layer from, avoiding an unnecessary upload
+	for _, metadata := range v2Metadata {
+		sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
+		if err != nil {
+			continue
+		}
+		if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
+			logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
+			mountFrom = metadata
+			break
+		}
+	}
+
+	var createOpts []distribution.BlobCreateOption
+
+	if mountFrom.SourceRepository != "" {
+		namedRef, err := reference.WithName(mountFrom.SourceRepository)
+		if err != nil {
+			return err
+		}
+
+		// TODO (brianbland): We need to construct a reference where the Name is
+		// only the full remote name, so clean this up when distribution has a
+		// richer reference package
+		remoteRef, err := distreference.WithName(namedRef.RemoteName())
+		if err != nil {
+			return err
+		}
+
+		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
+		if err != nil {
+			return err
+		}
+
+		createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
+	}
+
 	// Send the layer
-	layerUpload, err := bs.Create(ctx)
+	layerUpload, err := bs.Create(ctx, createOpts...)
+	switch err := err.(type) {
+	case distribution.ErrBlobMounted:
+		progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())
+
+		pd.pushState.Lock()
+		pd.pushState.confirmedV2 = true
+		pd.pushState.remoteLayers[diffID] = err.Descriptor
+		pd.pushState.Unlock()
+
+		// Cache mapping from this layer's DiffID to the mounted digest
+		if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
+			return xfer.DoNotRetry{Err: err}
+		}
+
+		return nil
+	}
+	if mountFrom.SourceRepository != "" {
+		// unable to mount layer from this repository, so this source mapping is no longer valid
+		logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
+		pd.v2MetadataService.Remove(mountFrom)
+	}
+
 	if err != nil {
 		return retryOnError(err)
 	}
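
In short, the pusher now asks the registry for a cross-repository mount before falling back to a full upload. A condensed sketch of that decision, assuming the same `docker/distribution` client APIs used in the hunk above (`WithMountFrom`, `ErrBlobMounted`); the helper name is mine:

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	distreference "github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
	"golang.org/x/net/context"
)

// tryMountOrUpload asks the registry to link (mount) an existing blob from
// sourceRepo into the current repository; when the registry cannot, it
// falls back to opening a regular upload.
func tryMountOrUpload(ctx context.Context, bs distribution.BlobStore, sourceRepo string, dgst digest.Digest) (distribution.BlobWriter, bool, error) {
	var opts []distribution.BlobCreateOption
	if sourceRepo != "" {
		if named, err := distreference.WithName(sourceRepo); err == nil {
			if canonical, err := distreference.WithDigest(named, dgst); err == nil {
				opts = append(opts, client.WithMountFrom(canonical))
			}
		}
	}
	upload, err := bs.Create(ctx, opts...)
	if _, ok := err.(distribution.ErrBlobMounted); ok {
		// Mounted server side: the blob is now linked into this repository
		// and no bytes need to be transferred.
		return nil, true, nil
	}
	return upload, false, err
}
```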
@@ -300,7 +364,7 @@
 	progress.Update(progressOutput, pd.ID(), "Pushed")
 
 	// Cache mapping from this layer's DiffID to the blobsum
-	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
+	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
 		return xfer.DoNotRetry{Err: err}
 	}
 
@@ -329,12 +393,16 @@
 	return pd.pushState.remoteLayers[pd.DiffID()]
 }
 
-// blobSumAlreadyExists checks if the registry already know about any of the
-// blobsums passed in the "blobsums" slice. If it finds one that the registry
+// layerAlreadyExists checks if the registry already knows about any of the
+// metadata passed in the "metadata" slice. If it finds one that the registry
 // knows about, it returns the known digest and "true".
-func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
-	for _, dgst := range blobsums {
-		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
+func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
+	for _, meta := range metadata {
+		// Only check blobsums that are known to this repository or have an unknown source
+		if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
+			continue
+		}
+		descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
 		switch err {
 		case nil:
 			descriptor.MediaType = schema2.MediaTypeLayer
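
The source-repository filter above exists because a digest cached from another repository may not be present in this one; those entries are instead candidates for the mount path shown earlier. Expressed as a hypothetical helper (names are mine, not from this change):

```go
package example

import (
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
)

// candidateDigests keeps only the digests worth a Stat call against the
// target repository: entries recorded for that same repository, plus
// entries with an unknown source (cache entries written before
// SourceRepository was tracked).
func candidateDigests(entries []metadata.V2Metadata, repoFullName string) []digest.Digest {
	var out []digest.Digest
	for _, m := range entries {
		if m.SourceRepository == "" || m.SourceRepository == repoFullName {
			out = append(out, m.Digest)
		}
	}
	return out
}
```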
diff --git a/docker/common.go b/docker/common.go
index 2509246..893de71 100644
--- a/docker/common.go
+++ b/docker/common.go
@@ -21,7 +21,6 @@
 )
 
 var (
-	daemonFlags *flag.FlagSet
 	commonFlags = &cli.CommonFlags{FlagSet: new(flag.FlagSet)}
 
 	dockerCertPath  = os.Getenv("DOCKER_CERT_PATH")
@@ -50,7 +49,7 @@
 	cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, defaultCertFile), "Path to TLS certificate file")
 	cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, defaultKeyFile), "Path to TLS key file")
 
-	cmd.Var(opts.NewListOptsRef(&commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to")
+	cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to")
 }
 
 func postParseCommon() {
@@ -67,11 +66,6 @@
 		logrus.SetLevel(logrus.InfoLevel)
 	}
 
-	if commonFlags.Debug {
-		os.Setenv("DEBUG", "1")
-		logrus.SetLevel(logrus.DebugLevel)
-	}
-
 	// Regardless of whether the user sets it to true or false, if they
 	// specify --tlsverify at all then we need to turn on tls
 	// TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need to check that here as well
diff --git a/docker/daemon.go b/docker/daemon.go
index e65cb77..a842212 100644
--- a/docker/daemon.go
+++ b/docker/daemon.go
@@ -30,23 +30,34 @@
 	"github.com/docker/go-connections/tlsconfig"
 )
 
-const daemonUsage = "       docker daemon [ --help | ... ]\n"
+const (
+	daemonUsage          = "       docker daemon [ --help | ... ]\n"
+	daemonConfigFileFlag = "-config-file"
+)
 
 var (
 	daemonCli cli.Handler = NewDaemonCli()
 )
 
+// DaemonCli represents the daemon CLI.
+type DaemonCli struct {
+	*daemon.Config
+	registryOptions *registry.Options
+	flags           *flag.FlagSet
+}
+
 func presentInHelp(usage string) string { return usage }
 func absentFromHelp(string) string      { return "" }
 
 // NewDaemonCli returns a pre-configured daemon CLI
 func NewDaemonCli() *DaemonCli {
-	daemonFlags = cli.Subcmd("daemon", nil, "Enable daemon mode", true)
+	daemonFlags := cli.Subcmd("daemon", nil, "Enable daemon mode", true)
 
 	// TODO(tiborvass): remove InstallFlags?
 	daemonConfig := new(daemon.Config)
 	daemonConfig.LogConfig.Config = make(map[string]string)
 	daemonConfig.ClusterOpts = make(map[string]string)
+
 	daemonConfig.InstallFlags(daemonFlags, presentInHelp)
 	daemonConfig.InstallFlags(flag.CommandLine, absentFromHelp)
 	registryOptions := new(registry.Options)
@@ -57,6 +68,7 @@
 	return &DaemonCli{
 		Config:          daemonConfig,
 		registryOptions: registryOptions,
+		flags:           daemonFlags,
 	}
 }
 
@@ -101,12 +113,6 @@
 	return nil
 }
 
-// DaemonCli represents the daemon CLI.
-type DaemonCli struct {
-	*daemon.Config
-	registryOptions *registry.Options
-}
-
 func getGlobalFlag() (globalFlag *flag.Flag) {
 	defer func() {
 		if x := recover(); x != nil {
@@ -136,15 +142,27 @@
 		os.Exit(1)
 	} else {
 		// allow new form `docker daemon -D`
-		flag.Merge(daemonFlags, commonFlags.FlagSet)
+		flag.Merge(cli.flags, commonFlags.FlagSet)
 	}
 
-	daemonFlags.ParseFlags(args, true)
+	configFile := cli.flags.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file")
+
+	cli.flags.ParseFlags(args, true)
 	commonFlags.PostParse()
 
 	if commonFlags.TrustKey == "" {
 		commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), defaultTrustKeyFile)
 	}
+	cliConfig, err := loadDaemonCliConfig(cli.Config, cli.flags, commonFlags, *configFile)
+	if err != nil {
+		fmt.Fprint(os.Stderr, err)
+		os.Exit(1)
+	}
+	cli.Config = cliConfig
+
+	if cli.Config.Debug {
+		utils.EnableDebug()
+	}
 
 	if utils.ExperimentalBuild() {
 		logrus.Warn("Running experimental build")
@@ -184,12 +202,18 @@
 	serverConfig = setPlatformServerConfig(serverConfig, cli.Config)
 
 	defaultHost := opts.DefaultHost
-	if commonFlags.TLSOptions != nil {
-		if !commonFlags.TLSOptions.InsecureSkipVerify {
-			// server requires and verifies client's certificate
-			commonFlags.TLSOptions.ClientAuth = tls.RequireAndVerifyClientCert
+	if cli.Config.TLS {
+		tlsOptions := tlsconfig.Options{
+			CAFile:   cli.Config.TLSOptions.CAFile,
+			CertFile: cli.Config.TLSOptions.CertFile,
+			KeyFile:  cli.Config.TLSOptions.KeyFile,
 		}
-		tlsConfig, err := tlsconfig.Server(*commonFlags.TLSOptions)
+
+		if cli.Config.TLSVerify {
+			// server requires and verifies client's certificate
+			tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
+		}
+		tlsConfig, err := tlsconfig.Server(tlsOptions)
 		if err != nil {
 			logrus.Fatal(err)
 		}
@@ -197,22 +221,23 @@
 		defaultHost = opts.DefaultTLSHost
 	}
 
-	if len(commonFlags.Hosts) == 0 {
-		commonFlags.Hosts = make([]string, 1)
+	if len(cli.Config.Hosts) == 0 {
+		cli.Config.Hosts = make([]string, 1)
 	}
-	for i := 0; i < len(commonFlags.Hosts); i++ {
+	for i := 0; i < len(cli.Config.Hosts); i++ {
 		var err error
-		if commonFlags.Hosts[i], err = opts.ParseHost(defaultHost, commonFlags.Hosts[i]); err != nil {
-			logrus.Fatalf("error parsing -H %s : %v", commonFlags.Hosts[i], err)
+		if cli.Config.Hosts[i], err = opts.ParseHost(defaultHost, cli.Config.Hosts[i]); err != nil {
+			logrus.Fatalf("error parsing -H %s : %v", cli.Config.Hosts[i], err)
 		}
-	}
-	for _, protoAddr := range commonFlags.Hosts {
+
+		protoAddr := cli.Config.Hosts[i]
 		protoAddrParts := strings.SplitN(protoAddr, "://", 2)
 		if len(protoAddrParts) != 2 {
 			logrus.Fatalf("bad format %s, expected PROTO://ADDR", protoAddr)
 		}
 		serverConfig.Addrs = append(serverConfig.Addrs, apiserver.Addr{Proto: protoAddrParts[0], Addr: protoAddrParts[1]})
 	}
+
 	api, err := apiserver.New(serverConfig)
 	if err != nil {
 		logrus.Fatal(err)
@@ -245,18 +270,21 @@
 
 	api.InitRouters(d)
 
+	reload := func(config *daemon.Config) {
+		if err := d.Reload(config); err != nil {
+			logrus.Errorf("Error reconfiguring the daemon: %v", err)
+			return
+		}
+		api.Reload(config)
+	}
+
+	setupConfigReloadTrap(*configFile, cli.flags, reload)
+
 	// The serve API routine never exits unless an error occurs
 	// We need to start it as a goroutine and wait on it so
 	// daemon doesn't exit
 	serveAPIWait := make(chan error)
-	go func() {
-		if err := api.ServeAPI(); err != nil {
-			logrus.Errorf("ServeAPI error: %v", err)
-			serveAPIWait <- err
-			return
-		}
-		serveAPIWait <- nil
-	}()
+	go api.Wait(serveAPIWait)
 
 	signal.Trap(func() {
 		api.Close()
@@ -303,3 +331,34 @@
 		logrus.Error("Force shutdown daemon")
 	}
 }
+
+func loadDaemonCliConfig(config *daemon.Config, daemonFlags *flag.FlagSet, commonConfig *cli.CommonFlags, configFile string) (*daemon.Config, error) {
+	config.Debug = commonConfig.Debug
+	config.Hosts = commonConfig.Hosts
+	config.LogLevel = commonConfig.LogLevel
+	config.TLS = commonConfig.TLS
+	config.TLSVerify = commonConfig.TLSVerify
+	config.TLSOptions = daemon.CommonTLSOptions{}
+
+	if commonConfig.TLSOptions != nil {
+		config.TLSOptions.CAFile = commonConfig.TLSOptions.CAFile
+		config.TLSOptions.CertFile = commonConfig.TLSOptions.CertFile
+		config.TLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile
+	}
+
+	if configFile != "" {
+		c, err := daemon.MergeDaemonConfigurations(config, daemonFlags, configFile)
+		if err != nil {
+			if daemonFlags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) {
+				return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v\n", configFile, err)
+			}
+		}
+		// The merged configuration can be nil if the config file didn't exist.
+		// Leave the current configuration as it is when that happens.
+		if c != nil {
+			config = c
+		}
+	}
+
+	return config, nil
+}
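
The conflict rule that `loadDaemonCliConfig` relies on (via `daemon.MergeDaemonConfigurations`) boils down to: an option may come from the flags or from the file, never both. A hypothetical illustration of the rule, not the actual merge code:

```go
package example

// findConflicts returns the config-file keys that were also set on the
// command line. The daemon refuses to start (or to reload) when this list
// is non-empty, regardless of whether the two values agree.
func findConflicts(fileKeys []string, isFlagSet func(name string) bool) []string {
	var conflicts []string
	for _, key := range fileKeys {
		if isFlagSet(key) {
			conflicts = append(conflicts, key)
		}
	}
	return conflicts
}
```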
diff --git a/docker/daemon_test.go b/docker/daemon_test.go
new file mode 100644
index 0000000..bc519e7
--- /dev/null
+++ b/docker/daemon_test.go
@@ -0,0 +1,91 @@
+// +build daemon
+
+package main
+
+import (
+	"io/ioutil"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/cli"
+	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/mflag"
+	"github.com/docker/go-connections/tlsconfig"
+)
+
+func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) {
+	c := &daemon.Config{}
+	common := &cli.CommonFlags{
+		Debug: true,
+	}
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if loadedConfig == nil {
+		t.Fatalf("expected configuration %v, got nil", c)
+	}
+	if !loadedConfig.Debug {
+		t.Fatalf("expected debug to be copied from the common flags, got false")
+	}
+}
+
+func TestLoadDaemonCliConfigWithTLS(t *testing.T) {
+	c := &daemon.Config{}
+	common := &cli.CommonFlags{
+		TLS: true,
+		TLSOptions: &tlsconfig.Options{
+			CAFile: "/tmp/ca.pem",
+		},
+	}
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if loadedConfig == nil {
+		t.Fatalf("expected configuration %v, got nil", c)
+	}
+	if loadedConfig.TLSOptions.CAFile != "/tmp/ca.pem" {
+		t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.TLSOptions.CAFile, loadedConfig)
+	}
+}
+
+func TestLoadDaemonCliConfigWithConflicts(t *testing.T) {
+	c := &daemon.Config{}
+	common := &cli.CommonFlags{}
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{"labels": ["l3=foo"]}`))
+	f.Close()
+
+	var labels []string
+
+	flags := mflag.NewFlagSet("test", mflag.ContinueOnError)
+	flags.String([]string{daemonConfigFileFlag}, "", "")
+	flags.Var(opts.NewNamedListOptsRef("labels", &labels, opts.ValidateLabel), []string{"-label"}, "")
+
+	flags.Set(daemonConfigFileFlag, configFile)
+	if err := flags.Set("-label", "l1=bar"); err != nil {
+		t.Fatal(err)
+	}
+	if err := flags.Set("-label", "l2=baz"); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = loadDaemonCliConfig(c, flags, common, configFile)
+	if err == nil {
+		t.Fatalf("expected configuration error, got nil")
+	}
+	if !strings.Contains(err.Error(), "labels") {
+		t.Fatalf("expected labels conflict, got %v", err)
+	}
+}
diff --git a/docker/daemon_unix.go b/docker/daemon_unix.go
index 7754130..eba0bee 100644
--- a/docker/daemon_unix.go
+++ b/docker/daemon_unix.go
@@ -5,15 +5,19 @@
 import (
 	"fmt"
 	"os"
+	"os/signal"
 	"syscall"
 
 	apiserver "github.com/docker/docker/api/server"
 	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/mflag"
 	"github.com/docker/docker/pkg/system"
 
 	_ "github.com/docker/docker/daemon/execdriver/native"
 )
 
+const defaultDaemonConfigFile = "/etc/docker/daemon.json"
+
 func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
 	serverConfig.SocketGroup = daemonCfg.SocketGroup
 	serverConfig.EnableCors = daemonCfg.EnableCors
@@ -48,3 +52,14 @@
 func getDaemonConfDir() string {
 	return "/etc/docker"
 }
+
+// setupConfigReloadTrap configures the SIGHUP signal to reload the configuration.
+func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) {
+	c := make(chan os.Signal, 1)
+	signal.Notify(c, syscall.SIGHUP)
+	go func() {
+		for range c {
+			daemon.ReloadConfiguration(configFile, flags, reload)
+		}
+	}()
+}
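
Stripped of the daemon specifics, this is the standard `os/signal` pattern; a standalone equivalent using only the standard library:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	// Block forever, re-reading configuration on every SIGHUP.
	for range c {
		fmt.Println("SIGHUP received: reload configuration here")
	}
}
```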
diff --git a/docker/daemon_windows.go b/docker/daemon_windows.go
index a930152..307bbcc 100644
--- a/docker/daemon_windows.go
+++ b/docker/daemon_windows.go
@@ -3,12 +3,19 @@
 package main
 
 import (
+	"fmt"
 	"os"
+	"syscall"
 
+	"github.com/Sirupsen/logrus"
 	apiserver "github.com/docker/docker/api/server"
 	"github.com/docker/docker/daemon"
+	"github.com/docker/docker/pkg/mflag"
+	"github.com/docker/docker/pkg/system"
 )
 
+var defaultDaemonConfigFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker" + string(os.PathSeparator) + "config" + string(os.PathSeparator) + "daemon.json"
+
 func setPlatformServerConfig(serverConfig *apiserver.Config, daemonCfg *daemon.Config) *apiserver.Config {
 	return serverConfig
 }
@@ -31,3 +38,20 @@
 // notifySystem sends a message to the host when the server is ready to be used
 func notifySystem() {
 }
+
+// setupConfigReloadTrap configures a Win32 event to reload the configuration.
+func setupConfigReloadTrap(configFile string, flags *mflag.FlagSet, reload func(*daemon.Config)) {
+	go func() {
+		sa := syscall.SecurityAttributes{
+			Length: 0,
+		}
+		ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid())
+		if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 {
+			logrus.Debugf("Config reload - waiting signal at %s", ev)
+			for {
+				syscall.WaitForSingleObject(h, syscall.INFINITE)
+				daemon.ReloadConfiguration(configFile, flags, reload)
+			}
+		}
+	}()
+}
diff --git a/docs/installation/binaries.md b/docs/installation/binaries.md
index 2f1d55d..b5f56d0 100644
--- a/docs/installation/binaries.md
+++ b/docs/installation/binaries.md
@@ -186,7 +186,7 @@
 
 > **Warning**: 
 > The *docker* group (or the group specified with `-G`) is root-equivalent;
-> see [*Docker Daemon Attack Surface*](../articles/security.md#docker-daemon-attack-surface) details.
+> see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 ## Upgrades
 
diff --git a/docs/installation/centos.md b/docs/installation/centos.md
index e447e21..84a9b79 100644
--- a/docs/installation/centos.md
+++ b/docs/installation/centos.md
@@ -134,7 +134,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/debian.md b/docs/installation/debian.md
index 154650f..2621507 100644
--- a/docs/installation/debian.md
+++ b/docs/installation/debian.md
@@ -17,8 +17,8 @@
  - [*Debian 8.0 Jessie (64-bit)*](#debian-jessie-80-64-bit)
  - [*Debian 7.7 Wheezy (64-bit)*](#debian-wheezy-stable-7-x-64-bit)
 
- >**Note**: If you previously installed Docker using `apt`, make sure you update
- your `apt` sources to the new `apt` repository.
+ >**Note**: If you previously installed Docker using `APT`, make sure you update
+ your `APT` sources to the new `APT` repository.
 
 ## Prerequisites
 
@@ -37,7 +37,7 @@
 
 ### Update your apt repository
 
-Docker's `apt` repository contains Docker 1.7.1 and higher. To set `apt` to use
+Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use packages
 from the new repository:
 
  1. If you haven't already done so, log into your machine as a user with `sudo` or `root` privileges.
@@ -49,17 +49,22 @@
          $ apt-get purge lxc-docker*
          $ apt-get purge docker.io*
 
- 4. Add the new `gpg` key.
+ 4. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed.
+
+         $ apt-get update
+         $ apt-get install apt-transport-https ca-certificates
+
+ 5. Add the new `GPG` key.
 
          $ apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
 
- 5. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
+ 6. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
 
      If the file doesn't exist, create it.
 
- 6. Remove any existing entries.
+ 7. Remove any existing entries.
 
- 7. Add an entry for your Debian operating system.
+ 8. Add an entry for your Debian operating system.
 
      The possible entries are:
 
@@ -80,23 +85,23 @@
     > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources)
     > for details.
 
- 8. Save and close the file.
+ 9. Save and close the file.
 
- 9. Update the `apt` package index.
+ 10. Update the `APT` package index.
 
          $ apt-get update
 
- 10. Verify that `apt` is pulling from the right repository.
+ 11. Verify that `APT` is pulling from the right repository.
 
          $ apt-cache policy docker-engine
 
-     From now on when you run `apt-get upgrade`, `apt` pulls from the new apt repository.  
+     From now on when you run `apt-get upgrade`, `APT` pulls from the new repository.
 
 ## Install Docker
 
-Before installing Docker, make sure you have set your `apt` repository correctly as described in the prerequisites.
+Before installing Docker, make sure you have set your `APT` repository correctly as described in the prerequisites.
 
-1. Update the `apt` package index.
+1. Update the `APT` package index.
 
         $ sudo apt-get update
 
@@ -133,7 +138,7 @@
 
 > **Warning**:
 > The `docker` group (or the group specified with the `-G` flag) is
-> `root`-equivalent; see [*Docker Daemon Attack Surface*](../articles/security.md#docker-daemon-attack-surface) details.
+> `root`-equivalent; see [*Docker Daemon Attack Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 **Example:**
 
diff --git a/docs/installation/fedora.md b/docs/installation/fedora.md
index 3e9dd5d..b45a5de 100644
--- a/docs/installation/fedora.md
+++ b/docs/installation/fedora.md
@@ -128,7 +128,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/oracle.md b/docs/installation/oracle.md
index e189558..56c96aa 100644
--- a/docs/installation/oracle.md
+++ b/docs/installation/oracle.md
@@ -99,7 +99,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/rhel.md b/docs/installation/rhel.md
index 6c20f27..b550a37 100644
--- a/docs/installation/rhel.md
+++ b/docs/installation/rhel.md
@@ -126,7 +126,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/installation/ubuntulinux.md b/docs/installation/ubuntulinux.md
index 78731da..b888e33 100644
--- a/docs/installation/ubuntulinux.md
+++ b/docs/installation/ubuntulinux.md
@@ -22,7 +22,7 @@
 of Docker. If you wish to install using Ubuntu-managed packages, consult your
 Ubuntu documentation.
 
->**Note**: Ubuntu Utopic 14.10 and 15.04 exist in Docker's `apt` repository but
+>**Note**: Ubuntu Utopic 14.10 and 15.04 exist in Docker's `APT` repository but
 > are no longer officially supported.
 
 ## Prerequisites
@@ -41,29 +41,34 @@
     $ uname -r
     3.11.0-15-generic
 
->**Note**: If you previously installed Docker using `apt`, make sure you update
-your `apt` sources to the new Docker repository.
+>**Note**: If you previously installed Docker using `APT`, make sure you update
+your `APT` sources to the new Docker repository.
 
 ### Update your apt sources
 
-Docker's `apt` repository contains Docker 1.7.1 and higher. To set `apt` to use
+Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use
 packages from the new repository:
 
 1. If you haven't already done so, log into your Ubuntu instance as a privileged user.
 
 2. Open a terminal window.
 
-3. Add the new `gpg` key.
+3. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed.
+
+         $ apt-get update
+         $ apt-get install apt-transport-https ca-certificates
+
+4. Add the new `GPG` key.
 
         $ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
 
-4. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
+5. Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor.
 
     If the file doesn't exist, create it.
 
-5. Remove any existing entries.
+6. Remove any existing entries.
 
-6. Add an entry for your Ubuntu operating system.
+7. Add an entry for your Ubuntu operating system.
 
     The possible entries are:
 
@@ -84,21 +89,21 @@
     > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources)
     > for details.
 
-7. Save and close the `/etc/apt/sources.list.d/docker.list` file.
+8. Save and close the `/etc/apt/sources.list.d/docker.list` file.
 
-8. Update the `apt` package index.
+9. Update the `APT` package index.
 
         $ apt-get update
 
-9. Purge the old repo if it exists.
+10. Purge the old repo if it exists.
 
         $ apt-get purge lxc-docker
 
-10. Verify that `apt` is pulling from the right repository.
+11. Verify that `APT` is pulling from the right repository.
 
         $ apt-cache policy docker-engine
 
-    From now on when you run `apt-get upgrade`, `apt` pulls from the new repository.  
+    From now on when you run `apt-get upgrade`, `APT` pulls from the new repository.
 
 ### Prerequisites by Ubuntu Version
 
@@ -183,7 +188,7 @@
 
 1. Log into your Ubuntu installation as a user with `sudo` privileges.
 
-2. Update your `apt` package index.
+2. Update your `APT` package index.
 
         $ sudo apt-get update
 
@@ -225,7 +230,7 @@
 
 >**Warning**: The `docker` group is equivalent to the `root` user; For details
 >on how this impacts security in your system, see [*Docker Daemon Attack
->Surface*](../articles/security.md#docker-daemon-attack-surface) for details.
+>Surface*](../security/security.md#docker-daemon-attack-surface) for details.
 
 To create the `docker` group and add your user:
 
diff --git a/docs/reference/api/docker_remote_api.md b/docs/reference/api/docker_remote_api.md
index c0ac75f..a7d8fdb 100644
--- a/docs/reference/api/docker_remote_api.md
+++ b/docs/reference/api/docker_remote_api.md
@@ -117,6 +117,11 @@
 * `POST /networks/create` now supports restricting external access to the network by setting the `internal` field.
 * `POST /networks/(id)/disconnect` now includes a `Force` option to forcefully disconnect a container from network
 * `GET /containers/(id)/json` now returns the `NetworkID` of containers.
+* `POST /networks/create` now supports an options field in the IPAM config that provides options
+  for custom IPAM plugins.
+* `GET /networks/(id)` now returns IPAM config options for custom IPAM plugins if any
+  are available.
+* `GET /networks/(id)` now returns subnet information for user-defined networks.
 
 ### v1.21 API changes
 
diff --git a/docs/reference/api/docker_remote_api_v1.22.md b/docs/reference/api/docker_remote_api_v1.22.md
index 100659f..d523b95 100644
--- a/docs/reference/api/docker_remote_api_v1.22.md
+++ b/docs/reference/api/docker_remote_api_v1.22.md
@@ -2937,7 +2937,7 @@
 
 **Example request**:
 
-    GET /networks/f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566 HTTP/1.1
+    GET /networks/7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99 HTTP/1.1
 
 **Example response**:
 
@@ -2946,24 +2946,28 @@
 Content-Type: application/json
 
 {
-  "Name": "bridge",
-  "Id": "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566",
+  "Name": "net01",
+  "Id": "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99",
   "Scope": "local",
   "Driver": "bridge",
   "IPAM": {
     "Driver": "default",
     "Config": [
       {
-        "Subnet": "172.17.0.0/16"
+        "Subnet": "172.19.0.0/16",
+        "Gateway": "172.19.0.1/16"
       }
-    ]
+    ],
+    "Options": {
+        "foo": "bar"
+    }
   },
   "Containers": {
-    "39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867": {
-      "Name": "mad_mclean",
-      "EndpointID": "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda",
-      "MacAddress": "02:42:ac:11:00:02",
-      "IPv4Address": "172.17.0.2/16",
+    "19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c": {
+      "Name": "test",
+      "EndpointID": "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a",
+      "MacAddress": "02:42:ac:13:00:02",
+      "IPv4Address": "172.19.0.2/16",
       "IPv6Address": ""
     }
   },
@@ -3003,7 +3007,10 @@
       "Subnet":"172.20.0.0/16",
       "IPRange":"172.20.10.0/24",
       "Gateway":"172.20.10.11"
-    }]
+    }],
+    "Options": {
+        "foo": "bar"
+    }
   },
   "Internal":true
 }
diff --git a/docs/reference/commandline/cli.md b/docs/reference/commandline/cli.md
index e3773f7..26d2469 100644
--- a/docs/reference/commandline/cli.md
+++ b/docs/reference/commandline/cli.md
@@ -107,8 +107,8 @@
 Once attached to a container, users detach from it and leave it running using
 the `CTRL-p CTRL-q` key sequence. This detach key sequence is customizable
 using the `detachKeys` property. Specify a `<sequence>` value for the
-property. The format of the `<sequence>` is either a letter [a-Z], or the `ctrl-`
-combined with any of the following:
+property. The format of the `<sequence>` is a comma-separated list of either
+a letter [a-Z], or `ctrl-` combined with any of the following:
 
* `a-z` (a single lowercase alpha character)
* `@` (at sign)
diff --git a/docs/reference/commandline/daemon.md b/docs/reference/commandline/daemon.md
index a6f3992..856d913 100644
--- a/docs/reference/commandline/daemon.md
+++ b/docs/reference/commandline/daemon.md
@@ -27,6 +27,7 @@
       --cluster-store=""                     URL of the distributed storage backend
       --cluster-advertise=""                 Address of the daemon instance on the cluster
       --cluster-store-opt=map[]              Set cluster options
+      --config-file=/etc/docker/daemon.json  Daemon configuration file
       --dns=[]                               DNS server to use
       --dns-opt=[]                           DNS options to use
       --dns-search=[]                        DNS search domains to use
@@ -788,7 +789,7 @@
     /usr/local/bin/docker daemon -D -g /var/lib/docker -H unix:// > /var/lib/docker-machine/docker.log 2>&1
 
 
-# Default cgroup parent
+## Default cgroup parent
 
 The `--cgroup-parent` option allows you to set the default cgroup parent
 to use for containers. If this option is not set, it defaults to `/docker` for
@@ -806,3 +807,79 @@
 This setting can also be set per container, using the `--cgroup-parent`
 option on `docker create` and `docker run`, and takes precedence over
 the `--cgroup-parent` option on the daemon.
+
+## Daemon configuration file
+
+The `--config-file` option allows you to set any configuration option
+for the daemon in JSON format. This file uses the same flag names as keys,
+except for flags that allow several entries, where it uses the plural
+of the flag name, e.g., `labels` for the `label` flag. By default,
+Docker tries to load a configuration file from `/etc/docker/daemon.json`
+on Linux and `%programdata%\docker\config\daemon.json` on Windows.
+
+The options set in the configuration file must not conflict with options set
+via flags. The Docker daemon fails to start if an option is duplicated between
+the file and the flags, regardless of their value. We do this to avoid
+silently ignoring changes introduced in configuration reloads.
+For example, the daemon fails to start if you set daemon labels
+in the configuration file and also set daemon labels via the `--label` flag.
+
+Options that are not present in the file are ignored when the daemon starts.
+This is a full example of the allowed configuration options in the file:
+
+```json
+{
+	"authorization-plugins": [],
+	"dns": [],
+	"dns-opts": [],
+	"dns-search": [],
+	"exec-opts": [],
+	"exec-root": "",
+	"storage-driver": "",
+	"storage-opts": "",
+	"labels": [],
+	"log-config": {
+		"log-driver": "",
+		"log-opts": []
+	},
+	"mtu": 0,
+	"pidfile": "",
+	"graph": "",
+	"cluster-store": "",
+	"cluster-store-opts": [],
+	"cluster-advertise": "",
+	"debug": true,
+	"hosts": [],
+	"log-level": "",
+	"tls": true,
+	"tls-verify": true,
+	"tls-opts": {
+		"tlscacert": "",
+		"tlscert": "",
+		"tlskey": ""
+	},
+	"api-cors-headers": "",
+	"selinux-enabled": false,
+	"userns-remap": "",
+	"group": "",
+	"cgroup-parent": "",
+	"default-ulimits": {}
+}
+```
+
+### Configuration reloading
+
+Some options can be reconfigured while the daemon is running, without
+restarting the process. We use the `SIGHUP` signal on Linux to reload, and a global event
+on Windows with the key `Global\docker-daemon-config-$PID`. The options can
+be modified in the configuration file, but the daemon still checks them for
+conflicts with the provided flags; it fails to reconfigure itself if there
+are conflicts, but it won't stop execution. A minimal reload trigger is sketched below.
+
+The options that can currently be reconfigured are:
+
+- `debug`: switches the daemon to debug mode when set to true.
+- `label`: replaces the daemon labels with a new set of labels.
+- `cluster-store`: reloads the discovery store with the new address.
+- `cluster-store-opts`: uses the new options to reload the discovery store.
+- `cluster-advertise`: modifies the address advertised after reloading.
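
The sketch promised above: triggering a reload from code on Linux, equivalent to `kill -HUP $(cat /var/run/docker.pid)`. The pidfile location is the usual default and is an assumption here; adjust it if the daemon runs with a custom `-p`/`--pidfile`.

```go
package main

import (
	"io/ioutil"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	// Read the daemon's PID from the (assumed) default pidfile.
	raw, err := ioutil.ReadFile("/var/run/docker.pid")
	if err != nil {
		panic(err)
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(raw)))
	if err != nil {
		panic(err)
	}
	// SIGHUP makes the daemon re-read its configuration file, as described above.
	if err := syscall.Kill(pid, syscall.SIGHUP); err != nil {
		panic(err)
	}
}
```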
diff --git a/docs/reference/commandline/login.md b/docs/reference/commandline/login.md
index b79c18b..faf3615 100644
--- a/docs/reference/commandline/login.md
+++ b/docs/reference/commandline/login.md
@@ -30,7 +30,7 @@
 `docker login` requires user to use `sudo` or be `root`, except when: 
 
 1.  connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`.
-2.  user is added to the `docker` group.  This will impact the security of your system; the `docker` group is `root` equivalent.  See [Docker Daemon Attack Surface](https://docs.docker.com/articles/security/#docker-daemon-attack-surface) for details. 
+2.  user is added to the `docker` group.  This will impact the security of your system; the `docker` group is `root` equivalent.  See [Docker Daemon Attack Surface](https://docs.docker.com/security/security/#docker-daemon-attack-surface) for details. 
 
 You can log into any public or private repository for which you have
 credentials.  When you log in, the command stores encoded credentials in
diff --git a/docs/reference/commandline/network_create.md b/docs/reference/commandline/network_create.md
index 3a9705b..a1bfdf5 100644
--- a/docs/reference/commandline/network_create.md
+++ b/docs/reference/commandline/network_create.md
@@ -22,6 +22,7 @@
     --ip-range=[]            Allocate container ip from a sub-range
     --ipam-driver=default    IP Address Management Driver
     -o --opt=map[]           Set custom network plugin options
+    --ipam-opt=map[]         Set custom IPAM plugin options
     --subnet=[]              Subnet in CIDR format that represents a network segment
 
 Creates a new network. The `DRIVER` accepts `bridge` or `overlay` which are the
diff --git a/docs/reference/commandline/network_inspect.md b/docs/reference/commandline/network_inspect.md
index 00b886d..9e0d87f 100644
--- a/docs/reference/commandline/network_inspect.md
+++ b/docs/reference/commandline/network_inspect.md
@@ -17,7 +17,7 @@
       -f, --format=       Format the output using the given go template.
       --help             Print usage
 
-Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to a network:
+Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network:
 
 ```bash
 $ sudo docker run -itd --name=container1 busybox
@@ -78,6 +78,32 @@
 ]
 ```
 
+Returns information about a user-defined network:
+
+```bash
+$ docker network create simple-network
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
+$ docker network inspect simple-network
+[
+    {
+        "Name": "simple-network",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Config": [
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1/16"
+                }
+            ]
+        },
+        "Containers": {},
+        "Options": {}
+    }
+]
+```
 
 ## Related information
 
diff --git a/docs/security/apparmor.md b/docs/security/apparmor.md
index 07cd62c..c33240d 100644
--- a/docs/security/apparmor.md
+++ b/docs/security/apparmor.md
@@ -1,47 +1,74 @@
 <!-- [metadata]>
 +++
-draft = true
+title = "AppArmor security profiles for Docker"
+description = "Enabling AppArmor in Docker"
+keywords = ["AppArmor, security, docker, documentation"]
+[menu.main]
+parent= "smn_secure_docker"
 +++
 <![end-metadata]-->
 
-AppArmor security profiles for Docker
---------------------------------------
+# AppArmor security profiles for Docker
 
-AppArmor (Application Armor) is a security module that allows a system
-administrator to associate a security profile with each program. Docker
+AppArmor (Application Armor) is a Linux security module that protects an
+operating system and its applications from security threats. To use it, a system
+administrator associates an AppArmor security profile with each program. Docker
 expects to find an AppArmor policy loaded and enforced.
 
-Container profiles are loaded automatically by Docker. A profile
-for the Docker Engine itself also exists and is installed
-with the official *.deb* packages. Advanced users and package
-managers may find the profile for */usr/bin/docker* underneath
-[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
-in the Docker Engine source repository.
+Docker automatically loads container profiles. A profile for the Docker Engine
+itself also exists and is installed with the official *.deb* packages as the
+`/etc/apparmor.d/docker` file.
 
 
-Understand the policies
-------------------------
+## Understand the policies
 
-The `docker-default` profile the default for running
-containers. It is moderately protective while
-providing wide application compatibility.
-
-The system's standard `unconfined` profile inherits all
-system-wide policies, applying path-based policies
-intended for the host system inside of containers.
-This was the default for privileged containers
-prior to Docker 1.8.
-
-
-Overriding the profile for a container
----------------------------------------
-
-Users may override the AppArmor profile using the
-`security-opt` option (per-container).
-
-For example, the following explicitly specifies the default policy:
+The `docker-default` profile is the default for running containers. It is
+moderately protective while providing wide application compatibility. The
+profile is as follows:
 
 ```
+#include <tunables/global>
+
+
+profile docker-default flags=(attach_disconnected,mediate_deleted) {
+
+  #include <abstractions/base>
+
+
+  network,
+  capability,
+  file,
+  umount,
+
+  deny @{PROC}/{*,**^[0-9*],sys/kernel/shm*} wkx,
+  deny @{PROC}/sysrq-trigger rwklx,
+  deny @{PROC}/mem rwklx,
+  deny @{PROC}/kmem rwklx,
+  deny @{PROC}/kcore rwklx,
+
+  deny mount,
+
+  deny /sys/[^f]*/** wklx,
+  deny /sys/f[^s]*/** wklx,
+  deny /sys/fs/[^c]*/** wklx,
+  deny /sys/fs/c[^g]*/** wklx,
+  deny /sys/fs/cg[^r]*/** wklx,
+  deny /sys/firmware/efi/efivars/** rwklx,
+  deny /sys/kernel/security/** rwklx,
+}
+```
+
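+You can check that this profile is loaded with `apparmor_status` (a sketch;
+the exact output depends on your system):
+
+```bash
+$ sudo apparmor_status | grep docker-default
+   docker-default
+```
+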
+When you run a container, it uses the `docker-default` policy unless you
+override it with the `security-opt` option. For example, the following
+explicitly specifies the default policy:
+
+```bash
 $ docker run --rm -it --security-opt apparmor:docker-default hello-world
 ```
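+
+You can also load a profile of your own and run a container under it. Here
+`my-custom-profile` is a hypothetical profile name; it must match the name
+declared inside the profile file (a sketch):
+
+```bash
+$ sudo apparmor_parser -r -W /path/to/my-custom-profile
+$ docker run --rm -it --security-opt apparmor:my-custom-profile hello-world
+```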
 
+## Contributing to AppArmor code in Docker
+
+Advanced users and package managers can find a profile for `/usr/bin/docker`
+underneath
+[contrib/apparmor](https://github.com/docker/docker/tree/master/contrib/apparmor)
+in the Docker Engine source repository.
diff --git a/docs/security/index.md b/docs/security/index.md
new file mode 100644
index 0000000..6948b09
--- /dev/null
+++ b/docs/security/index.md
@@ -0,0 +1,20 @@
+<!-- [metadata]>
++++
+title = "Work with Docker security"
+description = "Sec"
+keywords = ["seccomp, security, docker, documentation"]
+[menu.main]
+identifier="smn_secure_docker"
+parent= "mn_use_docker"
++++
+<![end-metadata]-->
+
+# Work with Docker security
+
+This section discusses the security features you can configure and use within your Docker Engine installation.
+
+* You can configure Docker's trust features so that your users can push and pull trusted images. To learn how to do this, see [Use trusted images](trust/index.md) in this section.
+
+* You can configure secure computing mode (Seccomp) policies to secure system calls in a container. For more information, see [Seccomp security profiles for Docker](seccomp.md).
+
+* An AppArmor profile for Docker is installed with the official *.deb* packages. For information about this profile and overriding it, see [AppArmor security profiles for Docker](apparmor.md).
diff --git a/docs/security/seccomp.md b/docs/security/seccomp.md
index c8b7bde..b683be0 100644
--- a/docs/security/seccomp.md
+++ b/docs/security/seccomp.md
@@ -3,27 +3,26 @@
 title = "Seccomp security profiles for Docker"
 description = "Enabling seccomp in Docker"
 keywords = ["seccomp, security, docker, documentation"]
+[menu.main]
+parent= "smn_secure_docker"
 +++
 <![end-metadata]-->
 
-Seccomp security profiles for Docker
-------------------------------------
+# Seccomp security profiles for Docker
 
-The seccomp() system call operates on the Secure Computing (seccomp)
-state of the calling process.
+Secure computing mode (Seccomp) is a Linux kernel feature that restricts the
+actions available within a container. The `seccomp()` system call operates on
+the seccomp state of the calling process, letting you allow or deny individual
+system calls for your application.
 
-This operation is available only if the kernel is configured
-with `CONFIG_SECCOMP` enabled.
+This feature is available only if the kernel is configured with `CONFIG_SECCOMP`
+enabled.
 
-This allows for allowing or denying of certain syscalls in a container.
+## Passing a profile for a container
 
-Passing a profile for a container
----------------------------------
-
-Users may pass a seccomp profile using the `security-opt` option
-(per-container).
-
-The profile has layout in the following form:
+The default seccomp profile provides a sane default for running containers with
+seccomp. It is moderately protective while providing wide application
+compatibility. The default Docker profile has the following layout:
 
 ```
 {
@@ -57,30 +56,14 @@
 }
 ```
 
-Then you can run with:
+When you run a container, it uses the default profile unless you override
+it with the `security-opt` option. For example, the following passes a
+custom profile:
 
 ```
 $ docker run --rm -it --security-opt seccomp:/path/to/seccomp/profile.json hello-world
 ```
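+
+A custom profile uses the same layout as the default profile shown above. For
+instance, a minimal whitelist-style profile might look like the following
+sketch (illustrative only):
+
+```
+{
+    "defaultAction": "SCMP_ACT_ERRNO",
+    "syscalls": [
+        {
+            "name": "read",
+            "action": "SCMP_ACT_ALLOW"
+        }
+    ]
+}
+```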
 
-Default Profile
----------------
-
-The default seccomp profile provides a sane default for running
-containers with seccomp. It is moderately protective while
-providing wide application compatibility.
-
-
-### Overriding the default profile for a container
-
-You can pass `unconfined` to run a container without the default seccomp
-profile.
-
-```
-$ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \
-    unshare --map-root-user --user sh -c whoami
-```
-
 ### Syscalls blocked by the default profile
 
 Docker's default seccomp profile is a whitelist which specifies the calls that
@@ -91,55 +74,65 @@
 | Syscall             | Description                                                                                                                           |
 |---------------------|---------------------------------------------------------------------------------------------------------------------------------------|
 | `acct`              | Accounting syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_PACCT`. |
-| `add_key`           | Prevent containers from using the kernel keyring, which is not namespaced.                                                            |
-| `adjtimex`          | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced.                                                           |
-| `bpf`               | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`.                                       |
-| `clock_adjtime`     | Time/date is not namespaced.                                                                                                          |
-| `clock_settime`     | Time/date is not namespaced.                                                                                                          |
-| `clone`             | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`.                                  |
-| `create_module`     | Deny manipulation and functions on kernel modules.                                                                                    |
-| `delete_module`     | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                                                    |
-| `finit_module`      | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                                                    |
-| `get_kernel_syms`   | Deny retrieval of exported kernel and module symbols.                                                                                 |
-| `get_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                                               |
-| `init_module`       | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                                                    |
-| `ioperm`            | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.                                      |
-| `iopl`              | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.                                      |
-| `kcmp`              | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                                                   |
-| `kexec_file_load`   | Sister syscall of `kexec_load` that does the same thing, slightly different arguments.                                                |
-| `kexec_load`        | Deny loading a new kernel for later execution.                                                                                        |
-| `keyctl`            | Prevent containers from using the kernel keyring, which is not namespaced.                                                            |
-| `lookup_dcookie`    | Tracing/profiling syscall, which could leak a lot of information on the host.                                                         |
-| `mbind`             | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                                               |
-| `modify_ldt`        | Old syscall only used in 16-bit code and a potential information leak.                                                                |
-| `mount`             | Deny mounting, already gated by `CAP_SYS_ADMIN`.                                                                                      |
-| `move_pages`        | Syscall that modifies kernel memory and NUMA settings.                                                                                |
-| `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_SYS_NICE`.                                                               |
-| `nfsservctl`        | Deny interaction with the kernel nfs daemon.                                                                                          |
-| `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`.                                                              |
-| `perf_event_open`   | Tracing/profiling syscall, which could leak a lot of information on the host.                                                         |
-| `personality`       | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns.      |
-| `pivot_root`        | Deny `pivot_root`, should be privileged operation.                                                                                    |
-| `process_vm_readv`  | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                                                   |
-| `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_PTRACE`.                                                   |
-| `ptrace`            | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_PTRACE`.               |
-| `query_module`      | Deny manipulation and functions on kernel modules.                                                                                    |
-| `quotactl`          | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`.      |
-| `reboot`            | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`.                                                                   |
+| `add_key`           | Prevent containers from using the kernel keyring, which is not namespaced.                                   |
+| `adjtimex`          | Similar to `clock_settime` and `settimeofday`, time/date is not namespaced.                                  |
+| `bpf`               | Deny loading potentially persistent bpf programs into kernel, already gated by `CAP_SYS_ADMIN`.              |
+| `clock_adjtime`     | Time/date is not namespaced.                                                                                 |
+| `clock_settime`     | Time/date is not namespaced.                                                                                 |
+| `clone`             | Deny cloning new namespaces. Also gated by `CAP_SYS_ADMIN` for CLONE_* flags, except `CLONE_USERNS`.         |
+| `create_module`     | Deny manipulation and functions on kernel modules.                                                           |
+| `delete_module`     | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                           |
+| `finit_module`      | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                           |
+| `get_kernel_syms`   | Deny retrieval of exported kernel and module symbols.                                                        |
+| `get_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                      |
+| `init_module`       | Deny manipulation and functions on kernel modules. Also gated by `CAP_SYS_MODULE`.                           |
+| `ioperm`            | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.             |
+| `iopl`              | Prevent containers from modifying kernel I/O privilege levels. Already gated by `CAP_SYS_RAWIO`.             |
+| `kcmp`              | Restrict process inspection capabilities, already blocked by dropping `CAP_SYS_PTRACE`.                      |
+| `kexec_file_load`   | Sister syscall of `kexec_load` that does the same thing, slightly different arguments.                       |
+| `kexec_load`        | Deny loading a new kernel for later execution.                                                               |
+| `keyctl`            | Prevent containers from using the kernel keyring, which is not namespaced.                                   |
+| `lookup_dcookie`    | Tracing/profiling syscall, which could leak a lot of information on the host.                                |
+| `mbind`             | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                      |
+| `modify_ldt`        | Old syscall only used in 16-bit code and a potential information leak.                                       |
+| `mount`             | Deny mounting, already gated by `CAP_SYS_ADMIN`.                                                             |
+| `move_pages`        | Syscall that modifies kernel memory and NUMA settings.                                                       |
+| `name_to_handle_at` | Sister syscall to `open_by_handle_at`. Already gated by `CAP_DAC_READ_SEARCH`.                               |
+| `nfsservctl`        | Deny interaction with the kernel nfs daemon.                                                                 |
+| `open_by_handle_at` | Cause of an old container breakout. Also gated by `CAP_DAC_READ_SEARCH`.                                     |
+| `perf_event_open`   | Tracing/profiling syscall, which could leak a lot of information on the host.                                |
+| `personality`       | Prevent container from enabling BSD emulation. Not inherently dangerous, but poorly tested, potential for a lot of kernel vulns. |
+| `pivot_root`        | Deny `pivot_root`, should be privileged operation.                                                           |
+| `process_vm_readv`  | Restrict process inspection capabilities, already blocked by dropping `CAP_SYS_PTRACE`.                      |
+| `process_vm_writev` | Restrict process inspection capabilities, already blocked by dropping `CAP_SYS_PTRACE`.                      |
+| `ptrace`            | Tracing/profiling syscall, which could leak a lot of information on the host. Already blocked by dropping `CAP_SYS_PTRACE`. |
+| `query_module`      | Deny manipulation and functions on kernel modules.                                                            |
+| `quotactl`          | Quota syscall which could let containers disable their own resource limits or process accounting. Also gated by `CAP_SYS_ADMIN`. |
+| `reboot`            | Don't let containers reboot the host. Also gated by `CAP_SYS_BOOT`.                                           |
 | `restart_syscall`   | Don't allow containers to restart a syscall. Possible seccomp bypass see: https://code.google.com/p/chromium/issues/detail?id=408827. |
-| `request_key`       | Prevent containers from using the kernel keyring, which is not namespaced.                                                            |
-| `set_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                                               |
-| `setns`             | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`.                                                            |
-| `settimeofday`      | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                                            |
-| `stime`             | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                                            |
-| `swapon`            | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                                               |
-| `swapoff`           | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                                               |
-| `sysfs`             | Obsolete syscall.                                                                                                                     |
-| `_sysctl`           | Obsolete, replaced by /proc/sys.                                                                                                      |
-| `umount`            | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`.                                                                      |
-| `umount2`           | Should be a privileged operation.                                                                                                     |
-| `unshare`           | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`.                     |
-| `uselib`            | Older syscall related to shared libraries, unused for a long time.                                                                    |
-| `ustat`             | Obsolete syscall.                                                                                                                     |
-| `vm86`              | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                                               |
-| `vm86old`           | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                                               |
+| `request_key`       | Prevent containers from using the kernel keyring, which is not namespaced.                                    |
+| `set_mempolicy`     | Syscall that modifies kernel memory and NUMA settings. Already gated by `CAP_SYS_NICE`.                       |
+| `setns`             | Deny associating a thread with a namespace. Also gated by `CAP_SYS_ADMIN`.                                    |
+| `settimeofday`      | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                    |
+| `stime`             | Time/date is not namespaced. Also gated by `CAP_SYS_TIME`.                                                    |
+| `swapon`            | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                       |
+| `swapoff`           | Deny start/stop swapping to file/device. Also gated by `CAP_SYS_ADMIN`.                                       |
+| `sysfs`             | Obsolete syscall.                                                                                             |
+| `_sysctl`           | Obsolete, replaced by /proc/sys.                                                                              |
+| `umount`            | Should be a privileged operation. Also gated by `CAP_SYS_ADMIN`.                                              |
+| `umount2`           | Should be a privileged operation.                                                                             |
+| `unshare`           | Deny cloning new namespaces for processes. Also gated by `CAP_SYS_ADMIN`, with the exception of `unshare --user`. |
+| `uselib`            | Older syscall related to shared libraries, unused for a long time.                                            |
+| `ustat`             | Obsolete syscall.                                                                                             |
+| `vm86`              | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                       |
+| `vm86old`           | In kernel x86 real mode virtual machine. Also gated by `CAP_SYS_ADMIN`.                                       |
+
+## Run without the default seccomp profile
+
+You can pass `unconfined` to run a container without the default seccomp
+profile.
+
+```
+$ docker run --rm -it --security-opt seccomp:unconfined debian:jessie \
+    unshare --map-root-user --user sh -c whoami
+```
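+
+Under the default profile, the same `unshare` call is blocked and the command
+fails instead (a sketch; the exact error text can vary by kernel and
+util-linux version):
+
+```
+$ docker run --rm -it debian:jessie \
+    unshare --map-root-user --user sh -c whoami
+unshare: unshare failed: Operation not permitted
+```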
diff --git a/docs/articles/security.md b/docs/security/security.md
similarity index 97%
rename from docs/articles/security.md
rename to docs/security/security.md
index 92f02dc..d6b11e4 100644
--- a/docs/articles/security.md
+++ b/docs/security/security.md
@@ -1,11 +1,12 @@
 <!--[metadata]>
 +++
+aliases = ["/engine/articles/security/"]
 title = "Docker security"
 description = "Review of the Docker Daemon attack surface"
 keywords = ["Docker, Docker documentation,  security"]
 [menu.main]
-parent = "smn_administrate"
-weight = 2
+parent = "smn_secure_docker"
+weight = -99
 +++
 <![end-metadata]-->
 
@@ -277,8 +278,9 @@
 be implemented in Docker as well. We welcome users to submit issues,
 pull requests, and communicate via the mailing list.
 
-References:
+## Related information
 
-* [Docker Containers: How Secure Are They? (2013)](
-http://blog.docker.com/2013/08/containers-docker-how-secure-are-they/).
-* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e).
+* [Use trusted images](../security/trust/index.md)
+* [Seccomp security profiles for Docker](../security/seccomp.md)
+* [AppArmor security profiles for Docker](../security/apparmor.md)
+* [On the Security of Containers (2014)](https://medium.com/@ewindisch/on-the-security-of-containers-2c60ffe25a9e)
diff --git a/docs/userguide/networking/dockernetworks.md b/docs/userguide/networking/dockernetworks.md
index 3f8b9a4..6e76884 100644
--- a/docs/userguide/networking/dockernetworks.md
+++ b/docs/userguide/networking/dockernetworks.md
@@ -305,19 +305,22 @@
 
 ```
 $ docker network create --driver bridge isolated_nw
-c5ee82f76de30319c75554a57164c682e7372d2c694fec41e42ac3b77e570f6b
+1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b
 
 $ docker network inspect isolated_nw
 [
     {
         "Name": "isolated_nw",
-        "Id": "c5ee82f76de30319c75554a57164c682e7372d2c694fec41e42ac3b77e570f6b",
+        "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
         "Scope": "local",
         "Driver": "bridge",
         "IPAM": {
             "Driver": "default",
             "Config": [
-                {}
+                {
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
+                }
             ]
         },
         "Containers": {},
@@ -338,13 +341,13 @@
 
 ```
 $ docker run --net=isolated_nw -itd --name=container3 busybox
-885b7b4f792bae534416c95caa35ba272f201fa181e18e59beba0c80d7d77c1d
+8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c
 
 $ docker network inspect isolated_nw
 [
     {
         "Name": "isolated_nw",
-        "Id": "c5ee82f76de30319c75554a57164c682e7372d2c694fec41e42ac3b77e570f6b",
+        "Id": "1196a4c5af43a21ae38ef34515b6af19236a3fc48122cf585e3f3054d509679b",
         "Scope": "local",
         "Driver": "bridge",
         "IPAM": {
@@ -354,8 +357,8 @@
             ]
         },
         "Containers": {
-            "885b7b4f792bae534416c95caa35ba272f201fa181e18e59beba0c80d7d77c1d": {
-                "EndpointID": "514e1b419074397ea92bcfaa6698d17feb62db49d1320a27393b853ec65319c3",
+            "8c1a0a5be480921d669a073393ade66a3fc49933f08bcc5515b37b8144f6d47c": {
+                "EndpointID": "93b2db4a9b9a997beb912d28bcfc117f7b0eb924ff91d48cfa251d473e6a9b08",
                 "MacAddress": "02:42:ac:15:00:02",
                 "IPv4Address": "172.21.0.2/16",
                 "IPv6Address": ""
diff --git a/docs/userguide/networking/work-with-networks.md b/docs/userguide/networking/work-with-networks.md
index b4dea7d..d5fac70 100644
--- a/docs/userguide/networking/work-with-networks.md
+++ b/docs/userguide/networking/work-with-networks.md
@@ -36,18 +36,21 @@
 
 ```bash
 $ docker network create simple-network
-de792b8258895cf5dc3b43835e9d61a9803500b991654dacb1f4f0546b1c88f8
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
 $ docker network inspect simple-network
 [
     {
         "Name": "simple-network",
-        "Id": "de792b8258895cf5dc3b43835e9d61a9803500b991654dacb1f4f0546b1c88f8",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
         "Scope": "local",
         "Driver": "bridge",
         "IPAM": {
             "Driver": "default",
             "Config": [
-                {}
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1/16"
+                }
             ]
         },
         "Containers": {},
@@ -134,7 +137,8 @@
             "Driver": "default",
             "Config": [
                 {
-                    "Subnet": "172.25.0.0/16"
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
                 }
             ]
         },
@@ -662,7 +666,8 @@
             "Driver": "default",
             "Config": [
                 {
-                    "Subnet": "172.25.0.0/16"
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
                 }
             ]
         },
@@ -746,7 +751,8 @@
             "Driver": "default",
             "Config": [
                 {
-                    "Subnet": "172.25.0.0/16"
+                    "Subnet": "172.21.0.0/16",
+                    "Gateway": "172.21.0.1/16"
                 }
             ]
         },
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
index b8e8e8d..36abc6f 100644
--- a/hack/make/build-rpm
+++ b/hack/make/build-rpm
@@ -98,7 +98,6 @@
 		if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then
 			cat >> "$DEST/$version/Dockerfile.build" <<-EOF
 				RUN tar -cz -C /usr/src/${rpmName}/contrib -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux
-				RUN { echo '* $rpmDate $rpmPackager $rpmVersion-$rpmRelease'; echo '* Version: $VERSION'; } >> ${rpmName}-selinux.spec && tail >&2 ${rpmName}-selinux.spec
 				RUN rpmbuild -ba \
 						--define '_gitcommit $DOCKER_GITCOMMIT' \
 						--define '_release $rpmRelease' \
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 045d375..091c977 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -24,9 +24,10 @@
 clone git github.com/docker/go-connections v0.1.2
 clone git github.com/docker/engine-api v0.2.2
 clone git github.com/RackSec/srslog 6eb773f331e46fbba8eecb8e794e635e75fc04de
+clone git github.com/imdario/mergo 0.2.1
 
 #get libnetwork packages
-clone git github.com/docker/libnetwork v0.5.4
+clone git github.com/docker/libnetwork v0.5.6
 clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
 clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
@@ -45,7 +46,7 @@
 clone git github.com/miekg/dns d27455715200c7d3e321a1e5cadb27c9ee0b0f02
 
 # get graph and distribution packages
-clone git github.com/docker/distribution a7ae88da459b98b481a245e5b1750134724ac67d
+clone git github.com/docker/distribution cb08de17d74bef86ce6c5abe8b240e282f5750be
 clone git github.com/vbatts/tar-split v0.9.11
 
 # get desired notary commit, might also need to be updated in Dockerfile
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 92efc96..b60b24d 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -6170,8 +6170,8 @@
 	if err != nil {
 		c.Fatal(err)
 	}
-	if res != filepath.Clean(wdVal) {
-		c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.Clean(wdVal), res)
+	if res != filepath.ToSlash(filepath.Clean(wdVal)) {
+		c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res)
 	}
 
 	err = inspectFieldAndMarshall(imgName, "Config.Env", &resArr)
diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go
index 7d9a902..c8ebfd3 100644
--- a/integration-cli/docker_cli_help_test.go
+++ b/integration-cli/docker_cli_help_test.go
@@ -133,7 +133,7 @@
 			// Check each line for lots of stuff
 			lines := strings.Split(out, "\n")
 			for _, line := range lines {
-				c.Assert(len(line), checker.LessOrEqualThan, 103, check.Commentf("Help for %q is too long:\n%s", cmd, line))
+				c.Assert(len(line), checker.LessOrEqualThan, 107, check.Commentf("Help for %q is too long:\n%s", cmd, line))
 
 				if scanForHome && strings.Contains(line, `"`+home) {
 					c.Fatalf("Help for %q should use ~ instead of %q on:\n%s",
diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go
index b6b41be..d09c2f2 100644
--- a/integration-cli/docker_cli_network_unix_test.go
+++ b/integration-cli/docker_cli_network_unix_test.go
@@ -282,11 +282,11 @@
 	defer func() {
 		dockerCmd(c, "network", "rm", "dev")
 	}()
-	containerID := strings.TrimSpace(out)
+	networkID := strings.TrimSpace(out)
 
 	// filter with partial ID and partial name
 	// only show 'bridge' and 'dev' network
-	out, _ = dockerCmd(c, "network", "ls", "-f", "id="+containerID[0:5], "-f", "name=dge")
+	out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5], "-f", "name=dge")
 	assertNwList(c, out, []string{"dev", "bridge"})
 
 	// only show built-in network (bridge, none, host)
@@ -324,10 +324,11 @@
 	dockerCmd(c, "network", "create", "testDelMulti2")
 	assertNwIsAvailable(c, "testDelMulti2")
 	out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top")
-	waitRun(strings.TrimSpace(out))
+	containerID := strings.TrimSpace(out)
+	waitRun(containerID)
 
 	// delete three networks at the same time, since testDelMulti2
-	// contains active container, it's deletion should fail.
+	// contains active container, its deletion should fail.
 	out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2")
 	// err should not be nil due to deleting testDelMulti2 failed.
 	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
@@ -335,7 +336,7 @@
 	c.Assert(out, checker.Contains, "has active endpoints")
 	assertNwNotAvailable(c, "testDelMulti0")
 	assertNwNotAvailable(c, "testDelMulti1")
-	// testDelMulti2 can't be deleted, so it should exists
+	// testDelMulti2 can't be deleted, so it should exist
 	assertNwIsAvailable(c, "testDelMulti2")
 }
 
@@ -523,8 +524,58 @@
 	assertNwNotAvailable(c, "br0")
 }
 
-func (s *DockerNetworkSuite) TestDockerNetworkInspect(c *check.C) {
-	// if unspecified, network gateway will be selected from inside preferred pool
+func (s *DockerNetworkSuite) TestDockerNetworkIpamOptions(c *check.C) {
+	// Create a bridge network using custom ipam driver and options
+	dockerCmd(c, "network", "create", "--ipam-driver", dummyIpamDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0")
+	assertNwIsAvailable(c, "br0")
+
+	// Verify expected network ipam options
+	nr := getNetworkResource(c, "br0")
+	opts := nr.IPAM.Options
+	c.Assert(opts["opt1"], checker.Equals, "drv1")
+	c.Assert(opts["opt2"], checker.Equals, "drv2")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) {
+	nr := getNetworkResource(c, "none")
+	c.Assert(nr.Driver, checker.Equals, "null")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 0)
+
+	nr = getNetworkResource(c, "host")
+	c.Assert(nr.Driver, checker.Equals, "host")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 0)
+
+	nr = getNetworkResource(c, "bridge")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil)
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil)
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) {
+	// if unspecified, network subnet will be selected from inside preferred pool
+	dockerCmd(c, "network", "create", "test01")
+	assertNwIsAvailable(c, "test01")
+
+	nr := getNetworkResource(c, "test01")
+	c.Assert(nr.Driver, checker.Equals, "bridge")
+	c.Assert(nr.Scope, checker.Equals, "local")
+	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
+	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
+	c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil)
+	c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil)
+
+	dockerCmd(c, "network", "rm", "test01")
+	assertNwNotAvailable(c, "test01")
+}
+
+func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) {
 	dockerCmd(c, "network", "create", "--driver=bridge", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0")
 	assertNwIsAvailable(c, "br0")
 
@@ -537,6 +588,7 @@
 	c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24")
 	c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254")
 	dockerCmd(c, "network", "rm", "br0")
+	assertNwNotAvailable(c, "br0")
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C) {
@@ -560,6 +612,7 @@
 	_, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1")
 	c.Assert(err, check.NotNil)
 	dockerCmd(c, "network", "rm", "test0")
+	assertNwNotAvailable(c, "test0")
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) {
@@ -572,6 +625,7 @@
 	c.Assert(opts["opt1"], checker.Equals, "drv1")
 	c.Assert(opts["opt2"], checker.Equals, "drv2")
 	dockerCmd(c, "network", "rm", "testopt")
+	assertNwNotAvailable(c, "testopt")
 
 }
 
@@ -806,7 +860,7 @@
 	out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
-	// Attach the container to other three networks
+	// Attach the container to other networks
 	for _, nw := range nws {
 		out, err = d.Cmd("network", "create", nw)
 		c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -816,7 +870,7 @@
 }
 
 func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) {
-	// Verify container is connected to all three networks
+	// Verify container is connected to all the networks
 	for _, nw := range nws {
 		out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName)
 		c.Assert(err, checker.IsNil, check.Commentf(out))
@@ -866,7 +920,8 @@
 
 func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) {
 	out, _ := dockerCmd(c, "network", "create", "one")
-	dockerCmd(c, "run", "-d", "--net", strings.TrimSpace(out), "busybox", "top")
+	containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top")
+	c.Assert(err, checker.IsNil, check.Commentf(containerOut))
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) {
diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
index be5f9aa..c100772 100644
--- a/integration-cli/docker_cli_push_test.go
+++ b/integration-cli/docker_cli_push_test.go
@@ -147,6 +147,58 @@
 	testPushEmptyLayer(c)
 }
 
+func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) {
+	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", sourceRepoName)
+	// push the image to the registry
+	out1, _, err := dockerCmdWithError("push", sourceRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1))
+	// ensure that none of the layers were mounted from another repository during push
+	c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false)
+
+	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
+	// retag the image to upload the same layers to another repo in the same registry
+	dockerCmd(c, "tag", "busybox", destRepoName)
+	// push the image to the registry
+	out2, _, err := dockerCmdWithError("push", destRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2))
+	// ensure that layers were mounted from the first repo during push
+	c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true)
+
+	// ensure that we can pull and run the cross-repo-pushed repository
+	dockerCmd(c, "rmi", destRepoName)
+	dockerCmd(c, "pull", destRepoName)
+	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
+	c.Assert(out3, check.Equals, "hello world")
+}
+
+func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) {
+	sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
+	// tag the image to upload it to the private registry
+	dockerCmd(c, "tag", "busybox", sourceRepoName)
+	// push the image to the registry
+	out1, _, err := dockerCmdWithError("push", sourceRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1))
+	// ensure that none of the layers were mounted from another repository during push
+	c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false)
+
+	destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL)
+	// retag the image to upload the same layers to another repo in the same registry
+	dockerCmd(c, "tag", "busybox", destRepoName)
+	// push the image to the registry
+	out2, _, err := dockerCmdWithError("push", destRepoName)
+	c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2))
+	// schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen
+	c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, false)
+
+	// ensure that we can pull and run the second pushed repository
+	dockerCmd(c, "rmi", destRepoName)
+	dockerCmd(c, "pull", destRepoName)
+	out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world")
+	c.Assert(out3, check.Equals, "hello world")
+}
+
 func (s *DockerTrustSuite) TestTrustedPush(c *check.C) {
 	repoName := fmt.Sprintf("%v/dockercli/trusted:latest", privateRegistryURL)
 	// tag the image and upload it to the private registry
diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go
index dc57a94..2bc28fd 100644
--- a/integration-cli/docker_cli_restart_test.go
+++ b/integration-cli/docker_cli_restart_test.go
@@ -153,3 +153,46 @@
 	err = waitInspect(id, "{{.State.Status}}", "running", 5*time.Second)
 	c.Assert(err, check.IsNil)
 }
+
+func (s *DockerSuite) TestUserDefinedNetworkWithRestartPolicy(c *check.C) {
+	testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace)
+	dockerCmd(c, "network", "create", "-d", "bridge", "udNet")
+
+	dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top")
+	c.Assert(waitRun("first"), check.IsNil)
+
+	dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second",
+		"--link=first:foo", "busybox", "top")
+	c.Assert(waitRun("second"), check.IsNil)
+
+	// ping to first and its alias foo must succeed
+	_, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+
+	// Now kill the second container and let the restart policy kick in
+	pidStr, err := inspectField("second", "State.Pid")
+	c.Assert(err, check.IsNil)
+
+	pid, err := strconv.Atoi(pidStr)
+	c.Assert(err, check.IsNil)
+
+	p, err := os.FindProcess(pid)
+	c.Assert(err, check.IsNil)
+	c.Assert(p, check.NotNil)
+
+	err = p.Kill()
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second)
+	c.Assert(err, check.IsNil)
+
+	err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second)
+	c.Assert(err, check.IsNil)
+
+	// ping to first and its alias foo must still succeed
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first")
+	c.Assert(err, check.IsNil)
+	_, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo")
+	c.Assert(err, check.IsNil)
+}
diff --git a/man/docker-daemon.8.md b/man/docker-daemon.8.md
index 94a31a3..02adaed 100644
--- a/man/docker-daemon.8.md
+++ b/man/docker-daemon.8.md
@@ -14,6 +14,7 @@
 [**--cluster-store**[=*[]*]]
 [**--cluster-advertise**[=*[]*]]
 [**--cluster-store-opt**[=*map[]*]]
+[**--config-file**[=*/etc/docker/daemon.json*]]
 [**-D**|**--debug**]
 [**--default-gateway**[=*DEFAULT-GATEWAY*]]
 [**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]]
@@ -96,6 +97,9 @@
 **--cluster-store-opt**=""
   Specifies options for the Key/Value store.
 
+**--config-file**="/etc/docker/daemon.json"
+  Specifies the path to the JSON configuration file to load.
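+
+  For example, a minimal configuration file might look like the following
+  (illustrative keys; any supported daemon configuration option can appear):
+
+    {
+        "debug": true,
+        "labels": ["foo=bar"]
+    }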
+
 **-D**, **--debug**=*true*|*false*
   Enable debug mode. Default is false.
 
diff --git a/man/docker-network-create.1.md b/man/docker-network-create.1.md
index 1c876d6..c560f7a 100644
--- a/man/docker-network-create.1.md
+++ b/man/docker-network-create.1.md
@@ -13,6 +13,7 @@
 [**--internal**]
 [**--ip-range**=*[]*]
 [**--ipam-driver**=*default*]
+[**--ipam-opt**=*map[]*]
 [**-o**|**--opt**=*map[]*]
 [**--subnet**=*[]*]
 NETWORK-NAME
@@ -148,6 +149,9 @@
 **--ipam-driver**=*default*
   IP Address Management Driver
 
+**--ipam-opt**=map[]
+  Set custom IPAM plugin options
+
 **-o**, **--opt**=map[]
   Set custom network plugin options
 
diff --git a/man/docker-network-inspect.1.md b/man/docker-network-inspect.1.md
index 889967a..ceba368 100644
--- a/man/docker-network-inspect.1.md
+++ b/man/docker-network-inspect.1.md
@@ -12,7 +12,7 @@
 
 # DESCRIPTION
 
-Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to a network:
+Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network:
 
 ```bash
 $ sudo docker run -itd --name=container1 busybox
@@ -73,6 +73,33 @@
 ]
 ```
 
+Returns information about a user-defined network:
+
+```bash
+$ docker network create simple-network
+69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
+$ docker network inspect simple-network
+[
+    {
+        "Name": "simple-network",
+        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
+        "Scope": "local",
+        "Driver": "bridge",
+        "IPAM": {
+            "Driver": "default",
+            "Config": [
+                {
+                    "Subnet": "172.22.0.0/16",
+                    "Gateway": "172.22.0.1/16"
+                }
+            ]
+        },
+        "Containers": {},
+        "Options": {}
+    }
+]
+```
+
 # OPTIONS
 **-f**, **--format**=""
   Format the output using the given go template.
diff --git a/migrate/v1/migratev1.go b/migrate/v1/migratev1.go
index 77507c3..9243c5a 100644
--- a/migrate/v1/migratev1.go
+++ b/migrate/v1/migratev1.go
@@ -476,8 +476,8 @@
 	if err == nil { // best effort
 		dgst, err := digest.ParseDigest(string(checksum))
 		if err == nil {
-			blobSumService := metadata.NewBlobSumService(ms)
-			blobSumService.Add(layer.DiffID(), dgst)
+			v2MetadataService := metadata.NewV2MetadataService(ms)
+			v2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst})
 		}
 	}
 	_, err = ls.Release(layer)
diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go
index 5fe2663..6e8af7f 100644
--- a/migrate/v1/migratev1_test.go
+++ b/migrate/v1/migratev1_test.go
@@ -210,19 +210,19 @@
 		t.Fatalf("invalid register count: expected %q, got %q", expected, actual)
 	}
 
-	blobSumService := metadata.NewBlobSumService(ms)
-	blobsums, err := blobSumService.GetBlobSums(layer.EmptyLayer.DiffID())
+	v2MetadataService := metadata.NewV2MetadataService(ms)
+	receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID())
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	expectedBlobsums := []digest.Digest{
-		"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57",
-		"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
+	expectedMetadata := []metadata.V2Metadata{
+		{Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")},
+		{Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
 	}
 
-	if !reflect.DeepEqual(expectedBlobsums, blobsums) {
-		t.Fatalf("invalid blobsums: expected %q, got %q", expectedBlobsums, blobsums)
+	if !reflect.DeepEqual(expectedMetadata, receivedMetadata) {
+		t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata)
 	}
 
 }
diff --git a/opts/opts.go b/opts/opts.go
index abc9ab8..05aadbe 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -100,6 +100,35 @@
 	return len((*opts.values))
 }
 
+// NamedOption is an interface implemented by list and map
+// options that carry a configuration name.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
 //MapOpts holds a map of values and a validation function.
 type MapOpts struct {
 	values    map[string]string
@@ -145,6 +174,29 @@
 	}
 }
 
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
+	}
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
+}
+
 // ValidatorFctType defines a validator function that returns a validated string and/or an error.
 type ValidatorFctType func(val string) (string, error)
 
diff --git a/opts/opts_test.go b/opts/opts_test.go
index da86b21..9f41e47 100644
--- a/opts/opts_test.go
+++ b/opts/opts_test.go
@@ -198,3 +198,35 @@
 	}
 	return "", fmt.Errorf("invalid key %s", vals[0])
 }
+
+func TestNamedListOpts(t *testing.T) {
+	var v []string
+	o := NewNamedListOptsRef("foo-name", &v, nil)
+
+	o.Set("foo")
+	if o.String() != "[foo]" {
+		t.Errorf("%s != [foo]", o.String())
+	}
+	if o.Name() != "foo-name" {
+		t.Errorf("%s != foo-name", o.Name())
+	}
+	if len(v) != 1 {
+		t.Errorf("expected foo to be in the values, got %v", v)
+	}
+}
+
+func TestNamedMapOpts(t *testing.T) {
+	tmpMap := make(map[string]string)
+	o := NewNamedMapOpts("max-name", tmpMap, nil)
+
+	o.Set("max-size=1")
+	if o.String() != "map[max-size:1]" {
+		t.Errorf("%s != [map[max-size:1]", o.String())
+	}
+	if o.Name() != "max-name" {
+		t.Errorf("%s != max-name", o.Name())
+	}
+	if _, exist := tmpMap["max-size"]; !exist {
+		t.Errorf("expected map-size to be in the values, got %v", tmpMap)
+	}
+}
diff --git a/pkg/discovery/backends.go b/pkg/discovery/backends.go
index 875a26c..f150115 100644
--- a/pkg/discovery/backends.go
+++ b/pkg/discovery/backends.go
@@ -12,12 +12,8 @@
 var (
 	// Backends is a global map of discovery backends indexed by their
 	// associated scheme.
-	backends map[string]Backend
-)
-
-func init() {
 	backends = make(map[string]Backend)
-}
+)
 
 // Register makes a discovery backend available by the provided scheme.
 // If Register is called twice with the same scheme an error is returned.
@@ -42,7 +38,7 @@
 
 // ParseAdvertise parses the --cluster-advertise daemon config which accepts
 // <ip-address>:<port> or <interface-name>:<port>
-func ParseAdvertise(store, advertise string) (string, error) {
+func ParseAdvertise(advertise string) (string, error) {
 	var (
 		iface *net.Interface
 		addrs []net.Addr
diff --git a/pkg/discovery/memory/memory.go b/pkg/discovery/memory/memory.go
new file mode 100644
index 0000000..777a9a1
--- /dev/null
+++ b/pkg/discovery/memory/memory.go
@@ -0,0 +1,83 @@
+package memory
+
+import (
+	"time"
+
+	"github.com/docker/docker/pkg/discovery"
+)
+
+// Discovery implements a discovery backend that keeps
+// data in memory.
+type Discovery struct {
+	heartbeat time.Duration
+	values    []string
+}
+
+func init() {
+	Init()
+}
+
+// Init registers the memory backend on demand.
+func Init() {
+	discovery.Register("memory", &Discovery{})
+}
+
+// Initialize sets the heartbeat for the memory backend.
+func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error {
+	s.heartbeat = heartbeat
+	s.values = make([]string, 0)
+	return nil
+}
+
+// Watch sends periodic discovery updates to a channel.
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+	ch := make(chan discovery.Entries)
+	errCh := make(chan error)
+	ticker := time.NewTicker(s.heartbeat)
+
+	go func() {
+		defer close(errCh)
+		defer close(ch)
+
+		// Send the initial entries if available.
+		var currentEntries discovery.Entries
+		if len(s.values) > 0 {
+			var err error
+			currentEntries, err = discovery.CreateEntries(s.values)
+			if err != nil {
+				errCh <- err
+			} else {
+				ch <- currentEntries
+			}
+		}
+
+		// Periodically send updates.
+		for {
+			select {
+			case <-ticker.C:
+				newEntries, err := discovery.CreateEntries(s.values)
+				if err != nil {
+					errCh <- err
+					continue
+				}
+
+				// Check if the entries have really changed.
+				if !newEntries.Equals(currentEntries) {
+					ch <- newEntries
+				}
+				currentEntries = newEntries
+			case <-stopCh:
+				ticker.Stop()
+				return
+			}
+		}
+	}()
+
+	return ch, errCh
+}
+
+// Register adds a new address to the discovery.
+func (s *Discovery) Register(addr string) error {
+	s.values = append(s.values, addr)
+	return nil
+}
diff --git a/pkg/discovery/memory/memory_test.go b/pkg/discovery/memory/memory_test.go
new file mode 100644
index 0000000..c2da0a0
--- /dev/null
+++ b/pkg/discovery/memory/memory_test.go
@@ -0,0 +1,48 @@
+package memory
+
+import (
+	"testing"
+
+	"github.com/docker/docker/pkg/discovery"
+	"github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type discoverySuite struct{}
+
+var _ = check.Suite(&discoverySuite{})
+
+func (s *discoverySuite) TestWatch(c *check.C) {
+	d := &Discovery{}
+	d.Initialize("foo", 1000, 0, nil)
+	stopCh := make(chan struct{})
+	ch, errCh := d.Watch(stopCh)
+
+	// We have to drain the error channel otherwise Watch will get stuck.
+	go func() {
+		for range errCh {
+		}
+	}()
+
+	expected := discovery.Entries{
+		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+	}
+
+	c.Assert(d.Register("1.1.1.1:1111"), check.IsNil)
+	c.Assert(<-ch, check.DeepEquals, expected)
+
+	expected = discovery.Entries{
+		&discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+		&discovery.Entry{Host: "2.2.2.2", Port: "2222"},
+	}
+
+	c.Assert(d.Register("2.2.2.2:2222"), check.IsNil)
+	c.Assert(<-ch, check.DeepEquals, expected)
+
+	// Stop and make sure it closes all channels.
+	close(stopCh)
+	c.Assert(<-ch, check.IsNil)
+	c.Assert(<-errCh, check.IsNil)
+}
diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go
index db3535b..58cc401 100644
--- a/pkg/pidfile/pidfile.go
+++ b/pkg/pidfile/pidfile.go
@@ -9,6 +9,7 @@
 	"os"
 	"path/filepath"
 	"strconv"
+	"strings"
 )
 
 // PIDFile is a file used to store the process ID of a running process.
@@ -17,9 +18,10 @@
 }
 
 func checkPIDFileAlreadyExists(path string) error {
-	if pidString, err := ioutil.ReadFile(path); err == nil {
-		if pid, err := strconv.Atoi(string(pidString)); err == nil {
-			if _, err := os.Stat(filepath.Join("/proc", string(pid))); err == nil {
+	if pidByte, err := ioutil.ReadFile(path); err == nil {
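+		// Trim surrounding whitespace so a trailing newline in the pid file does not break strconv.Atoi.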
+		pidString := strings.TrimSpace(string(pidByte))
+		if pid, err := strconv.Atoi(pidString); err == nil {
+			if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil {
 				return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path)
 			}
 		}
diff --git a/utils/debug.go b/utils/debug.go
new file mode 100644
index 0000000..d203891
--- /dev/null
+++ b/utils/debug.go
@@ -0,0 +1,26 @@
+package utils
+
+import (
+	"os"
+
+	"github.com/Sirupsen/logrus"
+)
+
+// EnableDebug sets the DEBUG env var to 1
+// and makes the logger log at debug level.
+func EnableDebug() {
+	os.Setenv("DEBUG", "1")
+	logrus.SetLevel(logrus.DebugLevel)
+}
+
+// DisableDebug clears the DEBUG env var
+// and makes the logger log at info level.
+func DisableDebug() {
+	os.Setenv("DEBUG", "")
+	logrus.SetLevel(logrus.InfoLevel)
+}
+
+// IsDebugEnabled checks whether the debug flag is set or not.
+func IsDebugEnabled() bool {
+	return os.Getenv("DEBUG") != ""
+}
diff --git a/vendor/src/github.com/docker/distribution/blobs.go b/vendor/src/github.com/docker/distribution/blobs.go
index 40cd829..ce43ea2 100644
--- a/vendor/src/github.com/docker/distribution/blobs.go
+++ b/vendor/src/github.com/docker/distribution/blobs.go
@@ -9,6 +9,7 @@
 
 	"github.com/docker/distribution/context"
 	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
 )
 
 var (
@@ -40,6 +41,18 @@
 		err.Digest, err.Reason)
 }
 
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
 // Descriptor describes targeted content. Used in conjunction with a blob
 // store, a descriptor can be used to fetch, store and target any kind of
 // blob. The struct also describes the wire protocol format. Fields should
@@ -151,12 +164,21 @@
 	// returned handle can be written to and later resumed using an opaque
 	// identifier. With this approach, one can Close and Resume a BlobWriter
 	// multiple times until the BlobWriter is committed or cancelled.
-	Create(ctx context.Context) (BlobWriter, error)
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
 
 	// Resume attempts to resume a write to a blob, identified by an id.
 	Resume(ctx context.Context, id string) (BlobWriter, error)
 }
 
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
 // BlobWriter provides a handle for inserting data into a blob store.
 // Instances should be obtained from BlobWriteService.Writer and
 // BlobWriteService.Resume. If supported by the store, a writer can be
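
`BlobCreateOption` follows the functional-option pattern: anything with an `Apply(interface{}) error` method can be passed to `Create`, and an ingester is free to ignore options whose target type it doesn't recognize. A self-contained sketch of the contract; the `createOpts` target type and `WithLabel` option are hypothetical (the real client-side option, `WithMountFrom`, appears later in this diff):

```go
package main

import "fmt"

// BlobCreateOption mirrors the interface added above.
type BlobCreateOption interface {
	Apply(interface{}) error
}

// createOpts is a hypothetical option target owned by one ingester.
type createOpts struct {
	label string
}

// optionFunc adapts a plain function to the BlobCreateOption interface.
type optionFunc func(interface{}) error

func (f optionFunc) Apply(v interface{}) error { return f(v) }

// WithLabel is a hypothetical option; an ingester that doesn't recognize
// the target type may refuse (or simply ignore) it.
func WithLabel(l string) BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		opts, ok := v.(*createOpts)
		if !ok {
			return fmt.Errorf("unexpected options type: %T", v)
		}
		opts.label = l
		return nil
	})
}

func main() {
	var opts createOpts
	if err := WithLabel("demo").Apply(&opts); err != nil {
		panic(err)
	}
	fmt.Println(opts.label) // demo
}
```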
diff --git a/vendor/src/github.com/docker/distribution/circle.yml b/vendor/src/github.com/docker/distribution/circle.yml
index e275b2e..e1995d4 100644
--- a/vendor/src/github.com/docker/distribution/circle.yml
+++ b/vendor/src/github.com/docker/distribution/circle.yml
@@ -11,7 +11,7 @@
 
   post:
   # go
-    - gvm install go1.5 --prefer-binary --name=stable
+    - gvm install go1.5.3 --prefer-binary --name=stable
 
   environment:
   # Convenient shortcuts to "common" locations
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
index 52c725d..ad3da3e 100644
--- a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
+++ b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go
@@ -1041,6 +1041,70 @@
 							deniedResponseDescriptor,
 						},
 					},
+					{
+						Name:        "Mount Blob",
+						Description: "Mount a blob identified by the `mount` parameter from another repository.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						QueryParameters: []ParameterDescriptor{
+							{
+								Name:        "mount",
+								Type:        "query",
+								Format:      "<digest>",
+								Regexp:      digest.DigestRegexp,
+								Description: `Digest of blob to mount from the source repository.`,
+							},
+							{
+								Name:        "from",
+								Type:        "query",
+								Format:      "<repository name>",
+								Regexp:      reference.NameRegexp,
+								Description: `Name of the source repository.`,
+							},
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been mounted in the repository and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+						},
+					},
 				},
 			},
 		},
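
Concretely, the descriptor above documents a `POST` to the blob-upload endpoint carrying `mount` and `from` query parameters: `201 Created` means the blob was mounted, while a registry that declines answers with the normal upload path (`202 Accepted`). An illustrative request; host, repository names, and digest are placeholders, and auth is omitted for brevity:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	v := url.Values{}
	v.Set("mount", "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f")
	v.Set("from", "library/source-repo")

	u := "https://registry.example.com/v2/library/target-repo/blobs/uploads/?" + v.Encode()

	resp, err := http.Post(u, "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusCreated:
		// Mounted: no upload needed, the blob is at the returned location.
		fmt.Println("mounted, Location:", resp.Header.Get("Location"))
	case http.StatusAccepted:
		// Registry declined to mount and opened a regular upload session.
		fmt.Println("upload session:", resp.Header.Get("Docker-Upload-UUID"))
	default:
		fmt.Println("request failed:", resp.Status)
	}
}
```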
diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go b/vendor/src/github.com/docker/distribution/registry/client/auth/session.go
index 9819b3c..6b483c6 100644
--- a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go
+++ b/vendor/src/github.com/docker/distribution/registry/client/auth/session.go
@@ -108,6 +108,8 @@
 	tokenLock       sync.Mutex
 	tokenCache      string
 	tokenExpiration time.Time
+
+	additionalScopes map[string]struct{}
 }
 
 // tokenScope represents the scope at which a token will be requested.
@@ -145,6 +147,7 @@
 			Scope:    scope,
 			Actions:  actions,
 		},
+		additionalScopes: map[string]struct{}{},
 	}
 }
 
@@ -160,7 +163,15 @@
 }
 
 func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
-	if err := th.refreshToken(params); err != nil {
+	var additionalScopes []string
+	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
+		additionalScopes = append(additionalScopes, tokenScope{
+			Resource: "repository",
+			Scope:    fromParam,
+			Actions:  []string{"pull"},
+		}.String())
+	}
+	if err := th.refreshToken(params, additionalScopes...); err != nil {
 		return err
 	}
 
@@ -169,11 +180,18 @@
 	return nil
 }
 
-func (th *tokenHandler) refreshToken(params map[string]string) error {
+func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error {
 	th.tokenLock.Lock()
 	defer th.tokenLock.Unlock()
+	var addedScopes bool
+	for _, scope := range additionalScopes {
+		if _, ok := th.additionalScopes[scope]; !ok {
+			th.additionalScopes[scope] = struct{}{}
+			addedScopes = true
+		}
+	}
 	now := th.clock.Now()
-	if now.After(th.tokenExpiration) {
+	if now.After(th.tokenExpiration) || addedScopes {
 		tr, err := th.fetchToken(params)
 		if err != nil {
 			return err
@@ -223,6 +241,10 @@
 		reqParams.Add("scope", scopeField)
 	}
 
+	for scope := range th.additionalScopes {
+		reqParams.Add("scope", scope)
+	}
+
 	if th.creds != nil {
 		username, password := th.creds.Basic(realmURL)
 		if username != "" && password != "" {
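
The net effect: when an upload URL carries a `from` parameter, the handler requests an extra pull scope on the source repository, and forces a token refresh the first time a previously unseen scope appears. A sketch of the scope string being added; the formatting assumes `tokenScope.String()` produces the usual `resource:name:actions` shape of the registry token protocol:

```go
package main

import (
	"fmt"
	"strings"
)

type tokenScope struct {
	Resource string
	Scope    string
	Actions  []string
}

func (ts tokenScope) String() string {
	return fmt.Sprintf("%s:%s:%s", ts.Resource, ts.Scope, strings.Join(ts.Actions, ","))
}

func main() {
	// Mirrors the scope built in AuthorizeRequest when ?from= is present.
	s := tokenScope{
		Resource: "repository",
		Scope:    "library/source-repo", // value of the "from" parameter
		Actions:  []string{"pull"},
	}
	fmt.Println(s) // repository:library/source-repo:pull
}
```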
diff --git a/vendor/src/github.com/docker/distribution/registry/client/repository.go b/vendor/src/github.com/docker/distribution/registry/client/repository.go
index 758c6e5..d652121 100644
--- a/vendor/src/github.com/docker/distribution/registry/client/repository.go
+++ b/vendor/src/github.com/docker/distribution/registry/client/repository.go
@@ -572,8 +572,57 @@
 	return writer.Commit(ctx, desc)
 }
 
-func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) {
-	u, err := bs.ub.BuildBlobUploadURL(bs.name)
+// createOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type createOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+	}
+}
+
+type optionFunc func(interface{}) error
+
+func (f optionFunc) Apply(v interface{}) error {
+	return f(v)
+}
+
+// WithMountFrom returns a BlobCreateOption which designates that the blob should be
+// mounted from the given canonical reference.
+func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
+	return optionFunc(func(v interface{}) error {
+		opts, ok := v.(*createOptions)
+		if !ok {
+			return fmt.Errorf("unexpected options type: %T", v)
+		}
+
+		opts.Mount.ShouldMount = true
+		opts.Mount.From = ref
+
+		return nil
+	})
+}
+
+func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+	var opts createOptions
+
+	for _, option := range options {
+		err := option.Apply(&opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var values []url.Values
+
+	if opts.Mount.ShouldMount {
+		values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
+	}
+
+	u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
+	if err != nil {
+		return nil, err
+	}
 
 	resp, err := bs.client.Post(u, "", nil)
 	if err != nil {
@@ -581,7 +630,14 @@
 	}
 	defer resp.Body.Close()
 
-	if SuccessStatus(resp.StatusCode) {
+	switch resp.StatusCode {
+	case http.StatusCreated:
+		desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
+		if err != nil {
+			return nil, err
+		}
+		return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
+	case http.StatusAccepted:
 		// TODO(dmcgowan): Check for invalid UUID
 		uuid := resp.Header.Get("Docker-Upload-UUID")
 		location, err := sanitizeLocation(resp.Header.Get("Location"), u)
@@ -596,8 +652,9 @@
 			startedAt: time.Now(),
 			location:  location,
 		}, nil
+	default:
+		return nil, HandleErrorResponse(resp)
 	}
-	return nil, HandleErrorResponse(resp)
 }
 
 func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
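
Callers of the new `Create` signature must treat `ErrBlobMounted` as success without a writer: the blob already exists in the target repository and there is nothing to upload. A compile-only sketch against the vendored packages; `pushBlob` and its wiring are hypothetical:

```go
package blobpush

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

// pushBlob asks the registry to mount src into the target repository and
// falls back to a regular upload if no mount happened.
func pushBlob(ctx context.Context, blobs distribution.BlobIngester, src reference.Canonical) (distribution.BlobWriter, error) {
	writer, err := blobs.Create(ctx, client.WithMountFrom(src))
	if err != nil {
		if ebm, ok := err.(distribution.ErrBlobMounted); ok {
			// Mounted: the blob already exists in the target repository,
			// so there is nothing to upload and no writer to commit.
			fmt.Printf("mounted %s from %s\n", ebm.Descriptor.Digest, ebm.From.Name())
			return nil, nil
		}
		return nil, err
	}
	// 202 Accepted: proceed with a normal upload through the writer.
	return writer, nil
}
```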
diff --git a/vendor/src/github.com/docker/libnetwork/.gitignore b/vendor/src/github.com/docker/libnetwork/.gitignore
index 08e11d7..f9cd104 100644
--- a/vendor/src/github.com/docker/libnetwork/.gitignore
+++ b/vendor/src/github.com/docker/libnetwork/.gitignore
@@ -8,6 +8,8 @@
 integration-tmp/
 _obj
 _test
+.vagrant
+
 
 # Architecture specific extensions/prefixes
 *.[568vq]
diff --git a/vendor/src/github.com/docker/libnetwork/CHANGELOG.md b/vendor/src/github.com/docker/libnetwork/CHANGELOG.md
index dc21f35..ea136da 100644
--- a/vendor/src/github.com/docker/libnetwork/CHANGELOG.md
+++ b/vendor/src/github.com/docker/libnetwork/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 0.5.6 (2016-01-14)
+- Setup embedded DNS server correctly on container restart. Fixes docker/docker#19354
+
+## 0.5.5 (2016-01-14)
+- Allow network-scoped alias to be resolved for anonymous endpoint
+- Self repair corrupted IP database that could happen in 1.9.0 & 1.9.1
+- Skip IPTables cleanup if --iptables=false is set. Fixes docker/docker#19063
+
 ## 0.5.4 (2016-01-12)
 - Removed the isNodeAlive protection when user forces an endpoint delete
 
diff --git a/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go b/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
index a537ed0..270a36a 100644
--- a/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
+++ b/vendor/src/github.com/docker/libnetwork/bitseq/sequence.go
@@ -9,6 +9,7 @@
 	"fmt"
 	"sync"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/docker/libnetwork/datastore"
 	"github.com/docker/libnetwork/types"
 )
@@ -243,6 +244,58 @@
 	return err != nil
 }
 
+func (h *Handle) runConsistencyCheck() bool {
+	corrupted := false
+	for p, c := h.head, h.head.next; c != nil; c = c.next {
+		if c.count == 0 {
+			corrupted = true
+			p.next = c.next
+			continue // keep same p
+		}
+		p = c
+	}
+	return corrupted
+}
+
+// CheckConsistency checks if the bit sequence is in an inconsistent state and attempts to fix it.
+// It looks for a corruption signature that may happen in docker 1.9.0 and 1.9.1.
+func (h *Handle) CheckConsistency() error {
+	for {
+		h.Lock()
+		store := h.store
+		h.Unlock()
+
+		if store != nil {
+			if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound {
+				return err
+			}
+		}
+
+		h.Lock()
+		nh := h.getCopy()
+		h.Unlock()
+
+		if !nh.runConsistencyCheck() {
+			return nil
+		}
+
+		if err := nh.writeToStore(); err != nil {
+			if _, ok := err.(types.RetryError); !ok {
+				return fmt.Errorf("internal failure while fixing inconsistent bitsequence: %v", err)
+			}
+			continue
+		}
+
+		log.Infof("Fixed inconsistent bit sequence in datastore:\n%s\n%s", h, nh)
+
+		h.Lock()
+		h.head = nh.head
+		h.Unlock()
+
+		return nil
+	}
+}
+
 // set/reset the bit
 func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64, error) {
 	var (
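
`runConsistencyCheck` walks the run-length-encoded sequence list and unlinks any node with a zero count (the corruption signature from 1.9.x), while `CheckConsistency` re-reads from the store and retries on `RetryError`. A self-contained illustration of the unlink step, using a simplified stand-in for bitseq's sequence type:

```go
package main

import "fmt"

type sequence struct {
	count uint64 // number of consecutive blocks; 0 is the corruption marker
	next  *sequence
}

func fix(head *sequence) bool {
	corrupted := false
	for p, c := head, head.next; c != nil; c = c.next {
		if c.count == 0 {
			corrupted = true
			p.next = c.next
			continue // p stays put, exactly as in runConsistencyCheck
		}
		p = c
	}
	return corrupted
}

func main() {
	head := &sequence{count: 4, next: &sequence{count: 0, next: &sequence{count: 8}}}
	fmt.Println(fix(head))       // true: a zero-count node was unlinked
	fmt.Println(head.next.count) // 8: the corrupted node is gone
}
```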
diff --git a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
index 7b2bdeb..2bb4350 100644
--- a/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -135,7 +135,7 @@
 	if err := iptables.FirewalldInit(); err != nil {
 		logrus.Debugf("Fail to initialize firewalld: %v, using raw iptables instead", err)
 	}
-	removeIPChains()
+
 	d := newDriver()
 	if err := d.configure(config); err != nil {
 		return err
@@ -378,6 +378,7 @@
 	}
 
 	if config.EnableIPTables {
+		removeIPChains()
 		natChain, filterChain, isolationChain, err = setupIPChains(config)
 		if err != nil {
 			return err
diff --git a/vendor/src/github.com/docker/libnetwork/ipam/allocator.go b/vendor/src/github.com/docker/libnetwork/ipam/allocator.go
index be8b4ac..ce404e2 100644
--- a/vendor/src/github.com/docker/libnetwork/ipam/allocator.go
+++ b/vendor/src/github.com/docker/libnetwork/ipam/allocator.go
@@ -70,6 +70,9 @@
 		}
 	}
 
+	a.checkConsistency(localAddressSpace)
+	a.checkConsistency(globalAddressSpace)
+
 	return a, nil
 }
 
@@ -115,6 +118,25 @@
 	return nil
 }
 
+// Checks for and fixes damaged bitmasks. Meant to be called from the constructor only.
+func (a *Allocator) checkConsistency(as string) {
+	// Retrieve this address space's configuration and bitmasks from the datastore
+	a.refresh(as)
+	aSpace, ok := a.addrSpaces[as]
+	if !ok {
+		return
+	}
+	a.updateBitMasks(aSpace)
+	for sk, pd := range aSpace.subnets {
+		if pd.Range != nil {
+			continue
+		}
+		if err := a.addresses[sk].CheckConsistency(); err != nil {
+			log.Warnf("Error while running consistency check for %s: %v", sk, err)
+		}
+	}
+}
+
 // GetDefaultAddressSpaces returns the local and global default address spaces
 func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) {
 	return localAddressSpace, globalAddressSpace, nil
diff --git a/vendor/src/github.com/docker/libnetwork/network.go b/vendor/src/github.com/docker/libnetwork/network.go
index e582767..7449c90 100644
--- a/vendor/src/github.com/docker/libnetwork/network.go
+++ b/vendor/src/github.com/docker/libnetwork/network.go
@@ -822,20 +822,20 @@
 }
 
 func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) {
-	if ep.isAnonymous() {
-		return
-	}
-
 	epName := ep.Name()
 	if iface := ep.Iface(); iface.Address() != nil {
 		myAliases := ep.MyAliases()
 		if isAdd {
-			n.addSvcRecords(epName, iface.Address().IP, true)
+			if !ep.isAnonymous() {
+				n.addSvcRecords(epName, iface.Address().IP, true)
+			}
 			for _, alias := range myAliases {
 				n.addSvcRecords(alias, iface.Address().IP, false)
 			}
 		} else {
-			n.deleteSvcRecords(epName, iface.Address().IP, true)
+			if !ep.isAnonymous() {
+				n.deleteSvcRecords(epName, iface.Address().IP, true)
+			}
 			for _, alias := range myAliases {
 				n.deleteSvcRecords(alias, iface.Address().IP, false)
 			}
diff --git a/vendor/src/github.com/docker/libnetwork/resolver.go b/vendor/src/github.com/docker/libnetwork/resolver.go
index 3cd74e0..d395ab4 100644
--- a/vendor/src/github.com/docker/libnetwork/resolver.go
+++ b/vendor/src/github.com/docker/libnetwork/resolver.go
@@ -15,7 +15,8 @@
 type Resolver interface {
 	// Start starts the name server for the container
 	Start() error
-	// Stop stops the name server for the container
+	// Stop stops the name server for the container. A stopped resolver
+	// can be reused after running SetupFunc again.
 	Stop()
 	// SetupFunc() provides the setup function that should be run
 	// in the container's network namespace.
@@ -102,6 +103,8 @@
 	if r.server != nil {
 		r.server.Shutdown()
 	}
+	r.conn = nil
+	r.err = fmt.Errorf("setup not done yet")
 }
 
 func (r *resolver) SetExtServers(dns []string) {
diff --git a/vendor/src/github.com/docker/libnetwork/sandbox.go b/vendor/src/github.com/docker/libnetwork/sandbox.go
index 8977cf3..9dbb100 100644
--- a/vendor/src/github.com/docker/libnetwork/sandbox.go
+++ b/vendor/src/github.com/docker/libnetwork/sandbox.go
@@ -488,23 +488,22 @@
 }
 
 func (sb *sandbox) SetKey(basePath string) error {
-	var err error
 	if basePath == "" {
 		return types.BadRequestErrorf("invalid sandbox key")
 	}
 
 	sb.Lock()
-	osSbox := sb.osSbox
+	oldosSbox := sb.osSbox
 	sb.Unlock()
 
-	if osSbox != nil {
+	if oldosSbox != nil {
 		// If we already have an OS sandbox, release the network resources from that
	// and destroy the OS sandbox. We are moving into a new home further down. Note that none
 		// of the network resources gets destroyed during the move.
 		sb.releaseOSSbox()
 	}
 
-	osSbox, err = osl.GetSandboxForExternalKey(basePath, sb.Key())
+	osSbox, err := osl.GetSandboxForExternalKey(basePath, sb.Key())
 	if err != nil {
 		return err
 	}
@@ -520,6 +519,17 @@
 		}
 	}()
 
+	// If the resolver was set up before, stop it and set it up again in the
+	// new osl sandbox.
+	if oldosSbox != nil && sb.resolver != nil {
+		sb.resolver.Stop()
+
+		sb.osSbox.InvokeFunc(sb.resolver.SetupFunc())
+		if err := sb.resolver.Start(); err != nil {
+			log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err)
+		}
+	}
+
 	for _, ep := range sb.getConnectedEndpoints() {
 		if err = sb.populateNetworkResources(ep); err != nil {
 			return err
diff --git a/vendor/src/github.com/imdario/mergo/.travis.yml b/vendor/src/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 0000000..9d91c63
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,2 @@
+language: go
+install: go get -t
diff --git a/vendor/src/github.com/imdario/mergo/LICENSE b/vendor/src/github.com/imdario/mergo/LICENSE
new file mode 100644
index 0000000..6866802
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/imdario/mergo/README.md b/vendor/src/github.com/imdario/mergo/README.md
new file mode 100644
index 0000000..4f0f990
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/README.md
@@ -0,0 +1,122 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
+
+![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg)
+
+## Status
+
+It is ready for production use. It works fine after extensive use in the wild.
+
+[![Build Status][1]][2]
+[![GoDoc](https://godoc.org/github.com/imdario/mergo?status.svg)](https://godoc.org/github.com/imdario/mergo)
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+
+### Important note
+
+Mergo is intended to assign **only** zero-value fields on the destination from the source value. It has worked this way since April 6th 2015; before that it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8), overwriting functions were added and the wrong behavior was clearly detected.
+
+If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Mergo in the wild
+
+- [imdario/zas](https://github.com/imdario/zas)
+- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+
+## Installation
+
+    go get github.com/imdario/mergo
+
+    // use in your .go code
+    import (
+        "github.com/imdario/mergo"
+    )
+
+## Usage
+
+You can only merge same-type structs whose exported fields are initialized to the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but will recursively merge any exported ones. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+    if err := mergo.Merge(&dst, src); err != nil {
+        // ...
+    }
+
+Additionally, you can map a map[string]interface{} to a struct (and vice versa, from a struct to a map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+    if err := mergo.Map(&dst, srcMap); err != nil {
+        // ...
+    }
+
+Warning: if you map a struct to a map, the mapping won't be recursive. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will just be assigned as values.
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/imdario/mergo"
+)
+
+type Foo struct {
+	A string
+	B int64
+}
+
+func main() {
+	src := Foo{
+		A: "one",
+	}
+
+	dest := Foo{
+		A: "two",
+		B: 2,
+	}
+
+	mergo.Merge(&dest, src)
+
+	fmt.Println(dest)
+	// Will print
+	// {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+    go get gopkg.in/yaml.v1
+
+## Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
diff --git a/vendor/src/github.com/imdario/mergo/doc.go b/vendor/src/github.com/imdario/mergo/doc.go
new file mode 100644
index 0000000..6e9aa7b
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields but will recursively merge any exported ones. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+	type networkConfig struct {
+		Protocol string
+		Address string
+		ServerType string `json: "server_type"`
+		Port uint16
+	}
+
+	type FssnConfig struct {
+		Network networkConfig
+	}
+
+	var fssnDefault = FssnConfig {
+		networkConfig {
+			"tcp",
+			"127.0.0.1",
+			"http",
+			31560,
+		},
+	}
+
+	// Inside a function [...]
+
+	if err := mergo.Merge(&config, fssnDefault); err != nil {
+		log.Fatal(err)
+	}
+
+	// More code [...]
+
+*/
+package mergo
diff --git a/vendor/src/github.com/imdario/mergo/map.go b/vendor/src/github.com/imdario/mergo/map.go
new file mode 100644
index 0000000..1ed3d71
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/map.go
@@ -0,0 +1,154 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	zeroValue := reflect.Value{}
+	switch dst.Kind() {
+	case reflect.Map:
+		dstMap := dst.Interface().(map[string]interface{})
+		for i, n := 0, src.NumField(); i < n; i++ {
+			srcType := src.Type()
+			field := srcType.Field(i)
+			if !isExported(field) {
+				continue
+			}
+			fieldName := field.Name
+			fieldName = changeInitialCase(fieldName, unicode.ToLower)
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+				dstMap[fieldName] = src.Field(i).Interface()
+			}
+		}
+	case reflect.Struct:
+		srcMap := src.Interface().(map[string]interface{})
+		for key := range srcMap {
+			srcValue := srcMap[key]
+			fieldName := changeInitialCase(key, unicode.ToUpper)
+			dstElement := dst.FieldByName(fieldName)
+			if dstElement == zeroValue {
+				// We discard it because the field doesn't exist.
+				continue
+			}
+			srcElement := reflect.ValueOf(srcValue)
+			dstKind := dstElement.Kind()
+			srcKind := srcElement.Kind()
+			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+				srcElement = srcElement.Elem()
+				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+			} else if dstKind == reflect.Ptr {
+				// Can this work? I guess it can't.
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcPtr := srcElement.Addr()
+					srcElement = reflect.ValueOf(srcPtr)
+					srcKind = reflect.Ptr
+				}
+			}
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+					return
+				}
+			} else {
+				if srcKind == reflect.Map {
+					if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+						return
+					}
+				} else {
+					return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+				}
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will recursively merge
+// any exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// A key in src that doesn't match any field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}) error {
+	return _map(dst, src, false)
+}
+
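+// MapWithOverwrite is like Map, except that non-empty src values also
+// overwrite keys or fields already set in dst.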
+func MapWithOverwrite(dst, src interface{}) error {
+	return _map(dst, src, true)
+}
+
+func _map(dst, src interface{}, overwrite bool) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be friction-less, we redirect equal-type arguments
+	// to deepMerge. Only because arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+	}
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
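
A short example of the mapping rules described above, including `MapWithOverwrite`: struct fields become lower-cased keys going out, and keys are capitalized to locate exported fields coming back.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Name string
	Port int
}

func main() {
	// Struct into map: only missing or empty keys are filled in, so "name"
	// survives and "port" is added.
	dst := map[string]interface{}{"name": "kept"}
	if err := mergo.Map(&dst, Config{Name: "ignored", Port: 8080}); err != nil {
		panic(err)
	}
	fmt.Println(dst["name"], dst["port"]) // kept 8080

	// Map into struct: keys are capitalized to find the exported fields, and
	// MapWithOverwrite replaces values that are already set.
	c := Config{Name: "old"}
	if err := mergo.MapWithOverwrite(&c, map[string]interface{}{"name": "svc", "port": 9090}); err != nil {
		panic(err)
	}
	fmt.Println(c) // {svc 9090}
}
```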
diff --git a/vendor/src/github.com/imdario/mergo/merge.go b/vendor/src/github.com/imdario/mergo/merge.go
new file mode 100644
index 0000000..a7dd9d8
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/merge.go
@@ -0,0 +1,120 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"reflect"
+)
+
+// Recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) {
+	if !src.IsValid() {
+		return
+	}
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	switch dst.Kind() {
+	case reflect.Struct:
+		for i, n := 0, dst.NumField(); i < n; i++ {
+			if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil {
+				return
+			}
+		}
+	case reflect.Map:
+		for _, key := range src.MapKeys() {
+			srcElement := src.MapIndex(key)
+			if !srcElement.IsValid() {
+				continue
+			}
+			dstElement := dst.MapIndex(key)
+			switch srcElement.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+				if srcElement.IsNil() {
+					continue
+				}
+				fallthrough
+			default:
+				switch reflect.TypeOf(srcElement.Interface()).Kind() {
+				case reflect.Struct:
+					fallthrough
+				case reflect.Ptr:
+					fallthrough
+				case reflect.Map:
+					if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil {
+						return
+					}
+				}
+			}
+			if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) {
+				if dst.IsNil() {
+					dst.Set(reflect.MakeMap(dst.Type()))
+				}
+				dst.SetMapIndex(key, srcElement)
+			}
+		}
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Interface:
+		if src.IsNil() {
+			break
+		} else if dst.IsNil() {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil {
+			return
+		}
+	default:
+		if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	}
+	return
+}
+
+// Merge sets fields' values in dst from src if they have a zero
+// value of their type.
+// dst and src must be valid same-type structs and dst must be
+// a pointer to struct.
+// It won't merge unexported (private) fields and will recursively merge
+// any exported field.
+func Merge(dst, src interface{}) error {
+	return merge(dst, src, false)
+}
+
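+// MergeWithOverwrite is like Merge, except that non-empty src fields also
+// overwrite dst fields that are already set.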
+func MergeWithOverwrite(dst, src interface{}) error {
+	return merge(dst, src, true)
+}
+
+func merge(dst, src interface{}, overwrite bool) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite)
+}
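
The difference between the two entry points, using the same `Foo` shape as the vendored README's example: `Merge` only fills zero-value destination fields, while `MergeWithOverwrite` also replaces fields that are already set.

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{A: "one", B: 1}

	dst1 := Foo{A: "two"}
	if err := mergo.Merge(&dst1, src); err != nil {
		panic(err)
	}
	fmt.Println(dst1) // {two 1}: only the zero-value B is filled in

	dst2 := Foo{A: "two"}
	if err := mergo.MergeWithOverwrite(&dst2, src); err != nil {
		panic(err)
	}
	fmt.Println(dst2) // {one 1}: the non-zero A is overwritten too
}
```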
diff --git a/vendor/src/github.com/imdario/mergo/mergo.go b/vendor/src/github.com/imdario/mergo/mergo.go
new file mode 100644
index 0000000..f8a0991
--- /dev/null
+++ b/vendor/src/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it re-encounters them.
+// Visited entries are stored in a map indexed by 17 * addr.
+type visit struct {
+	ptr  uintptr
+	typ  reflect.Type
+	next *visit
+}
+
+// From src/pkg/encoding/json.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+	if dst == nil || src == nil {
+		err = ErrNilArguments
+		return
+	}
+	vDst = reflect.ValueOf(dst).Elem()
+	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+		err = ErrNotSupported
+		return
+	}
+	vSrc = reflect.ValueOf(src)
+	// We check if vSrc is a pointer to dereference it.
+	if vSrc.Kind() == reflect.Ptr {
+		vSrc = vSrc.Elem()
+	}
+	return
+}
+
+// Recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	return // TODO refactor
+}