Merge pull request #39128 from thaJeztah/bump_go_units

bump docker/go-units v0.4.0
diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go
index 3f13d06..37b6c24 100644
--- a/builder/builder-next/controller.go
+++ b/builder/builder-next/controller.go
@@ -21,6 +21,7 @@
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/cache/remotecache"
 	inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
+	localremotecache "github.com/moby/buildkit/cache/remotecache/local"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/frontend"
@@ -186,6 +187,7 @@
 		CacheKeyStorage:  cacheStorage,
 		ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
 			"registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt, dist.ReferenceStore, dist.ImageStore),
+			"local":    localremotecache.ResolveCacheImporterFunc(opt.SessionManager),
 		},
 		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
 			"inline": inlineremotecache.ResolveCacheExporterFunc(),
diff --git a/hack/dockerfile/install/proxy.installer b/hack/dockerfile/install/proxy.installer
index 05876fd..f419693 100755
--- a/hack/dockerfile/install/proxy.installer
+++ b/hack/dockerfile/install/proxy.installer
@@ -3,7 +3,7 @@
 # LIBNETWORK_COMMIT is used to build the docker-userland-proxy binary. When
 # updating the binary version, consider updating github.com/docker/libnetwork
 # in vendor.conf accordingly
-LIBNETWORK_COMMIT=ebcade70ad1059b070d0040d798ecca359bc5fed
+LIBNETWORK_COMMIT=9ff9b57c344df5cd47443ad9e65702ec85c5aeb0
 
 install_proxy() {
 	case "$1" in
diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go
index c353192..05eac68 100644
--- a/pkg/namesgenerator/names-generator.go
+++ b/pkg/namesgenerator/names-generator.go
@@ -705,6 +705,9 @@
 		// Mildred Sanderson - American mathematician best known for Sanderson's theorem concerning modular invariants. https://en.wikipedia.org/wiki/Mildred_Sanderson
 		"sanderson",
 
+		// Satoshi Nakamoto is the name used by the unknown person or group of people who developed bitcoin, authored the bitcoin white paper, and created and deployed bitcoin's original reference implementation. https://en.wikipedia.org/wiki/Satoshi_Nakamoto
+		"satoshi",
+
 		// Adi Shamir - Israeli cryptographer whose numerous inventions and contributions to cryptography include the Feige Fiat Shamir identification scheme, the Rivest Shamir Adleman (RSA) public-key cryptosystem, Shamir's secret sharing scheme, the breaking of the Merkle-Hellman cryptosystem, the TWINKLE and TWIRL factoring devices and the discovery of differential cryptanalysis (with Eli Biham). https://en.wikipedia.org/wiki/Adi_Shamir
 		"shamir",
 
diff --git a/vendor.conf b/vendor.conf
index eeb8733..78b0bef 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -27,7 +27,7 @@
 golang.org/x/sync                                   e225da77a7e68af35c70ccbf71af2b83e6acac3c
 
 # buildkit
-github.com/moby/buildkit                            b3028967ae6259c9a31c1a1deeccd30fe3469cce
+github.com/moby/buildkit                            8818c67cff663befa7b70f21454e340f71616581
 github.com/tonistiigi/fsutil                        3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing          8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go               1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -39,7 +39,7 @@
 # libnetwork
 
 # When updating, also update LIBNETWORK_COMMIT in hack/dockerfile/install/proxy.installer accordingly
-github.com/docker/libnetwork                        48f846327bbe6a0dce0c556e8dc9f5bb939d5c16
+github.com/docker/libnetwork                        9ff9b57c344df5cd47443ad9e65702ec85c5aeb0
 github.com/docker/go-events                         9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/armon/go-radix                           e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics                         eb0af217e5e9747e41dd5303755356b62d28e3ec
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
index d1a4d9e..cc5679f 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
@@ -171,7 +171,19 @@
 			return err
 		}
 
-		pbPolicy, err := windows.ConvertPortBindings(epConnectivity.PortBindings)
+		ep.portMapping = epConnectivity.PortBindings
+		ep.portMapping, err = windows.AllocatePorts(n.portMapper, ep.portMapping, ep.addr.IP)
+		if err != nil {
+			return err
+		}
+
+		defer func() {
+			if err != nil {
+				windows.ReleasePorts(n.portMapper, ep.portMapping)
+			}
+		}()
+
+		pbPolicy, err := windows.ConvertPortBindings(ep.portMapping)
 		if err != nil {
 			return err
 		}
@@ -229,6 +241,8 @@
 		return fmt.Errorf("endpoint id %q not found", eid)
 	}
 
+	windows.ReleasePorts(n.portMapper, ep.portMapping)
+
 	n.deleteEndpoint(eid)
 
 	_, err := endpointRequest("DELETE", ep.profileID, "")
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
index 9cc46f8..592cfc6 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
@@ -11,6 +11,7 @@
 	"github.com/Microsoft/hcsshim"
 	"github.com/docker/libnetwork/driverapi"
 	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/portmapper"
 	"github.com/docker/libnetwork/types"
 	"github.com/sirupsen/logrus"
 )
@@ -46,6 +47,7 @@
 	initErr         error
 	subnets         []*subnet
 	secure          bool
+	portMapper      *portmapper.PortMapper
 	sync.Mutex
 }
 
@@ -89,10 +91,11 @@
 	}
 
 	n := &network{
-		id:        id,
-		driver:    d,
-		endpoints: endpointTable{},
-		subnets:   []*subnet{},
+		id:         id,
+		driver:     d,
+		endpoints:  endpointTable{},
+		subnets:    []*subnet{},
+		portMapper: portmapper.New(""),
 	}
 
 	genData, ok := option[netlabel.GenericData].(map[string]string)
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go b/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go
new file mode 100644
index 0000000..51791fd
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/port_mapping.go
@@ -0,0 +1,125 @@
+// +build windows
+
+package windows
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/docker/libnetwork/portmapper"
+	"github.com/docker/libnetwork/types"
+	"github.com/ishidawataru/sctp"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	maxAllocatePortAttempts = 10
+)
+
+// ErrUnsupportedAddressType is returned when the specified address type is not supported.
+type ErrUnsupportedAddressType string
+
+func (uat ErrUnsupportedAddressType) Error() string {
+	return fmt.Sprintf("unsupported address type: %s", string(uat))
+}
+
+// AllocatePorts allocates ports specified in bindings from the portMapper
+func AllocatePorts(portMapper *portmapper.PortMapper, bindings []types.PortBinding, containerIP net.IP) ([]types.PortBinding, error) {
+	bs := make([]types.PortBinding, 0, len(bindings))
+	for _, c := range bindings {
+		b := c.GetCopy()
+		if err := allocatePort(portMapper, &b, containerIP); err != nil {
+			// On allocation failure, release previously allocated ports. On cleanup error, just log a warning message
+			if cuErr := ReleasePorts(portMapper, bs); cuErr != nil {
+				logrus.Warnf("Upon allocation failure for %v, failed to clear previously allocated port bindings: %v", b, cuErr)
+			}
+			return nil, err
+		}
+		bs = append(bs, b)
+	}
+	return bs, nil
+}
+
+func allocatePort(portMapper *portmapper.PortMapper, bnd *types.PortBinding, containerIP net.IP) error {
+	var (
+		host net.Addr
+		err  error
+	)
+
+	// Store the container interface address in the operational binding
+	bnd.IP = containerIP
+
+	// Adjust HostPortEnd if this is not a range.
+	if bnd.HostPortEnd == 0 {
+		bnd.HostPortEnd = bnd.HostPort
+	}
+
+	// Construct the container side transport address
+	container, err := bnd.ContainerAddr()
+	if err != nil {
+		return err
+	}
+
+	// Try up to maxAllocatePortAttempts times to get a port that's not already allocated.
+	for i := 0; i < maxAllocatePortAttempts; i++ {
+		if host, err = portMapper.MapRange(container, bnd.HostIP, int(bnd.HostPort), int(bnd.HostPortEnd), false); err == nil {
+			break
+		}
+		// There is no point in immediately retrying to map an explicitly chosen port.
+		if bnd.HostPort != 0 {
+			logrus.Warnf("Failed to allocate and map port %d-%d: %s", bnd.HostPort, bnd.HostPortEnd, err)
+			break
+		}
+		logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Save the host port (regardless of whether it was specified in the binding)
+	switch netAddr := host.(type) {
+	case *net.TCPAddr:
+		bnd.HostPort = uint16(host.(*net.TCPAddr).Port)
+		break
+	case *net.UDPAddr:
+		bnd.HostPort = uint16(host.(*net.UDPAddr).Port)
+		break
+	case *sctp.SCTPAddr:
+		bnd.HostPort = uint16(host.(*sctp.SCTPAddr).Port)
+		break
+	default:
+		// For completeness
+		return ErrUnsupportedAddressType(fmt.Sprintf("%T", netAddr))
+	}
+	// Windows does not support host port ranges.
+	bnd.HostPortEnd = bnd.HostPort
+	return nil
+}
+
+// ReleasePorts releases ports specified in bindings from the portMapper
+func ReleasePorts(portMapper *portmapper.PortMapper, bindings []types.PortBinding) error {
+	var errorBuf bytes.Buffer
+
+	// Attempt to release all port bindings; do not stop on failure
+	for _, m := range bindings {
+		if err := releasePort(portMapper, m); err != nil {
+			errorBuf.WriteString(fmt.Sprintf("\ncould not release %v because of %v", m, err))
+		}
+	}
+
+	if errorBuf.Len() != 0 {
+		return errors.New(errorBuf.String())
+	}
+	return nil
+}
+
+func releasePort(portMapper *portmapper.PortMapper, bnd types.PortBinding) error {
+	// Construct the host side transport address
+	host, err := bnd.HostAddr()
+	if err != nil {
+		return err
+	}
+	return portMapper.Unmap(host)
+}
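
The new `AllocatePorts`/`ReleasePorts` helpers are meant to be paired: allocate when the endpoint is created, release both on failure (via a deferred rollback) and on endpoint delete, exactly as the overlay endpoint code above does. A minimal sketch of a caller, with hypothetical names, assuming only the libnetwork APIs shown in this diff:

```go
// Hypothetical caller (not part of this diff) showing the intended
// allocate/rollback/release pattern; builds only on Windows because the
// drivers/windows package is build-tagged.
package example

import (
	"net"

	"github.com/docker/libnetwork/drivers/windows"
	"github.com/docker/libnetwork/portmapper"
	"github.com/docker/libnetwork/types"
)

func bindEndpointPorts(pm *portmapper.PortMapper, containerIP net.IP, requested []types.PortBinding) (bound []types.PortBinding, err error) {
	if pm == nil {
		pm = portmapper.New("") // no userland proxy on Windows
	}
	bound, err = windows.AllocatePorts(pm, requested, containerIP)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			// roll back the host-side reservations if a later step fails,
			// mirroring the deferred ReleasePorts in ov_endpoint_windows.go
			windows.ReleasePorts(pm, bound)
		}
	}()
	// ...convert `bound` to HNS policies with windows.ConvertPortBindings and
	// create the endpoint here...
	return bound, nil
}
```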
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
index c1cc61a..c8ab047 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
@@ -25,6 +25,7 @@
 	"github.com/docker/libnetwork/discoverapi"
 	"github.com/docker/libnetwork/driverapi"
 	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/portmapper"
 	"github.com/docker/libnetwork/types"
 	"github.com/sirupsen/logrus"
 )
@@ -88,11 +89,12 @@
 }
 
 type hnsNetwork struct {
-	id        string
-	created   bool
-	config    *networkConfiguration
-	endpoints map[string]*hnsEndpoint // key: endpoint id
-	driver    *driver                 // The network's driver
+	id         string
+	created    bool
+	config     *networkConfiguration
+	endpoints  map[string]*hnsEndpoint // key: endpoint id
+	driver     *driver                 // The network's driver
+	portMapper *portmapper.PortMapper
 	sync.Mutex
 }
 
@@ -252,10 +254,11 @@
 
 func (d *driver) createNetwork(config *networkConfiguration) error {
 	network := &hnsNetwork{
-		id:        config.ID,
-		endpoints: make(map[string]*hnsEndpoint),
-		config:    config,
-		driver:    d,
+		id:         config.ID,
+		endpoints:  make(map[string]*hnsEndpoint),
+		config:     config,
+		driver:     d,
+		portMapper: portmapper.New(""),
 	}
 
 	d.Lock()
@@ -610,7 +613,27 @@
 		endpointStruct.MacAddress = strings.Replace(macAddress.String(), ":", "-", -1)
 	}
 
-	endpointStruct.Policies, err = ConvertPortBindings(epConnectivity.PortBindings)
+	portMapping := epConnectivity.PortBindings
+
+	if n.config.Type == "l2bridge" || n.config.Type == "l2tunnel" {
+		ip := net.IPv4(0, 0, 0, 0)
+		if ifInfo.Address() != nil {
+			ip = ifInfo.Address().IP
+		}
+
+		portMapping, err = AllocatePorts(n.portMapper, portMapping, ip)
+		if err != nil {
+			return err
+		}
+
+		defer func() {
+			if err != nil {
+				ReleasePorts(n.portMapper, portMapping)
+			}
+		}()
+	}
+
+	endpointStruct.Policies, err = ConvertPortBindings(portMapping)
 	if err != nil {
 		return err
 	}
@@ -721,6 +744,10 @@
 		return err
 	}
 
+	if n.config.Type == "l2bridge" || n.config.Type == "l2tunnel" {
+		ReleasePorts(n.portMapper, ep.portMapping)
+	}
+
 	n.Lock()
 	delete(n.endpoints, eid)
 	n.Unlock()
diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator.go
index 191b478..9798d23 100644
--- a/vendor/github.com/docker/libnetwork/portallocator/portallocator.go
+++ b/vendor/github.com/docker/libnetwork/portallocator/portallocator.go
@@ -1,5 +1,3 @@
-// +build !windows
-
 package portallocator
 
 import (
diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator_windows.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator_windows.go
index f07ae88..98cae14 100644
--- a/vendor/github.com/docker/libnetwork/portallocator/portallocator_windows.go
+++ b/vendor/github.com/docker/libnetwork/portallocator/portallocator_windows.go
@@ -1 +1,10 @@
 package portallocator
+
+const (
+	StartPortRange = 60000
+	EndPortRange   = 65000
+)
+
+func getDynamicPortRange() (start int, end int, err error) {
+	return StartPortRange, EndPortRange, nil
+}
diff --git a/vendor/github.com/docker/libnetwork/portmapper/mapper.go b/vendor/github.com/docker/libnetwork/portmapper/mapper.go
index 7fa37b1..be4157b 100644
--- a/vendor/github.com/docker/libnetwork/portmapper/mapper.go
+++ b/vendor/github.com/docker/libnetwork/portmapper/mapper.go
@@ -4,9 +4,7 @@
 	"errors"
 	"fmt"
 	"net"
-	"sync"
 
-	"github.com/docker/libnetwork/iptables"
 	"github.com/docker/libnetwork/portallocator"
 	"github.com/ishidawataru/sctp"
 	"github.com/sirupsen/logrus"
@@ -32,20 +30,6 @@
 	ErrSCTPAddrNoIP = errors.New("sctp address does not contain any IP address")
 )
 
-// PortMapper manages the network address translation
-type PortMapper struct {
-	chain      *iptables.ChainInfo
-	bridgeName string
-
-	// udp:ip:port
-	currentMappings map[string]*mapping
-	lock            sync.Mutex
-
-	proxyPath string
-
-	Allocator *portallocator.PortAllocator
-}
-
 // New returns a new instance of PortMapper
 func New(proxyPath string) *PortMapper {
 	return NewWithPortAllocator(portallocator.Get(), proxyPath)
@@ -60,12 +44,6 @@
 	}
 }
 
-// SetIptablesChain sets the specified chain into portmapper
-func (pm *PortMapper) SetIptablesChain(c *iptables.ChainInfo, bridgeName string) {
-	pm.chain = c
-	pm.bridgeName = bridgeName
-}
-
 // Map maps the specified container transport address to the host's network address and transport port
 func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int, useProxy bool) (host net.Addr, err error) {
 	return pm.MapRange(container, hostIP, hostPort, hostPort, useProxy)
@@ -174,7 +152,7 @@
 
 	containerIP, containerPort := getIPAndPort(m.container)
 	if hostIP.To4() != nil {
-		if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
+		if err := pm.AppendForwardingTableEntry(m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil {
 			return nil, err
 		}
 	}
@@ -183,7 +161,7 @@
 		// need to undo the iptables rules before we return
 		m.userlandProxy.Stop()
 		if hostIP.To4() != nil {
-			pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
+			pm.DeleteForwardingTableEntry(m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort)
 			if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil {
 				return err
 			}
@@ -222,7 +200,7 @@
 
 	containerIP, containerPort := getIPAndPort(data.container)
 	hostIP, hostPort := getIPAndPort(data.host)
-	if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
+	if err := pm.DeleteForwardingTableEntry(data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
 		logrus.Errorf("Error on iptables delete: %s", err)
 	}
 
@@ -248,7 +226,7 @@
 	for _, data := range pm.currentMappings {
 		containerIP, containerPort := getIPAndPort(data.container)
 		hostIP, hostPort := getIPAndPort(data.host)
-		if err := pm.forward(iptables.Append, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
+		if err := pm.AppendForwardingTableEntry(data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil {
 			logrus.Errorf("Error on iptables add: %s", err)
 		}
 	}
@@ -285,10 +263,3 @@
 	}
 	return nil, 0
 }
-
-func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
-	if pm.chain == nil {
-		return nil
-	}
-	return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort, pm.bridgeName)
-}
diff --git a/vendor/github.com/docker/libnetwork/portmapper/mapper_linux.go b/vendor/github.com/docker/libnetwork/portmapper/mapper_linux.go
new file mode 100644
index 0000000..0e76c54
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portmapper/mapper_linux.go
@@ -0,0 +1,46 @@
+package portmapper
+
+import (
+	"net"
+	"sync"
+
+	"github.com/docker/libnetwork/iptables"
+	"github.com/docker/libnetwork/portallocator"
+)
+
+// PortMapper manages the network address translation
+type PortMapper struct {
+	bridgeName string
+
+	// udp:ip:port
+	currentMappings map[string]*mapping
+	lock            sync.Mutex
+
+	proxyPath string
+
+	Allocator *portallocator.PortAllocator
+	chain     *iptables.ChainInfo
+}
+
+// SetIptablesChain sets the specified chain into portmapper
+func (pm *PortMapper) SetIptablesChain(c *iptables.ChainInfo, bridgeName string) {
+	pm.chain = c
+	pm.bridgeName = bridgeName
+}
+
+// AppendForwardingTableEntry adds a port mapping to the forwarding table
+func (pm *PortMapper) AppendForwardingTableEntry(proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
+	return pm.forward(iptables.Append, proto, sourceIP, sourcePort, containerIP, containerPort)
+}
+
+// DeleteForwardingTableEntry removes a port mapping from the forwarding table
+func (pm *PortMapper) DeleteForwardingTableEntry(proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
+	return pm.forward(iptables.Delete, proto, sourceIP, sourcePort, containerIP, containerPort)
+}
+
+func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
+	if pm.chain == nil {
+		return nil
+	}
+	return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort, pm.bridgeName)
+}
diff --git a/vendor/github.com/docker/libnetwork/portmapper/mapper_windows.go b/vendor/github.com/docker/libnetwork/portmapper/mapper_windows.go
new file mode 100644
index 0000000..89651e5
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portmapper/mapper_windows.go
@@ -0,0 +1,31 @@
+package portmapper
+
+import (
+	"net"
+	"sync"
+
+	"github.com/docker/libnetwork/portallocator"
+)
+
+// PortMapper manages the network address translation
+type PortMapper struct {
+	bridgeName string
+
+	// udp:ip:port
+	currentMappings map[string]*mapping
+	lock            sync.Mutex
+
+	proxyPath string
+
+	Allocator *portallocator.PortAllocator
+}
+
+// AppendForwardingTableEntry adds a port mapping to the forwarding table
+func (pm *PortMapper) AppendForwardingTableEntry(proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
+	return nil
+}
+
+// DeleteForwardingTableEntry removes a port mapping from the forwarding table
+func (pm *PortMapper) DeleteForwardingTableEntry(proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error {
+	return nil
+}
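
With the port mapper split into `mapper_linux.go` and `mapper_windows.go`, the shared `Map`/`MapRange`/`Unmap` paths now compile on Windows: host ports are still reserved through the allocator, while the forwarding-table calls are no-ops (the Windows drivers program forwarding through HNS policies instead). A rough sketch, assuming only the APIs shown in this diff:

```go
// Rough sketch: reserve a host port for a container address on Windows. The
// allocator picks from its 60000-65000 dynamic range when hostPort is 0; no
// iptables rules are programmed because the Windows forwarding-table methods
// above are no-ops.
package example

import (
	"net"

	"github.com/docker/libnetwork/portmapper"
)

func reserveHostPort(containerIP net.IP, containerPort int) (net.Addr, error) {
	pm := portmapper.New("") // proxy path unused on Windows
	container := &net.TCPAddr{IP: containerIP, Port: containerPort}
	// hostPort 0 lets the allocator choose; useProxy is false, so no userland proxy is started
	return pm.Map(container, net.IPv4zero, 0, false)
}
```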
diff --git a/vendor/github.com/docker/libnetwork/portmapper/proxy_windows.go b/vendor/github.com/docker/libnetwork/portmapper/proxy_windows.go
new file mode 100644
index 0000000..06a9e24
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portmapper/proxy_windows.go
@@ -0,0 +1,10 @@
+package portmapper
+
+import (
+	"errors"
+	"net"
+)
+
+func newProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int, proxyPath string) (userlandProxy, error) {
+	return nil, errors.New("proxy is unsupported on windows")
+}
diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md
index 47da288..39605c0 100644
--- a/vendor/github.com/moby/buildkit/README.md
+++ b/vendor/github.com/moby/buildkit/README.md
@@ -38,7 +38,7 @@
 - [OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)
 - [container build interface](https://github.com/containerbuilding/cbi)
 - [Knative Build Templates](https://github.com/knative/build-templates)
-- [boss](https://github.com/crosbymichael/boss)
+- [vab](https://github.com/stellarproject/vab)
 - [Rio](https://github.com/rancher/rio) (on roadmap)
 
 ### Quick start
@@ -100,7 +100,7 @@
 go run examples/buildkit0/buildkit.go | buildctl build
 ```
 
-`buildctl build` will show interactive progress bar by default while the build job is running. It will also show you the path to the trace file that contains all information about the timing of the individual steps and logs.
+`buildctl build` will show an interactive progress bar by default while the build job is running. If a path for the trace file is specified, the generated trace file will contain all information about the timing of the individual steps and logs.
 
 Different versions of the example scripts show different ways of describing the build definition for this project to show the capabilities of the library. New versions have been added when new features have become available.
 
@@ -218,8 +218,8 @@
 #### To/From local filesystem
 
 ```
-buildctl build ... --export-cache type=local,src=path/to/input-dir
-buildctl build ... --import-cache type=local,dest=path/to/output-dir
+buildctl build ... --export-cache type=local,dest=path/to/output-dir
+buildctl build ... --import-cache type=local,src=path/to/input-dir
 ```
 
 The directory layout conforms to OCI Image Spec v1.0.
@@ -228,11 +228,11 @@
 * `mode=min` (default): only export layers for the resulting image
 * `mode=max`: export all the layers of all intermediate steps
 * `ref=docker.io/user/image:tag`: reference for `registry` cache exporter
-* `src=path/to/output-dir`: directory for `local` cache exporter
+* `dest=path/to/output-dir`: directory for `local` cache exporter
 
 #### `--import-cache` options
 * `ref=docker.io/user/image:tag`: reference for `registry` cache importer
-* `dest=path/to/input-dir`: directory for `local` cache importer
+* `src=path/to/input-dir`: directory for `local` cache importer
 * `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer. Defaults to the digest of "latest" tag in `index.json`
 
 ### Other
@@ -271,6 +271,18 @@
 The images can be also built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit).
 Run `make images` to build the images as `moby/buildkit:local` and `moby/buildkit:local-rootless`.
 
+#### Connection helpers
+
+If you are running `moby/buildkit:master` or `moby/buildkit:master-rootless` as a Docker/Kubernetes container, you can use a special `BUILDKIT_HOST` URL to connect to the BuildKit daemon in the container:
+
+```
+export BUILDKIT_HOST=docker://<container>
+```
+
+```
+export BUILDKIT_HOST=kube-pod://<pod>
+```
+
 ### Opentracing support
 
 BuildKit supports opentracing for buildkitd gRPC API and buildctl commands. To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set `JAEGER_TRACE` environment variable to the collection address.
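
The README hunks above swap the `src`/`dest` attributes so that `--export-cache type=local` writes to `dest` and `--import-cache type=local` reads from `src`. The same orientation applies when driving a build through the Go client; a hedged sketch, assuming `CacheOptionsEntry` exposes `Type` and `Attrs` fields as in buildkit's client package:

```go
// Hedged sketch (not part of this diff): local cache export/import options
// expressed as client-side cache entries; the attribute keys match the
// README section above and the local cache package later in this diff.
package example

import "github.com/moby/buildkit/client"

func localCacheOptions() (exports, imports []client.CacheOptionsEntry) {
	exports = []client.CacheOptionsEntry{{
		Type:  "local",
		Attrs: map[string]string{"dest": "path/to/output-dir", "mode": "max"},
	}}
	imports = []client.CacheOptionsEntry{{
		Type:  "local",
		Attrs: map[string]string{"src": "path/to/input-dir", "digest": "sha256:deadbeef"},
	}}
	return exports, imports
}
```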
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go
new file mode 100644
index 0000000..f66d5b4
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go
@@ -0,0 +1,83 @@
+package local
+
+import (
+	"context"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/session"
+	sessioncontent "github.com/moby/buildkit/session/content"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const (
+	attrDigest           = "digest"
+	attrSrc              = "src"
+	attrDest             = "dest"
+	contentStoreIDPrefix = "local:"
+)
+
+// ResolveCacheExporterFunc for "local" cache exporter.
+func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc {
+	return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) {
+		store := attrs[attrDest]
+		if store == "" {
+			return nil, errors.New("local cache exporter requires dest")
+		}
+		csID := contentStoreIDPrefix + store
+		cs, err := getContentStore(ctx, sm, csID)
+		if err != nil {
+			return nil, err
+		}
+		return remotecache.NewExporter(cs), nil
+	}
+}
+
+// ResolveCacheImporterFunc for "local" cache importer.
+func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc {
+	return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
+		dgstStr := attrs[attrDigest]
+		if dgstStr == "" {
+			return nil, specs.Descriptor{}, errors.New("local cache importer requires explicit digest")
+		}
+		dgst := digest.Digest(dgstStr)
+		store := attrs[attrSrc]
+		if store == "" {
+			return nil, specs.Descriptor{}, errors.New("local cache importer requires src")
+		}
+		csID := contentStoreIDPrefix + store
+		cs, err := getContentStore(ctx, sm, csID)
+		if err != nil {
+			return nil, specs.Descriptor{}, err
+		}
+		info, err := cs.Info(ctx, dgst)
+		if err != nil {
+			return nil, specs.Descriptor{}, err
+		}
+		desc := specs.Descriptor{
+			// MediaType is typically MediaTypeDockerSchema2ManifestList,
+			// but we leave it empty until we get correct support for local index.json
+			Digest: dgst,
+			Size:   info.Size,
+		}
+		return remotecache.NewImporter(cs), desc, nil
+	}
+}
+
+func getContentStore(ctx context.Context, sm *session.Manager, storeID string) (content.Store, error) {
+	sessionID := session.FromContext(ctx)
+	if sessionID == "" {
+		return nil, errors.New("local cache exporter/importer requires session")
+	}
+	timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	caller, err := sm.Get(timeoutCtx, sessionID)
+	if err != nil {
+		return nil, err
+	}
+	return sessioncontent.NewCallerStore(caller, storeID), nil
+}
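
Illustration (not part of this change) of the attribute contract the two resolvers above enforce: `dest` selects the exporter's output directory, `src` plus an explicit `digest` select the import source, and the context must carry a session ID so `getContentStore` can reach the client's content store. Names in the sketch are hypothetical:

```go
package example

import (
	"context"

	localremotecache "github.com/moby/buildkit/cache/remotecache/local"
	"github.com/moby/buildkit/session"
)

func resolveLocalCache(ctx context.Context, sm *session.Manager) error {
	// exporter: writes the cache to the directory named by "dest"
	if _, err := localremotecache.ResolveCacheExporterFunc(sm)(ctx, map[string]string{
		"dest": "path/to/output-dir",
	}); err != nil {
		return err
	}

	// importer: reads the cache from "src", keyed by a manifest-list digest;
	// ctx must carry a session (session.FromContext(ctx) != ""), otherwise the
	// resolver fails with "local cache exporter/importer requires session"
	_, desc, err := localremotecache.ResolveCacheImporterFunc(sm)(ctx, map[string]string{
		"src":    "path/to/input-dir",
		"digest": "sha256:deadbeef",
	})
	if err != nil {
		return err
	}
	_ = desc // descriptor carries the digest and the size looked up in the content store
	return nil
}
```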
diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go
index d772eaa..830c018 100644
--- a/vendor/github.com/moby/buildkit/client/solve.go
+++ b/vendor/github.com/moby/buildkit/client/solve.go
@@ -30,15 +30,17 @@
 )
 
 type SolveOpt struct {
-	Exports             []ExportEntry
-	LocalDirs           map[string]string
-	SharedKey           string
-	Frontend            string
-	FrontendAttrs       map[string]string
-	CacheExports        []CacheOptionsEntry
-	CacheImports        []CacheOptionsEntry
-	Session             []session.Attachable
-	AllowedEntitlements []entitlements.Entitlement
+	Exports               []ExportEntry
+	LocalDirs             map[string]string
+	SharedKey             string
+	Frontend              string
+	FrontendAttrs         map[string]string
+	CacheExports          []CacheOptionsEntry
+	CacheImports          []CacheOptionsEntry
+	Session               []session.Attachable
+	AllowedEntitlements   []entitlements.Entitlement
+	SharedSession         *session.Session // TODO: refactor to better session syncing
+	SessionPreInitialized bool             // TODO: refactor to better session syncing
 }
 
 type ExportEntry struct {
@@ -94,50 +96,15 @@
 		statusContext = opentracing.ContextWithSpan(statusContext, span)
 	}
 
-	s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to create session")
-	}
+	s := opt.SharedSession
 
-	if len(syncedDirs) > 0 {
-		s.Allow(filesync.NewFSSyncProvider(syncedDirs))
-	}
-
-	for _, a := range opt.Session {
-		s.Allow(a)
-	}
-
-	var ex ExportEntry
-	if len(opt.Exports) > 1 {
-		return nil, errors.New("currently only single Exports can be specified")
-	}
-	if len(opt.Exports) == 1 {
-		ex = opt.Exports[0]
-	}
-
-	switch ex.Type {
-	case ExporterLocal:
-		if ex.Output != nil {
-			return nil, errors.New("output file writer is not supported by local exporter")
+	if s == nil {
+		if opt.SessionPreInitialized {
+			return nil, errors.Errorf("no session provided for preinitialized option")
 		}
-		if ex.OutputDir == "" {
-			return nil, errors.New("output directory is required for local exporter")
-		}
-		s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
-	case ExporterOCI, ExporterDocker, ExporterTar:
-		if ex.OutputDir != "" {
-			return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
-		}
-		if ex.Output == nil {
-			return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
-		}
-		s.Allow(filesync.NewFSSyncTarget(ex.Output))
-	default:
-		if ex.Output != nil {
-			return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
-		}
-		if ex.OutputDir != "" {
-			return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+		s, err = session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create session")
 		}
 	}
 
@@ -145,17 +112,64 @@
 	if err != nil {
 		return nil, err
 	}
-	if len(cacheOpt.contentStores) > 0 {
-		s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
+
+	var ex ExportEntry
+
+	if !opt.SessionPreInitialized {
+		if len(syncedDirs) > 0 {
+			s.Allow(filesync.NewFSSyncProvider(syncedDirs))
+		}
+
+		for _, a := range opt.Session {
+			s.Allow(a)
+		}
+
+		if len(opt.Exports) > 1 {
+			return nil, errors.New("currently only single Exports can be specified")
+		}
+		if len(opt.Exports) == 1 {
+			ex = opt.Exports[0]
+		}
+
+		switch ex.Type {
+		case ExporterLocal:
+			if ex.Output != nil {
+				return nil, errors.New("output file writer is not supported by local exporter")
+			}
+			if ex.OutputDir == "" {
+				return nil, errors.New("output directory is required for local exporter")
+			}
+			s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
+		case ExporterOCI, ExporterDocker, ExporterTar:
+			if ex.OutputDir != "" {
+				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+			}
+			if ex.Output == nil {
+				return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
+			}
+			s.Allow(filesync.NewFSSyncTarget(ex.Output))
+		default:
+			if ex.Output != nil {
+				return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+			}
+			if ex.OutputDir != "" {
+				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+			}
+		}
+
+		if len(cacheOpt.contentStores) > 0 {
+			s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
+		}
+
+		eg.Go(func() error {
+			return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
+		})
 	}
+
 	for k, v := range cacheOpt.frontendAttrs {
 		opt.FrontendAttrs[k] = v
 	}
 
-	eg.Go(func() error {
-		return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
-	})
-
 	solveCtx, cancelSolve := context.WithCancel(ctx)
 	var res *SolveResponse
 	eg.Go(func() error {
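
The new `SharedSession`/`SessionPreInitialized` fields let a caller own the session lifecycle: when `SessionPreInitialized` is set, `Solve` skips attaching the file-sync and exporter handlers and does not call `s.Run`, so the caller must have started (and must stop) the session itself. A hedged sketch of the caller side, with the field usage assumed from the diff above:

```go
// Hedged sketch (not part of this diff): passing a caller-owned session via
// the new SolveOpt fields.
package example

import (
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session"
)

func solveOptWithSharedSession(s *session.Session) client.SolveOpt {
	return client.SolveOpt{
		FrontendAttrs:         map[string]string{},
		SharedSession:         s,    // session created and run elsewhere by the caller
		SessionPreInitialized: true, // tell Solve not to configure or run it
	}
}
```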
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
index 3ace6da..76777ee 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
@@ -20,6 +20,7 @@
 	"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
 	"github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/apicaps"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
@@ -61,8 +62,10 @@
 		localNameContext = v
 	}
 
+	forceLocalDockerfile := false
 	localNameDockerfile := DefaultLocalNameDockerfile
 	if v, ok := opts[keyNameDockerfile]; ok {
+		forceLocalDockerfile = true
 		localNameDockerfile = v
 	}
 
@@ -118,11 +121,14 @@
 		llb.SharedKeyHint(localNameDockerfile),
 		dockerfile2llb.WithInternalName(name),
 	)
+
 	var buildContext *llb.State
 	isScratchContext := false
 	if st, ok := detectGitContext(opts[localNameContext]); ok {
-		src = *st
-		buildContext = &src
+		if !forceLocalDockerfile {
+			src = *st
+		}
+		buildContext = st
 	} else if httpPrefix.MatchString(opts[localNameContext]) {
 		httpContext := llb.HTTP(opts[localNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context"))
 		def, err := httpContext.Marshal(marshalOpts...)
@@ -151,19 +157,35 @@
 			return nil, errors.Errorf("failed to read downloaded context")
 		}
 		if isArchive(dt) {
-			copyImage := opts[keyOverrideCopyImage]
-			if copyImage == "" {
-				copyImage = dockerfile2llb.DefaultCopyImage
+			fileop := useFileOp(opts, &caps)
+			if fileop {
+				bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{
+					AttemptUnpack: true,
+				}))
+				if !forceLocalDockerfile {
+					src = bc
+				}
+				buildContext = &bc
+			} else {
+				copyImage := opts[keyOverrideCopyImage]
+				if copyImage == "" {
+					copyImage = dockerfile2llb.DefaultCopyImage
+				}
+				unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
+					Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context"))
+				unpack.AddMount("/src", httpContext, llb.Readonly)
+				bc := unpack.AddMount("/out", llb.Scratch())
+				if !forceLocalDockerfile {
+					src = bc
+				}
+				buildContext = &bc
 			}
-			unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
-				Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context"))
-			unpack.AddMount("/src", httpContext, llb.Readonly)
-			src = unpack.AddMount("/out", llb.Scratch())
-			buildContext = &src
 		} else {
 			filename = "context"
-			src = httpContext
-			buildContext = &src
+			if !forceLocalDockerfile {
+				src = httpContext
+			}
+			buildContext = &httpContext
 			isScratchContext = true
 		}
 	}
@@ -529,3 +551,13 @@
 		return 0, errors.Errorf("invalid netmode %s", v)
 	}
 }
+
+func useFileOp(args map[string]string, caps *apicaps.CapSet) bool {
+	enabled := true
+	if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok {
+		if b, err := strconv.ParseBool(v); err == nil {
+			enabled = !b
+		}
+	}
+	return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
+}
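
`useFileOp` enables the new file-op based context unpacking only when the frontend reports the capability and the escape hatch has not been set. A small illustration (not part of this change) of the build-arg key it checks:

```go
// Illustration: forcing the fallback to the copy-image path by sending the
// build-arg that useFileOp inspects; the key name is taken from the code above.
package example

func disableFileOpAttrs() map[string]string {
	return map[string]string{
		// any value strconv.ParseBool treats as true disables fileop
		"build-arg:BUILDKIT_DISABLE_FILEOP": "1",
	}
}
```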
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
index b63e787..0527923 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
@@ -390,6 +390,7 @@
 	if !platformOpt.implicitTarget {
 		target.image.OS = platformOpt.targetPlatform.OS
 		target.image.Architecture = platformOpt.targetPlatform.Architecture
+		target.image.Variant = platformOpt.targetPlatform.Variant
 	}
 
 	return &st, &target.image, nil
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
index e83e58b..55e9add 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
@@ -50,6 +50,9 @@
 
 	// Config defines the execution parameters which should be used as a base when running a container using the image.
 	Config ImageConfig `json:"config,omitempty"`
+
+	// Variant defines the platform variant. To be added to the OCI image spec.
+	Variant string `json:"variant,omitempty"`
 }
 
 func clone(src Image) Image {
@@ -67,6 +70,7 @@
 			Architecture: platform.Architecture,
 			OS:           platform.OS,
 		},
+		Variant: platform.Variant,
 	}
 	img.RootFS.Type = "layers"
 	img.Config.WorkingDir = "/"
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
index 28b34f6..ed96d7e 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
@@ -200,6 +200,11 @@
 
 // Expand variables
 func (c *CopyCommand) Expand(expander SingleWordExpander) error {
+	expandedChown, err := expander(c.Chown)
+	if err != nil {
+		return err
+	}
+	c.Chown = expandedChown
 	return expandSliceInPlace(c.SourcesAndDest, expander)
 }
 
diff --git a/vendor/github.com/moby/buildkit/session/upload/generate.go b/vendor/github.com/moby/buildkit/session/upload/generate.go
new file mode 100644
index 0000000..c498a92
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/generate.go
@@ -0,0 +1,3 @@
+package upload
+
+//go:generate protoc --gogoslick_out=plugins=grpc:. upload.proto
diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.go b/vendor/github.com/moby/buildkit/session/upload/upload.go
new file mode 100644
index 0000000..8d69bde
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/upload.go
@@ -0,0 +1,55 @@
+package upload
+
+import (
+	"context"
+	io "io"
+	"net/url"
+
+	"github.com/moby/buildkit/session"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	keyPath = "urlpath"
+	keyHost = "urlhost"
+)
+
+func New(ctx context.Context, c session.Caller, url *url.URL) (*Upload, error) {
+	opts := map[string][]string{
+		keyPath: {url.Path},
+		keyHost: {url.Host},
+	}
+
+	client := NewUploadClient(c.Conn())
+
+	ctx = metadata.NewOutgoingContext(ctx, opts)
+
+	cc, err := client.Pull(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Upload{cc: cc}, nil
+}
+
+type Upload struct {
+	cc Upload_PullClient
+}
+
+func (u *Upload) WriteTo(w io.Writer) (int, error) {
+	n := 0
+	for {
+		var bm BytesMessage
+		if err := u.cc.RecvMsg(&bm); err != nil {
+			if err == io.EOF {
+				return n, nil
+			}
+			return n, err
+		}
+		nn, err := w.Write(bm.Data)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+	}
+}
diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.pb.go b/vendor/github.com/moby/buildkit/session/upload/upload.pb.go
new file mode 100644
index 0000000..a41928a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/upload.pb.go
@@ -0,0 +1,506 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: upload.proto
+
+package upload
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import bytes "bytes"
+
+import strings "strings"
+import reflect "reflect"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// BytesMessage contains a chunk of byte data
+type BytesMessage struct {
+	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *BytesMessage) Reset()      { *m = BytesMessage{} }
+func (*BytesMessage) ProtoMessage() {}
+func (*BytesMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_upload_0898dc79ebc86e9c, []int{0}
+}
+func (m *BytesMessage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (dst *BytesMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesMessage.Merge(dst, src)
+}
+func (m *BytesMessage) XXX_Size() int {
+	return m.Size()
+}
+func (m *BytesMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_BytesMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesMessage proto.InternalMessageInfo
+
+func (m *BytesMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*BytesMessage)(nil), "moby.upload.v1.BytesMessage")
+}
+func (this *BytesMessage) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*BytesMessage)
+	if !ok {
+		that2, ok := that.(BytesMessage)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if !bytes.Equal(this.Data, that1.Data) {
+		return false
+	}
+	return true
+}
+func (this *BytesMessage) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&upload.BytesMessage{")
+	s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringUpload(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// UploadClient is the client API for Upload service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type UploadClient interface {
+	Pull(ctx context.Context, opts ...grpc.CallOption) (Upload_PullClient, error)
+}
+
+type uploadClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewUploadClient(cc *grpc.ClientConn) UploadClient {
+	return &uploadClient{cc}
+}
+
+func (c *uploadClient) Pull(ctx context.Context, opts ...grpc.CallOption) (Upload_PullClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Upload_serviceDesc.Streams[0], "/moby.upload.v1.Upload/Pull", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &uploadPullClient{stream}
+	return x, nil
+}
+
+type Upload_PullClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type uploadPullClient struct {
+	grpc.ClientStream
+}
+
+func (x *uploadPullClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *uploadPullClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// UploadServer is the server API for Upload service.
+type UploadServer interface {
+	Pull(Upload_PullServer) error
+}
+
+func RegisterUploadServer(s *grpc.Server, srv UploadServer) {
+	s.RegisterService(&_Upload_serviceDesc, srv)
+}
+
+func _Upload_Pull_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(UploadServer).Pull(&uploadPullServer{stream})
+}
+
+type Upload_PullServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type uploadPullServer struct {
+	grpc.ServerStream
+}
+
+func (x *uploadPullServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *uploadPullServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _Upload_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "moby.upload.v1.Upload",
+	HandlerType: (*UploadServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Pull",
+			Handler:       _Upload_Pull_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "upload.proto",
+}
+
+func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Data) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintUpload(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	return i, nil
+}
+
+func encodeVarintUpload(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *BytesMessage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovUpload(uint64(l))
+	}
+	return n
+}
+
+func sovUpload(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozUpload(x uint64) (n int) {
+	return sovUpload(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *BytesMessage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BytesMessage{`,
+		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringUpload(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *BytesMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowUpload
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowUpload
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthUpload
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipUpload(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthUpload
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipUpload(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowUpload
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowUpload
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowUpload
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthUpload
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowUpload
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipUpload(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthUpload = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowUpload   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("upload.proto", fileDescriptor_upload_0898dc79ebc86e9c) }
+
+var fileDescriptor_upload_0898dc79ebc86e9c = []byte{
+	// 179 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x2d, 0xc8, 0xc9,
+	0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xcb, 0xcd, 0x4f, 0xaa, 0xd4, 0x83,
+	0x0a, 0x95, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, 0x16, 0x17,
+	0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
+	0xf0, 0x04, 0x81, 0xd9, 0x46, 0x01, 0x5c, 0x6c, 0xa1, 0x60, 0x0d, 0x42, 0x6e, 0x5c, 0x2c, 0x01,
+	0xa5, 0x39, 0x39, 0x42, 0x32, 0x7a, 0xa8, 0xc6, 0xe8, 0x21, 0x9b, 0x21, 0x85, 0x57, 0x56, 0x83,
+	0xd1, 0x80, 0xd1, 0xc9, 0xe6, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, 0x94,
+	0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e,
+	0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x87, 0x47, 0x72, 0x8c, 0x13,
+	0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x14, 0x1b, 0xc4, 0xc4,
+	0x24, 0x36, 0xb0, 0x57, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x12, 0xf2, 0xfc, 0xb4, 0xda,
+	0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.proto b/vendor/github.com/moby/buildkit/session/upload/upload.proto
new file mode 100644
index 0000000..ce254ba
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/upload.proto
@@ -0,0 +1,14 @@
+syntax = "proto3";
+
+package moby.upload.v1;
+
+option go_package = "upload";
+
+service Upload {
+	rpc Pull(stream BytesMessage) returns (stream BytesMessage);
+}
+
+// BytesMessage contains a chunk of byte data
+message BytesMessage{
+	bytes data = 1;
+}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
index 8002dcd..137c8ac 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
@@ -176,14 +176,16 @@
 	return lcm.id
 }
 func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) {
-	if err := lcm.wait(); err != nil {
-		return nil, err
+	lcm.wait()
+	if lcm.main == nil {
+		return nil, nil
 	}
 	return lcm.main.Query(inp, inputIndex, dgst, outputIndex)
 }
 func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) {
-	if err := lcm.wait(); err != nil {
-		return nil, err
+	lcm.wait()
+	if lcm.main == nil {
+		return nil, nil
 	}
 	return lcm.main.Records(ck)
 }
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
index 2404035..9ae1163 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
@@ -14,7 +14,6 @@
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend/gateway"
-	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/util/entitlements"
@@ -306,7 +305,7 @@
 
 func inVertexContext(ctx context.Context, name, id string, f func(ctx context.Context) error) error {
 	if id == "" {
-		id = identity.NewID()
+		id = name
 	}
 	v := client.Vertex{
 		Digest: digest.FromBytes([]byte(id)),
diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go
index c9fe8f5..7394a03 100644
--- a/vendor/github.com/moby/buildkit/source/http/httpsource.go
+++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go
@@ -35,10 +35,10 @@
 }
 
 type httpSource struct {
-	md     *metadata.Store
-	cache  cache.Accessor
-	locker *locker.Locker
-	client *http.Client
+	md        *metadata.Store
+	cache     cache.Accessor
+	locker    *locker.Locker
+	transport http.RoundTripper
 }
 
 func NewSource(opt Opt) (source.Source, error) {
@@ -47,12 +47,10 @@
 		transport = tracing.DefaultTransport
 	}
 	hs := &httpSource{
-		md:     opt.MetadataStore,
-		cache:  opt.CacheAccessor,
-		locker: locker.New(),
-		client: &http.Client{
-			Transport: transport,
-		},
+		md:        opt.MetadataStore,
+		cache:     opt.CacheAccessor,
+		locker:    locker.New(),
+		transport: transport,
 	}
 	return hs, nil
 }
@@ -66,17 +64,21 @@
 	src      source.HttpIdentifier
 	refID    string
 	cacheKey digest.Digest
+	client   *http.Client
 }
 
-func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, _ *session.Manager) (source.SourceInstance, error) {
+func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
 	httpIdentifier, ok := id.(*source.HttpIdentifier)
 	if !ok {
 		return nil, errors.Errorf("invalid http identifier %v", id)
 	}
 
+	sessionID := session.FromContext(ctx)
+
 	return &httpSourceHandler{
 		src:        *httpIdentifier,
 		httpSource: hs,
+		client:     &http.Client{Transport: newTransport(hs.transport, sm, sessionID)},
 	}, nil
 }
 
diff --git a/vendor/github.com/moby/buildkit/source/http/transport.go b/vendor/github.com/moby/buildkit/source/http/transport.go
new file mode 100644
index 0000000..0ce89b7
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/source/http/transport.go
@@ -0,0 +1,60 @@
+package http
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/session/upload"
+	"github.com/pkg/errors"
+)
+
+func newTransport(rt http.RoundTripper, sm *session.Manager, id string) http.RoundTripper {
+	return &sessionHandler{rt: rt, sm: sm, id: id}
+}
+
+type sessionHandler struct {
+	sm *session.Manager
+	rt http.RoundTripper
+	id string
+}
+
+func (h *sessionHandler) RoundTrip(req *http.Request) (*http.Response, error) {
+	if req.URL.Host != "buildkit-session" {
+		return h.rt.RoundTrip(req)
+	}
+
+	if req.Method != "GET" {
+		return nil, errors.Errorf("invalid request")
+	}
+
+	timeoutCtx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
+	defer cancel()
+
+	caller, err := h.sm.Get(timeoutCtx, h.id)
+	if err != nil {
+		return nil, err
+	}
+
+	up, err := upload.New(context.TODO(), caller, req.URL)
+	if err != nil {
+		return nil, err
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		_, err := up.WriteTo(pw)
+		pw.CloseWithError(err)
+	}()
+
+	resp := &http.Response{
+		Status:        "200 OK",
+		StatusCode:    200,
+		Body:          pr,
+		ContentLength: -1,
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
index 3aedbf7..3a73054 100644
--- a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
+++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
@@ -13,7 +13,7 @@
 
 func SupportedPlatforms() []string {
 	once.Do(func() {
-		def := platforms.DefaultString()
+		def := defaultPlatform()
 		arr = append(arr, def)
 		if p := "linux/amd64"; def != p && amd64Supported() == nil {
 			arr = append(arr, p)
@@ -34,7 +34,7 @@
 //the end user could fix the issue based on those warnings, and thus there is no need to drop
 //the platform from the candidates.
 func WarnIfUnsupported(pfs []string) {
-	def := platforms.DefaultString()
+	def := defaultPlatform()
 	for _, p := range pfs {
 		if p != def {
 			if p == "linux/amd64" {
@@ -56,6 +56,10 @@
 	}
 }
 
+func defaultPlatform() string {
+	return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
+}
+
 func printPlatfromWarning(p string, err error) {
 	if strings.Contains(err.Error(), "exec format error") {
 		logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", p)