Merge pull request #39132 from tonistiigi/update-buildkit

vendor: update buildkit to 8818c67c
diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go
index 3f13d06..37b6c24 100644
--- a/builder/builder-next/controller.go
+++ b/builder/builder-next/controller.go
@@ -21,6 +21,7 @@
 	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/cache/remotecache"
 	inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
+	localremotecache "github.com/moby/buildkit/cache/remotecache/local"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/frontend"
@@ -186,6 +187,7 @@
 		CacheKeyStorage:  cacheStorage,
 		ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{
 			"registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.ResolverOpt, dist.ReferenceStore, dist.ImageStore),
+			"local":    localremotecache.ResolveCacheImporterFunc(opt.SessionManager),
 		},
 		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
 			"inline": inlineremotecache.ResolveCacheExporterFunc(),
diff --git a/vendor.conf b/vendor.conf
index 6a70d92..06c5666 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -27,7 +27,7 @@
 golang.org/x/sync                                   e225da77a7e68af35c70ccbf71af2b83e6acac3c
 
 # buildkit
-github.com/moby/buildkit                            b3028967ae6259c9a31c1a1deeccd30fe3469cce
+github.com/moby/buildkit                            8818c67cff663befa7b70f21454e340f71616581
 github.com/tonistiigi/fsutil                        3bbb99cdbd76619ab717299830c60f6f2a533a6b
 github.com/grpc-ecosystem/grpc-opentracing          8e809c8a86450a29b90dcc9efbf062d0fe6d9746
 github.com/opentracing/opentracing-go               1361b9cd60be79c4c3a7fa9841b3c132e40066a7
diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md
index 47da288..39605c0 100644
--- a/vendor/github.com/moby/buildkit/README.md
+++ b/vendor/github.com/moby/buildkit/README.md
@@ -38,7 +38,7 @@
 - [OpenFaaS Cloud](https://github.com/openfaas/openfaas-cloud)
 - [container build interface](https://github.com/containerbuilding/cbi)
 - [Knative Build Templates](https://github.com/knative/build-templates)
-- [boss](https://github.com/crosbymichael/boss)
+- [vab](https://github.com/stellarproject/vab)
 - [Rio](https://github.com/rancher/rio) (on roadmap)
 
 ### Quick start
@@ -100,7 +100,7 @@
 go run examples/buildkit0/buildkit.go | buildctl build
 ```
 
-`buildctl build` will show interactive progress bar by default while the build job is running. It will also show you the path to the trace file that contains all information about the timing of the individual steps and logs.
+`buildctl build` will show an interactive progress bar by default while the build job is running. If a trace file path is specified, the generated trace file will contain all information about the timing of the individual steps and logs.
 
 Different versions of the example scripts show different ways of describing the build definition for this project to show the capabilities of the library. New versions have been added when new features have become available.
 
@@ -218,8 +218,8 @@
 #### To/From local filesystem
 
 ```
-buildctl build ... --export-cache type=local,src=path/to/input-dir
-buildctl build ... --import-cache type=local,dest=path/to/output-dir
+buildctl build ... --export-cache type=local,dest=path/to/output-dir
+buildctl build ... --import-cache type=local,src=path/to/input-dir
 ```
 
 The directory layout conforms to OCI Image Spec v1.0.
@@ -228,11 +228,11 @@
 * `mode=min` (default): only export layers for the resulting image
 * `mode=max`: export all the layers of all intermediate steps
 * `ref=docker.io/user/image:tag`: reference for `registry` cache exporter
-* `src=path/to/output-dir`: directory for `local` cache exporter
+* `dest=path/to/output-dir`: directory for `local` cache exporter
 
 #### `--import-cache` options
 * `ref=docker.io/user/image:tag`: reference for `registry` cache importer
-* `dest=path/to/input-dir`: directory for `local` cache importer
+* `src=path/to/input-dir`: directory for `local` cache importer
 * `digest=sha256:deadbeef`: digest of the manifest list to import for `local` cache importer. Defaults to the digest of "latest" tag in `index.json`
 
 ### Other
@@ -271,6 +271,18 @@
 The images can be also built locally using `./hack/dockerfiles/test.Dockerfile` (or `./hack/dockerfiles/test.buildkit.Dockerfile` if you already have BuildKit).
 Run `make images` to build the images as `moby/buildkit:local` and `moby/buildkit:local-rootless`.
 
+#### Connection helpers
+
+If you are running `moby/buildkit:master` or `moby/buildkit:master-rootless` as a Docker/Kubernetes container, you can use a special `BUILDKIT_HOST` URL for connecting to the BuildKit daemon in the container:
+
+```
+export BUILDKIT_HOST=docker://<container>
+```
+
+```
+export BUILDKIT_HOST=kube-pod://<pod>
+```
+
 ### Opentracing support
 
 BuildKit supports opentracing for buildkitd gRPC API and buildctl commands. To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set `JAEGER_TRACE` environment variable to the collection address.
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go
new file mode 100644
index 0000000..f66d5b4
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go
@@ -0,0 +1,83 @@
+package local
+
+import (
+	"context"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/session"
+	sessioncontent "github.com/moby/buildkit/session/content"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const (
+	attrDigest           = "digest"
+	attrSrc              = "src"
+	attrDest             = "dest"
+	contentStoreIDPrefix = "local:"
+)
+
+// ResolveCacheExporterFunc for "local" cache exporter.
+func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc {
+	return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) {
+		store := attrs[attrDest]
+		if store == "" {
+			return nil, errors.New("local cache exporter requires dest")
+		}
+		csID := contentStoreIDPrefix + store
+		cs, err := getContentStore(ctx, sm, csID)
+		if err != nil {
+			return nil, err
+		}
+		return remotecache.NewExporter(cs), nil
+	}
+}
+
+// ResolveCacheImporterFunc for "local" cache importer.
+func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc {
+	return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
+		dgstStr := attrs[attrDigest]
+		if dgstStr == "" {
+			return nil, specs.Descriptor{}, errors.New("local cache importer requires explicit digest")
+		}
+		dgst := digest.Digest(dgstStr)
+		store := attrs[attrSrc]
+		if store == "" {
+			return nil, specs.Descriptor{}, errors.New("local cache importer requires src")
+		}
+		csID := contentStoreIDPrefix + store
+		cs, err := getContentStore(ctx, sm, csID)
+		if err != nil {
+			return nil, specs.Descriptor{}, err
+		}
+		info, err := cs.Info(ctx, dgst)
+		if err != nil {
+			return nil, specs.Descriptor{}, err
+		}
+		desc := specs.Descriptor{
+			// MediaType is typically MediaTypeDockerSchema2ManifestList,
+			// but we leave it empty until we get correct support for local index.json
+			Digest: dgst,
+			Size:   info.Size,
+		}
+		return remotecache.NewImporter(cs), desc, nil
+	}
+}
+
+func getContentStore(ctx context.Context, sm *session.Manager, storeID string) (content.Store, error) {
+	sessionID := session.FromContext(ctx)
+	if sessionID == "" {
+		return nil, errors.New("local cache exporter/importer requires session")
+	}
+	timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	caller, err := sm.Get(timeoutCtx, sessionID)
+	if err != nil {
+		return nil, err
+	}
+	return sessioncontent.NewCallerStore(caller, storeID), nil
+}
diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go
index d772eaa..830c018 100644
--- a/vendor/github.com/moby/buildkit/client/solve.go
+++ b/vendor/github.com/moby/buildkit/client/solve.go
@@ -30,15 +30,17 @@
 )
 
 type SolveOpt struct {
-	Exports             []ExportEntry
-	LocalDirs           map[string]string
-	SharedKey           string
-	Frontend            string
-	FrontendAttrs       map[string]string
-	CacheExports        []CacheOptionsEntry
-	CacheImports        []CacheOptionsEntry
-	Session             []session.Attachable
-	AllowedEntitlements []entitlements.Entitlement
+	Exports               []ExportEntry
+	LocalDirs             map[string]string
+	SharedKey             string
+	Frontend              string
+	FrontendAttrs         map[string]string
+	CacheExports          []CacheOptionsEntry
+	CacheImports          []CacheOptionsEntry
+	Session               []session.Attachable
+	AllowedEntitlements   []entitlements.Entitlement
+	SharedSession         *session.Session // TODO: refactor to better session syncing
+	SessionPreInitialized bool             // TODO: refactor to better session syncing
 }
 
 type ExportEntry struct {
@@ -94,50 +96,15 @@
 		statusContext = opentracing.ContextWithSpan(statusContext, span)
 	}
 
-	s, err := session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to create session")
-	}
+	s := opt.SharedSession
 
-	if len(syncedDirs) > 0 {
-		s.Allow(filesync.NewFSSyncProvider(syncedDirs))
-	}
-
-	for _, a := range opt.Session {
-		s.Allow(a)
-	}
-
-	var ex ExportEntry
-	if len(opt.Exports) > 1 {
-		return nil, errors.New("currently only single Exports can be specified")
-	}
-	if len(opt.Exports) == 1 {
-		ex = opt.Exports[0]
-	}
-
-	switch ex.Type {
-	case ExporterLocal:
-		if ex.Output != nil {
-			return nil, errors.New("output file writer is not supported by local exporter")
+	if s == nil {
+		if opt.SessionPreInitialized {
+			return nil, errors.Errorf("no session provided for preinitialized option")
 		}
-		if ex.OutputDir == "" {
-			return nil, errors.New("output directory is required for local exporter")
-		}
-		s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
-	case ExporterOCI, ExporterDocker, ExporterTar:
-		if ex.OutputDir != "" {
-			return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
-		}
-		if ex.Output == nil {
-			return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
-		}
-		s.Allow(filesync.NewFSSyncTarget(ex.Output))
-	default:
-		if ex.Output != nil {
-			return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
-		}
-		if ex.OutputDir != "" {
-			return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+		s, err = session.NewSession(statusContext, defaultSessionName(), opt.SharedKey)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create session")
 		}
 	}
 
@@ -145,17 +112,64 @@
 	if err != nil {
 		return nil, err
 	}
-	if len(cacheOpt.contentStores) > 0 {
-		s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
+
+	var ex ExportEntry
+
+	if !opt.SessionPreInitialized {
+		if len(syncedDirs) > 0 {
+			s.Allow(filesync.NewFSSyncProvider(syncedDirs))
+		}
+
+		for _, a := range opt.Session {
+			s.Allow(a)
+		}
+
+		if len(opt.Exports) > 1 {
+			return nil, errors.New("currently only single Exports can be specified")
+		}
+		if len(opt.Exports) == 1 {
+			ex = opt.Exports[0]
+		}
+
+		switch ex.Type {
+		case ExporterLocal:
+			if ex.Output != nil {
+				return nil, errors.New("output file writer is not supported by local exporter")
+			}
+			if ex.OutputDir == "" {
+				return nil, errors.New("output directory is required for local exporter")
+			}
+			s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
+		case ExporterOCI, ExporterDocker, ExporterTar:
+			if ex.OutputDir != "" {
+				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+			}
+			if ex.Output == nil {
+				return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
+			}
+			s.Allow(filesync.NewFSSyncTarget(ex.Output))
+		default:
+			if ex.Output != nil {
+				return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+			}
+			if ex.OutputDir != "" {
+				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+			}
+		}
+
+		if len(cacheOpt.contentStores) > 0 {
+			s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
+		}
+
+		eg.Go(func() error {
+			return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
+		})
 	}
+
 	for k, v := range cacheOpt.frontendAttrs {
 		opt.FrontendAttrs[k] = v
 	}
 
-	eg.Go(func() error {
-		return s.Run(statusContext, grpchijack.Dialer(c.controlClient()))
-	})
-
 	solveCtx, cancelSolve := context.WithCancel(ctx)
 	var res *SolveResponse
 	eg.Go(func() error {
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
index 3ace6da..76777ee 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
@@ -20,6 +20,7 @@
 	"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
 	"github.com/moby/buildkit/frontend/gateway/client"
 	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/apicaps"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
@@ -61,8 +62,10 @@
 		localNameContext = v
 	}
 
+	forceLocalDockerfile := false
 	localNameDockerfile := DefaultLocalNameDockerfile
 	if v, ok := opts[keyNameDockerfile]; ok {
+		forceLocalDockerfile = true
 		localNameDockerfile = v
 	}
 
@@ -118,11 +121,14 @@
 		llb.SharedKeyHint(localNameDockerfile),
 		dockerfile2llb.WithInternalName(name),
 	)
+
 	var buildContext *llb.State
 	isScratchContext := false
 	if st, ok := detectGitContext(opts[localNameContext]); ok {
-		src = *st
-		buildContext = &src
+		if !forceLocalDockerfile {
+			src = *st
+		}
+		buildContext = st
 	} else if httpPrefix.MatchString(opts[localNameContext]) {
 		httpContext := llb.HTTP(opts[localNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context"))
 		def, err := httpContext.Marshal(marshalOpts...)
@@ -151,19 +157,35 @@
 			return nil, errors.Errorf("failed to read downloaded context")
 		}
 		if isArchive(dt) {
-			copyImage := opts[keyOverrideCopyImage]
-			if copyImage == "" {
-				copyImage = dockerfile2llb.DefaultCopyImage
+			fileop := useFileOp(opts, &caps)
+			if fileop {
+				bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{
+					AttemptUnpack: true,
+				}))
+				if !forceLocalDockerfile {
+					src = bc
+				}
+				buildContext = &bc
+			} else {
+				copyImage := opts[keyOverrideCopyImage]
+				if copyImage == "" {
+					copyImage = dockerfile2llb.DefaultCopyImage
+				}
+				unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
+					Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context"))
+				unpack.AddMount("/src", httpContext, llb.Readonly)
+				bc := unpack.AddMount("/out", llb.Scratch())
+				if !forceLocalDockerfile {
+					src = bc
+				}
+				buildContext = &bc
 			}
-			unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
-				Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context"))
-			unpack.AddMount("/src", httpContext, llb.Readonly)
-			src = unpack.AddMount("/out", llb.Scratch())
-			buildContext = &src
 		} else {
 			filename = "context"
-			src = httpContext
-			buildContext = &src
+			if !forceLocalDockerfile {
+				src = httpContext
+			}
+			buildContext = &httpContext
 			isScratchContext = true
 		}
 	}
@@ -529,3 +551,13 @@
 		return 0, errors.Errorf("invalid netmode %s", v)
 	}
 }
+
+func useFileOp(args map[string]string, caps *apicaps.CapSet) bool {
+	enabled := true
+	if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok {
+		if b, err := strconv.ParseBool(v); err == nil {
+			enabled = !b
+		}
+	}
+	return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
index b63e787..0527923 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
@@ -390,6 +390,7 @@
 	if !platformOpt.implicitTarget {
 		target.image.OS = platformOpt.targetPlatform.OS
 		target.image.Architecture = platformOpt.targetPlatform.Architecture
+		target.image.Variant = platformOpt.targetPlatform.Variant
 	}
 
 	return &st, &target.image, nil
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
index e83e58b..55e9add 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go
@@ -50,6 +50,9 @@
 
 	// Config defines the execution parameters which should be used as a base when running a container using the image.
 	Config ImageConfig `json:"config,omitempty"`
+
+	// Variant defines platform variant. To be added to OCI.
+	Variant string `json:"variant,omitempty"`
 }
 
 func clone(src Image) Image {
@@ -67,6 +70,7 @@
 			Architecture: platform.Architecture,
 			OS:           platform.OS,
 		},
+		Variant: platform.Variant,
 	}
 	img.RootFS.Type = "layers"
 	img.Config.WorkingDir = "/"
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
index 28b34f6..ed96d7e 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
@@ -200,6 +200,11 @@
 
 // Expand variables
 func (c *CopyCommand) Expand(expander SingleWordExpander) error {
+	expandedChown, err := expander(c.Chown)
+	if err != nil {
+		return err
+	}
+	c.Chown = expandedChown
 	return expandSliceInPlace(c.SourcesAndDest, expander)
 }
 
diff --git a/vendor/github.com/moby/buildkit/session/upload/generate.go b/vendor/github.com/moby/buildkit/session/upload/generate.go
new file mode 100644
index 0000000..c498a92
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/generate.go
@@ -0,0 +1,3 @@
+package upload
+
+//go:generate protoc --gogoslick_out=plugins=grpc:. upload.proto
diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.go b/vendor/github.com/moby/buildkit/session/upload/upload.go
new file mode 100644
index 0000000..8d69bde
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/upload.go
@@ -0,0 +1,55 @@
+package upload
+
+import (
+	"context"
+	io "io"
+	"net/url"
+
+	"github.com/moby/buildkit/session"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	keyPath = "urlpath"
+	keyHost = "urlhost"
+)
+
+func New(ctx context.Context, c session.Caller, url *url.URL) (*Upload, error) {
+	opts := map[string][]string{
+		keyPath: {url.Path},
+		keyHost: {url.Host},
+	}
+
+	client := NewUploadClient(c.Conn())
+
+	ctx = metadata.NewOutgoingContext(ctx, opts)
+
+	cc, err := client.Pull(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Upload{cc: cc}, nil
+}
+
+type Upload struct {
+	cc Upload_PullClient
+}
+
+func (u *Upload) WriteTo(w io.Writer) (int, error) {
+	n := 0
+	for {
+		var bm BytesMessage
+		if err := u.cc.RecvMsg(&bm); err != nil {
+			if err == io.EOF {
+				return n, nil
+			}
+			return n, err
+		}
+		nn, err := w.Write(bm.Data)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+	}
+}
diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.pb.go b/vendor/github.com/moby/buildkit/session/upload/upload.pb.go
new file mode 100644
index 0000000..a41928a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/upload.pb.go
@@ -0,0 +1,506 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: upload.proto
+
+package upload
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import bytes "bytes"
+
+import strings "strings"
+import reflect "reflect"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// BytesMessage contains a chunk of byte data
+type BytesMessage struct {
+	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *BytesMessage) Reset()      { *m = BytesMessage{} }
+func (*BytesMessage) ProtoMessage() {}
+func (*BytesMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_upload_0898dc79ebc86e9c, []int{0}
+}
+func (m *BytesMessage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalTo(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (dst *BytesMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesMessage.Merge(dst, src)
+}
+func (m *BytesMessage) XXX_Size() int {
+	return m.Size()
+}
+func (m *BytesMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_BytesMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesMessage proto.InternalMessageInfo
+
+func (m *BytesMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*BytesMessage)(nil), "moby.upload.v1.BytesMessage")
+}
+func (this *BytesMessage) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*BytesMessage)
+	if !ok {
+		that2, ok := that.(BytesMessage)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if !bytes.Equal(this.Data, that1.Data) {
+		return false
+	}
+	return true
+}
+func (this *BytesMessage) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&upload.BytesMessage{")
+	s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringUpload(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// UploadClient is the client API for Upload service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type UploadClient interface {
+	Pull(ctx context.Context, opts ...grpc.CallOption) (Upload_PullClient, error)
+}
+
+type uploadClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewUploadClient(cc *grpc.ClientConn) UploadClient {
+	return &uploadClient{cc}
+}
+
+func (c *uploadClient) Pull(ctx context.Context, opts ...grpc.CallOption) (Upload_PullClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Upload_serviceDesc.Streams[0], "/moby.upload.v1.Upload/Pull", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &uploadPullClient{stream}
+	return x, nil
+}
+
+type Upload_PullClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type uploadPullClient struct {
+	grpc.ClientStream
+}
+
+func (x *uploadPullClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *uploadPullClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// UploadServer is the server API for Upload service.
+type UploadServer interface {
+	Pull(Upload_PullServer) error
+}
+
+func RegisterUploadServer(s *grpc.Server, srv UploadServer) {
+	s.RegisterService(&_Upload_serviceDesc, srv)
+}
+
+func _Upload_Pull_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(UploadServer).Pull(&uploadPullServer{stream})
+}
+
+type Upload_PullServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type uploadPullServer struct {
+	grpc.ServerStream
+}
+
+func (x *uploadPullServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *uploadPullServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _Upload_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "moby.upload.v1.Upload",
+	HandlerType: (*UploadServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Pull",
+			Handler:       _Upload_Pull_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "upload.proto",
+}
+
+func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Data) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintUpload(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	return i, nil
+}
+
+func encodeVarintUpload(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *BytesMessage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovUpload(uint64(l))
+	}
+	return n
+}
+
+func sovUpload(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozUpload(x uint64) (n int) {
+	return sovUpload(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *BytesMessage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BytesMessage{`,
+		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringUpload(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *BytesMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowUpload
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowUpload
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthUpload
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipUpload(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthUpload
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipUpload(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowUpload
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowUpload
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowUpload
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthUpload
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowUpload
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipUpload(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthUpload = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowUpload   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("upload.proto", fileDescriptor_upload_0898dc79ebc86e9c) }
+
+var fileDescriptor_upload_0898dc79ebc86e9c = []byte{
+	// 179 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x2d, 0xc8, 0xc9,
+	0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xcb, 0xcd, 0x4f, 0xaa, 0xd4, 0x83,
+	0x0a, 0x95, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, 0x16, 0x17,
+	0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
+	0xf0, 0x04, 0x81, 0xd9, 0x46, 0x01, 0x5c, 0x6c, 0xa1, 0x60, 0x0d, 0x42, 0x6e, 0x5c, 0x2c, 0x01,
+	0xa5, 0x39, 0x39, 0x42, 0x32, 0x7a, 0xa8, 0xc6, 0xe8, 0x21, 0x9b, 0x21, 0x85, 0x57, 0x56, 0x83,
+	0xd1, 0x80, 0xd1, 0xc9, 0xe6, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, 0x94,
+	0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e,
+	0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x87, 0x47, 0x72, 0x8c, 0x13,
+	0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x14, 0x1b, 0xc4, 0xc4,
+	0x24, 0x36, 0xb0, 0x57, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x12, 0xf2, 0xfc, 0xb4, 0xda,
+	0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.proto b/vendor/github.com/moby/buildkit/session/upload/upload.proto
new file mode 100644
index 0000000..ce254ba
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/upload/upload.proto
@@ -0,0 +1,14 @@
+syntax = "proto3";
+
+package moby.upload.v1;
+
+option go_package = "upload";
+
+service Upload {
+	rpc Pull(stream BytesMessage) returns (stream BytesMessage);
+}
+
+// BytesMessage contains a chunk of byte data
+message BytesMessage{
+	bytes data = 1;
+}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
index 8002dcd..137c8ac 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
@@ -176,14 +176,16 @@
 	return lcm.id
 }
 func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) {
-	if err := lcm.wait(); err != nil {
-		return nil, err
+	lcm.wait()
+	if lcm.main == nil {
+		return nil, nil
 	}
 	return lcm.main.Query(inp, inputIndex, dgst, outputIndex)
 }
 func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) {
-	if err := lcm.wait(); err != nil {
-		return nil, err
+	lcm.wait()
+	if lcm.main == nil {
+		return nil, nil
 	}
 	return lcm.main.Records(ck)
 }
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
index 2404035..9ae1163 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
@@ -14,7 +14,6 @@
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend/gateway"
-	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/util/entitlements"
@@ -306,7 +305,7 @@
 
 func inVertexContext(ctx context.Context, name, id string, f func(ctx context.Context) error) error {
 	if id == "" {
-		id = identity.NewID()
+		id = name
 	}
 	v := client.Vertex{
 		Digest: digest.FromBytes([]byte(id)),
diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go
index c9fe8f5..7394a03 100644
--- a/vendor/github.com/moby/buildkit/source/http/httpsource.go
+++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go
@@ -35,10 +35,10 @@
 }
 
 type httpSource struct {
-	md     *metadata.Store
-	cache  cache.Accessor
-	locker *locker.Locker
-	client *http.Client
+	md        *metadata.Store
+	cache     cache.Accessor
+	locker    *locker.Locker
+	transport http.RoundTripper
 }
 
 func NewSource(opt Opt) (source.Source, error) {
@@ -47,12 +47,10 @@
 		transport = tracing.DefaultTransport
 	}
 	hs := &httpSource{
-		md:     opt.MetadataStore,
-		cache:  opt.CacheAccessor,
-		locker: locker.New(),
-		client: &http.Client{
-			Transport: transport,
-		},
+		md:        opt.MetadataStore,
+		cache:     opt.CacheAccessor,
+		locker:    locker.New(),
+		transport: transport,
 	}
 	return hs, nil
 }
@@ -66,17 +64,21 @@
 	src      source.HttpIdentifier
 	refID    string
 	cacheKey digest.Digest
+	client   *http.Client
 }
 
-func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, _ *session.Manager) (source.SourceInstance, error) {
+func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
 	httpIdentifier, ok := id.(*source.HttpIdentifier)
 	if !ok {
 		return nil, errors.Errorf("invalid http identifier %v", id)
 	}
 
+	sessionID := session.FromContext(ctx)
+
 	return &httpSourceHandler{
 		src:        *httpIdentifier,
 		httpSource: hs,
+		client:     &http.Client{Transport: newTransport(hs.transport, sm, sessionID)},
 	}, nil
 }
 
diff --git a/vendor/github.com/moby/buildkit/source/http/transport.go b/vendor/github.com/moby/buildkit/source/http/transport.go
new file mode 100644
index 0000000..0ce89b7
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/source/http/transport.go
@@ -0,0 +1,60 @@
+package http
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/session/upload"
+	"github.com/pkg/errors"
+)
+
+func newTransport(rt http.RoundTripper, sm *session.Manager, id string) http.RoundTripper {
+	return &sessionHandler{rt: rt, sm: sm, id: id}
+}
+
+type sessionHandler struct {
+	sm *session.Manager
+	rt http.RoundTripper
+	id string
+}
+
+func (h *sessionHandler) RoundTrip(req *http.Request) (*http.Response, error) {
+	if req.URL.Host != "buildkit-session" {
+		return h.rt.RoundTrip(req)
+	}
+
+	if req.Method != "GET" {
+		return nil, errors.Errorf("invalid request")
+	}
+
+	timeoutCtx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
+	defer cancel()
+
+	caller, err := h.sm.Get(timeoutCtx, h.id)
+	if err != nil {
+		return nil, err
+	}
+
+	up, err := upload.New(context.TODO(), caller, req.URL)
+	if err != nil {
+		return nil, err
+	}
+
+	pr, pw := io.Pipe()
+	go func() {
+		_, err := up.WriteTo(pw)
+		pw.CloseWithError(err)
+	}()
+
+	resp := &http.Response{
+		Status:        "200 OK",
+		StatusCode:    200,
+		Body:          pr,
+		ContentLength: -1,
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
index 3aedbf7..3a73054 100644
--- a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
+++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
@@ -13,7 +13,7 @@
 
 func SupportedPlatforms() []string {
 	once.Do(func() {
-		def := platforms.DefaultString()
+		def := defaultPlatform()
 		arr = append(arr, def)
 		if p := "linux/amd64"; def != p && amd64Supported() == nil {
 			arr = append(arr, p)
@@ -34,7 +34,7 @@
 //the end user could fix the issue based on those warning, and thus no need to drop
 //the platform from the candidates.
 func WarnIfUnsupported(pfs []string) {
-	def := platforms.DefaultString()
+	def := defaultPlatform()
 	for _, p := range pfs {
 		if p != def {
 			if p == "linux/amd64" {
@@ -56,6 +56,10 @@
 	}
 }
 
+func defaultPlatform() string {
+	return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
+}
+
 func printPlatfromWarning(p string, err error) {
 	if strings.Contains(err.Error(), "exec format error") {
 		logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", p)