Merge pull request #25 from thaJeztah/18.06-backport-error_when_base_name_resolved_to_blank
[18.06] Return error if basename is expanded to blank
diff --git a/Makefile b/Makefile
index f344b5c..fc5b7d9 100644
--- a/Makefile
+++ b/Makefile
@@ -56,7 +56,8 @@
-e https_proxy \
-e no_proxy \
-e VERSION \
- -e PLATFORM
+ -e PLATFORM \
+ -e PRODUCT
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go
index b1d31a5..dacec0a 100644
--- a/builder/builder-next/builder.go
+++ b/builder/builder-next/builder.go
@@ -237,7 +237,7 @@
Session: opt.Options.SessionID,
}
- aux := streamformatter.AuxFormatter{opt.ProgressWriter.Output}
+ aux := streamformatter.AuxFormatter{Writer: opt.ProgressWriter.Output}
eg, ctx := errgroup.WithContext(ctx)
diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go
index 2956aff..c97f941 100644
--- a/builder/builder-next/controller.go
+++ b/builder/builder-next/controller.go
@@ -13,12 +13,13 @@
"github.com/docker/docker/daemon/graphdriver"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
- "github.com/moby/buildkit/cache/remotecache"
+ registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/frontend"
- "github.com/moby/buildkit/frontend/dockerfile"
+ dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/frontend/gateway"
+ "github.com/moby/buildkit/frontend/gateway/forwarder"
"github.com/moby/buildkit/snapshot/blobmapping"
"github.com/moby/buildkit/solver/boltdbcachestorage"
"github.com/moby/buildkit/worker"
@@ -113,10 +114,6 @@
return nil, err
}
- frontends := map[string]frontend.Frontend{}
- frontends["dockerfile.v0"] = dockerfile.NewDockerfileFrontend()
- frontends["gateway.v0"] = gateway.NewGatewayFrontend()
-
wopt := mobyworker.Opt{
ID: "moby",
SessionManager: opt.SessionManager,
@@ -141,17 +138,17 @@
}
wc.Add(w)
- ci := remotecache.NewCacheImporter(remotecache.ImportOpt{
- Worker: w,
- SessionManager: opt.SessionManager,
- })
+ frontends := map[string]frontend.Frontend{
+ "dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build),
+ "gateway.v0": gateway.NewGatewayFrontend(wc),
+ }
return control.NewController(control.Opt{
- SessionManager: opt.SessionManager,
- WorkerController: wc,
- Frontends: frontends,
- CacheKeyStorage: cacheStorage,
- // CacheExporter: ce,
- CacheImporter: ci,
+ SessionManager: opt.SessionManager,
+ WorkerController: wc,
+ Frontends: frontends,
+ CacheKeyStorage: cacheStorage,
+ ResolveCacheImporterFunc: registryremotecache.ResolveCacheImporterFunc(opt.SessionManager),
+ // TODO: set ResolveCacheExporterFunc for exporting cache
})
}
diff --git a/builder/builder-next/exporter/export.go b/builder/builder-next/exporter/export.go
index 818ff00..70e7d6e 100644
--- a/builder/builder-next/exporter/export.go
+++ b/builder/builder-next/exporter/export.go
@@ -83,7 +83,7 @@
if ref != nil {
layersDone := oneOffProgress(ctx, "exporting layers")
- if err := ref.Finalize(ctx); err != nil {
+ if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go
index 2874f50..d767d31 100644
--- a/builder/dockerfile/dispatchers_test.go
+++ b/builder/dockerfile/dispatchers_test.go
@@ -137,10 +137,10 @@
args := NewBuildArgs(make(map[string]*string))
val := "sometag"
- metaArg := instructions.ArgCommand{
+ metaArg := instructions.ArgCommand{KeyValuePairOptional: instructions.KeyValuePairOptional{
Key: "THETAG",
Value: &val,
- }
+ }}
cmd := &instructions.Stage{
BaseName: "alpine:${THETAG}",
}
@@ -393,7 +393,7 @@
argName := "foo"
argVal := "bar"
- cmd := &instructions.ArgCommand{Key: argName, Value: &argVal}
+ cmd := &instructions.ArgCommand{KeyValuePairOptional: instructions.KeyValuePairOptional{Key: argName, Value: &argVal}}
err := dispatch(sb, cmd)
assert.NilError(t, err)
diff --git a/cmd/dockerd/docker.go b/cmd/dockerd/docker.go
index 463482e..197bb49 100644
--- a/cmd/dockerd/docker.go
+++ b/cmd/dockerd/docker.go
@@ -10,6 +10,7 @@
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/docker/pkg/term"
+ "github.com/moby/buildkit/util/apicaps"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -42,6 +43,12 @@
return cmd
}
+func init() {
+ if dockerversion.ProductName != "" {
+ apicaps.ExportedProduct = dockerversion.ProductName
+ }
+}
+
func main() {
if reexec.Init() {
return
diff --git a/dockerversion/version_lib.go b/dockerversion/version_lib.go
index 0897c07..ff18165 100644
--- a/dockerversion/version_lib.go
+++ b/dockerversion/version_lib.go
@@ -14,4 +14,5 @@
RuncCommitID = "library-import"
InitCommitID = "library-import"
PlatformName = ""
+ ProductName = ""
)
diff --git a/hack/make.ps1 b/hack/make.ps1
index 70b9a47..e1e91e5 100644
--- a/hack/make.ps1
+++ b/hack/make.ps1
@@ -365,7 +365,7 @@
# Run autogen if building binaries or running unit tests.
if ($Client -or $Daemon -or $TestUnit) {
Write-Host "INFO: Invoking autogen..."
- Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion -Platform "$env:PLATFORM" }
+ Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion -Platform "$env:PLATFORM" -Product "$env:PRODUCT" }
Catch [Exception] { Throw $_ }
}
diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen
index ba00189..342f5ec 100644
--- a/hack/make/.go-autogen
+++ b/hack/make/.go-autogen
@@ -21,6 +21,7 @@
IAmStatic string = "${IAMSTATIC:-true}"
ContainerdCommitID string = "${CONTAINERD_COMMIT}"
PlatformName string = "${PLATFORM}"
+ ProductName string = "${PRODUCT}"
)
// AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen
diff --git a/hack/make/.go-autogen.ps1 b/hack/make/.go-autogen.ps1
index cc14e9e..98686e1 100644
--- a/hack/make/.go-autogen.ps1
+++ b/hack/make/.go-autogen.ps1
@@ -15,7 +15,8 @@
param(
[Parameter(Mandatory=$true)][string]$CommitString,
[Parameter(Mandatory=$true)][string]$DockerVersion,
- [Parameter(Mandatory=$false)][string]$Platform
+ [Parameter(Mandatory=$false)][string]$Platform,
+ [Parameter(Mandatory=$false)][string]$Product
)
$ErrorActionPreference = "Stop"
@@ -45,6 +46,7 @@
Version string = "'+$DockerVersion+'"
BuildTime string = "'+$buildDateTime+'"
PlatformName string = "'+$Platform+'"
+ ProductName string = "'+$Product+'"
)
// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1
diff --git a/libcontainerd/remote_daemon_linux.go b/libcontainerd/remote_daemon_linux.go
index dc59eb8..34b04e2 100644
--- a/libcontainerd/remote_daemon_linux.go
+++ b/libcontainerd/remote_daemon_linux.go
@@ -37,6 +37,10 @@
if r.snapshotter == "" {
r.snapshotter = "overlay"
}
+ // Disable CRI plugin by default if containerd is managed as child-process
+ // of dockerd. See https://github.com/moby/moby/issues/37507
+ r.DisabledPlugins = append(r.DisabledPlugins, "cri")
+ delete(r.pluginConfs.Plugins, "cri")
}
func (r *remote) stopDaemon() {
diff --git a/vendor.conf b/vendor.conf
index 6c415f6..dd026d1 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -26,7 +26,7 @@
golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5
# buildkit
-github.com/moby/buildkit 9acf51e49185b348608e0096b2903dd72907adcb
+github.com/moby/buildkit 98f1604134f945d48538ffca0e18662337b4a850
github.com/tonistiigi/fsutil 8abad97ee3969cdf5e9c367f46adba2c212b3ddb
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7
@@ -125,7 +125,7 @@
github.com/gogo/googleapis 08a7655d27152912db7aaf4f983275eaf8d128ef
# cluster
-github.com/docker/swarmkit 68266392a176434d282760d2d6d0ab4c68edcae6
+github.com/docker/swarmkit 8852e8840e30d69db0b39a4a3d6447362e17c64f
github.com/gogo/protobuf v1.0.0
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
diff --git a/vendor/github.com/docker/swarmkit/agent/agent.go b/vendor/github.com/docker/swarmkit/agent/agent.go
index 93a7cbc..4d8f99c 100644
--- a/vendor/github.com/docker/swarmkit/agent/agent.go
+++ b/vendor/github.com/docker/swarmkit/agent/agent.go
@@ -2,7 +2,6 @@
import (
"bytes"
- "fmt"
"math/rand"
"reflect"
"sync"
@@ -11,6 +10,7 @@
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
+ "github.com/pkg/errors"
"golang.org/x/net/context"
)
@@ -444,7 +444,7 @@
if !same {
a.keys = message.NetworkBootstrapKeys
if err := a.config.Executor.SetNetworkBootstrapKeys(a.keys); err != nil {
- panic(fmt.Errorf("configuring network key failed"))
+ return errors.Wrap(err, "configuring network key failed")
}
}
}
diff --git a/vendor/github.com/docker/swarmkit/agent/session.go b/vendor/github.com/docker/swarmkit/agent/session.go
index 8606eab..8afca95 100644
--- a/vendor/github.com/docker/swarmkit/agent/session.go
+++ b/vendor/github.com/docker/swarmkit/agent/session.go
@@ -12,6 +12,7 @@
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
var (
@@ -195,7 +196,8 @@
cancel()
if err != nil {
log.G(ctx).WithFields(fields).WithError(err).Errorf("heartbeat to manager %v failed", s.conn.Peer())
- if grpc.Code(err) == codes.NotFound {
+ st, _ := status.FromError(err)
+ if st.Code() == codes.NotFound {
err = errNodeNotRegistered
}
@@ -252,7 +254,8 @@
for {
resp, err := subscriptions.Recv()
- if grpc.Code(err) == codes.Unimplemented {
+ st, _ := status.FromError(err)
+ if st.Code() == codes.Unimplemented {
log.Warning("manager does not support log subscriptions")
// Don't return, because returning would bounce the session
select {
@@ -303,7 +306,8 @@
// If we get a code = 12 desc = unknown method Assignments, try to use tasks
resp, err = assignmentWatch.Recv()
if err != nil {
- if grpc.Code(err) != codes.Unimplemented {
+ st, _ := status.FromError(err)
+ if st.Code() != codes.Unimplemented {
return err
}
tasksFallback = true
@@ -362,20 +366,21 @@
}
// sendTaskStatus uses the current session to send the status of a single task.
-func (s *session) sendTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
+func (s *session) sendTaskStatus(ctx context.Context, taskID string, taskStatus *api.TaskStatus) error {
client := api.NewDispatcherClient(s.conn.ClientConn)
if _, err := client.UpdateTaskStatus(ctx, &api.UpdateTaskStatusRequest{
SessionID: s.sessionID,
Updates: []*api.UpdateTaskStatusRequest_TaskStatusUpdate{
{
TaskID: taskID,
- Status: status,
+ Status: taskStatus,
},
},
}); err != nil {
// TODO(stevvooe): Dispatcher should not return this error. Status
// reports for unknown tasks should be ignored.
- if grpc.Code(err) == codes.NotFound {
+ st, _ := status.FromError(err)
+ if st.Code() == codes.NotFound {
return errTaskUnknown
}
diff --git a/vendor/github.com/docker/swarmkit/ca/certificates.go b/vendor/github.com/docker/swarmkit/ca/certificates.go
index f2d3dba..ad2be2c 100644
--- a/vendor/github.com/docker/swarmkit/ca/certificates.go
+++ b/vendor/github.com/docker/swarmkit/ca/certificates.go
@@ -35,6 +35,7 @@
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/status"
)
const (
@@ -352,7 +353,8 @@
defer cancel()
response, err := client.GetUnlockKey(ctx, &api.GetUnlockKeyRequest{})
if err != nil {
- if grpc.Code(err) == codes.Unimplemented { // if the server does not support keks, return as if no encryption key was specified
+ s, _ := status.FromError(err)
+ if s.Code() == codes.Unimplemented { // if the server does not support keks, return as if no encryption key was specified
conn.Close(true)
return &KEKData{}, nil
}
@@ -838,8 +840,9 @@
stateCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
statusResponse, err := caClient.NodeCertificateStatus(stateCtx, statusRequest)
+ s, _ := status.FromError(err)
switch {
- case err != nil && grpc.Code(err) != codes.DeadlineExceeded:
+ case err != nil && s.Code() != codes.DeadlineExceeded:
conn.Close(false)
// Because IssueNodeCertificate succeeded, if this call failed likely it is due to an issue with this
// particular connection, so we need to get another. We should try a remote connection - the local node
diff --git a/vendor/github.com/docker/swarmkit/manager/manager.go b/vendor/github.com/docker/swarmkit/manager/manager.go
index b738477..f3c16fa 100644
--- a/vendor/github.com/docker/swarmkit/manager/manager.go
+++ b/vendor/github.com/docker/swarmkit/manager/manager.go
@@ -962,7 +962,7 @@
// in order to allow running services on the predefined docker
// networks like `bridge` and `host`.
for _, p := range allocator.PredefinedNetworks() {
- if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != store.ErrNameConflict {
+ if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != nil && err != store.ErrNameConflict {
log.G(ctx).WithError(err).Error("failed to create predefined network " + p.Name)
}
}
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/task.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/task.go
index 8173dbd..f9a3fea 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/task.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/task.go
@@ -9,6 +9,7 @@
"github.com/docker/swarmkit/identity"
"github.com/docker/swarmkit/manager/constraint"
"github.com/docker/swarmkit/protobuf/ptypes"
+ google_protobuf "github.com/gogo/protobuf/types"
)
// NewTask creates a new task.
@@ -143,6 +144,14 @@
n.Spec.Availability == api.NodeAvailabilityDrain
}
+func taskTimestamp(t *api.Task) *google_protobuf.Timestamp {
+ if t.Status.AppliedAt != nil {
+ return t.Status.AppliedAt
+ }
+
+ return t.Status.Timestamp
+}
+
// TasksByTimestamp sorts tasks by applied timestamp if available, otherwise
// status timestamp.
type TasksByTimestamp []*api.Task
@@ -159,15 +168,8 @@
// Less implements the Less method for sorting.
func (t TasksByTimestamp) Less(i, j int) bool {
- iTimestamp := t[i].Status.Timestamp
- if t[i].Status.AppliedAt != nil {
- iTimestamp = t[i].Status.AppliedAt
- }
-
- jTimestamp := t[j].Status.Timestamp
- if t[j].Status.AppliedAt != nil {
- iTimestamp = t[j].Status.AppliedAt
- }
+ iTimestamp := taskTimestamp(t[i])
+ jTimestamp := taskTimestamp(t[j])
if iTimestamp == nil {
return true
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
index e3c2b82..cbeb321 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
@@ -220,6 +220,16 @@
}
}
+// taskInTerminalState returns true if task is in a terminal state.
+func taskInTerminalState(task *api.Task) bool {
+ return task.Status.State > api.TaskStateRunning
+}
+
+// taskWillNeverRun returns true if task will never reach running state.
+func taskWillNeverRun(task *api.Task) bool {
+ return task.Status.State < api.TaskStateAssigned && task.DesiredState > api.TaskStateRunning
+}
+
// tick performs task history cleanup.
func (tr *TaskReaper) tick() {
// this signals that a tick has occurred. it exists solely for testing.
@@ -329,22 +339,20 @@
runningTasks := 0
for _, t := range historicTasks {
- // Skip tasks which are desired to be running but the current state
- // is less than or equal to running.
- // This check is important to ignore tasks which are running or need to be running,
- // but to delete tasks which are either past running,
- // or have not reached running but need to be shutdown (because of a service update, for example).
- if t.DesiredState == api.TaskStateRunning && t.Status.State <= api.TaskStateRunning {
- // Don't delete running tasks
+ // Historical tasks can be considered for cleanup if:
+ // 1. The task has reached a terminal state i.e. actual state beyond TaskStateRunning.
+ // 2. The task has not yet become running and desired state is a terminal state i.e.
+ // actual state not yet TaskStateAssigned and desired state beyond TaskStateRunning.
+ if taskInTerminalState(t) || taskWillNeverRun(t) {
+ deleteTasks[t.ID] = struct{}{}
+
+ taskHistory++
+ if int64(len(historicTasks)) <= taskHistory {
+ break
+ }
+ } else {
+ // all other tasks are counted as running.
runningTasks++
- continue
- }
-
- deleteTasks[t.ID] = struct{}{}
-
- taskHistory++
- if int64(len(historicTasks)) <= taskHistory {
- break
}
}
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
index bdd3ec0..644e295 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
@@ -16,6 +16,7 @@
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/state/raft/membership"
"github.com/pkg/errors"
+ "google.golang.org/grpc/status"
)
const (
@@ -238,13 +239,15 @@
}
// Try doing a regular rpc if the receiver doesn't support streaming.
- if grpc.Code(err) == codes.Unimplemented {
+ s, _ := status.FromError(err)
+ if s.Code() == codes.Unimplemented {
log.G(ctx).Info("sending message to raft peer using ProcessRaftMessage()")
_, err = api.NewRaftClient(p.conn()).ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m})
}
// Handle errors.
- if grpc.Code(err) == codes.NotFound && grpc.ErrorDesc(err) == membership.ErrMemberRemoved.Error() {
+ s, _ = status.FromError(err)
+ if s.Code() == codes.NotFound && s.Message() == membership.ErrMemberRemoved.Error() {
p.tr.config.NodeRemoved()
}
if m.Type == raftpb.MsgSnap {
diff --git a/vendor/github.com/docker/swarmkit/vendor.conf b/vendor/github.com/docker/swarmkit/vendor.conf
index 3b21fcd..0283434 100644
--- a/vendor/github.com/docker/swarmkit/vendor.conf
+++ b/vendor/github.com/docker/swarmkit/vendor.conf
@@ -12,7 +12,7 @@
github.com/gogo/protobuf v1.0.0
github.com/golang/protobuf v1.1.0
github.com/matttproud/golang_protobuf_extensions v1.0.0
-google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+google.golang.org/genproto 694d95ba50e67b2e363f3483057db5d4910c18f9
# metrics
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
@@ -20,35 +20,35 @@
# etcd/raft
github.com/coreos/etcd v3.2.1
-github.com/coreos/go-systemd v15
+github.com/coreos/go-systemd v17
github.com/coreos/pkg v3
github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8
github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5
-github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
-github.com/docker/docker 3d14173a2900b60200d9b1475abd5138f4315981
+github.com/docker/distribution 83389a148052d74ac602f5f1d62f86ff2f3c4aa5
+github.com/docker/docker b9bb3bae5161f931c1dede43c67948c599197f50
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
-github.com/docker/libnetwork 1b91bc94094ecfdae41daa465cc0c8df37dfb3dd
-github.com/opencontainers/runc 4fc53a81fb7c994640722ac585fa9ca548971871
-github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
+github.com/docker/libnetwork d00ceed44cc447c77f25cdf5d59e83163bdcb4c9
+github.com/opencontainers/runc ad0f5255060d36872be04de22f8731f38ef2d7b1
+github.com/opencontainers/go-digest v1.0.0-rc1
github.com/opencontainers/image-spec v1.0.1
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 # v1.1.0
-github.com/Microsoft/go-winio v0.4.6
+github.com/Microsoft/go-winio v0.4.8
github.com/sirupsen/logrus v1.0.3
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
github.com/cloudflare/cfssl 1.3.2
github.com/dustin/go-humanize 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0
github.com/fernet/fernet-go 1b2437bc582b3cfbb341ee5a29f8ef5b42912ff2
-github.com/google/certificate-transparency-go 5ab67e519c93568ac3ee50fd6772a5bcf8aa460d
-github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+github.com/google/certificate-transparency-go v1.0.20
+github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
@@ -60,8 +60,8 @@
github.com/spf13/cobra 8e91712f174ced10270cf66615e0a9127e7c4de5
github.com/spf13/pflag 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7
github.com/stretchr/testify v1.1.4
-golang.org/x/crypto 650f4a345ab4e5b245a3034b110ebc7299e68186
-golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+golang.org/x/crypto 1a580b3eff7814fc9b40602fd35256c63b50f491
+golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd
golang.org/x/sys 37707fdb30a5b38865cfb95e5aab41707daec7fd
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md
index 5679476..10b2b0e 100644
--- a/vendor/github.com/moby/buildkit/README.md
+++ b/vendor/github.com/moby/buildkit/README.md
@@ -256,7 +256,7 @@
make test TESTPKGS=./client TESTFLAGS="--run /TestCallDiskUsage -v"
# run all integration tests with a specific worker
-# supported workers are oci and containerd
+# supported workers: oci, oci-rootless, containerd, containerd-1.0
make test TESTPKGS=./client TESTFLAGS="--run //worker=containerd -v"
```
diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go
index 5feffaf..9df84f1 100644
--- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go
+++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go
@@ -23,7 +23,6 @@
BytesMessage
ListWorkersRequest
ListWorkersResponse
- WorkerRecord
*/
package moby_buildkit_v1
@@ -33,6 +32,7 @@
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/timestamp"
import pb "github.com/moby/buildkit/solver/pb"
+import moby_buildkit_v1_types "github.com/moby/buildkit/api/types"
import time "time"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
@@ -526,7 +526,7 @@
}
type ListWorkersResponse struct {
- Record []*WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"`
+ Record []*moby_buildkit_v1_types.WorkerRecord `protobuf:"bytes,1,rep,name=record" json:"record,omitempty"`
}
func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} }
@@ -534,45 +534,13 @@
func (*ListWorkersResponse) ProtoMessage() {}
func (*ListWorkersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} }
-func (m *ListWorkersResponse) GetRecord() []*WorkerRecord {
+func (m *ListWorkersResponse) GetRecord() []*moby_buildkit_v1_types.WorkerRecord {
if m != nil {
return m.Record
}
return nil
}
-type WorkerRecord struct {
- ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
- Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
-}
-
-func (m *WorkerRecord) Reset() { *m = WorkerRecord{} }
-func (m *WorkerRecord) String() string { return proto.CompactTextString(m) }
-func (*WorkerRecord) ProtoMessage() {}
-func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} }
-
-func (m *WorkerRecord) GetID() string {
- if m != nil {
- return m.ID
- }
- return ""
-}
-
-func (m *WorkerRecord) GetLabels() map[string]string {
- if m != nil {
- return m.Labels
- }
- return nil
-}
-
-func (m *WorkerRecord) GetPlatforms() []pb.Platform {
- if m != nil {
- return m.Platforms
- }
- return nil
-}
-
func init() {
proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest")
proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
@@ -589,7 +557,6 @@
proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage")
proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest")
proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse")
- proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.WorkerRecord")
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -1620,59 +1587,6 @@
return i, nil
}
-func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.ID) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintControl(dAtA, i, uint64(len(m.ID)))
- i += copy(dAtA[i:], m.ID)
- }
- if len(m.Labels) > 0 {
- for k, _ := range m.Labels {
- dAtA[i] = 0x12
- i++
- v := m.Labels[k]
- mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- i = encodeVarintControl(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
- i = encodeVarintControl(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
- }
- }
- if len(m.Platforms) > 0 {
- for _, msg := range m.Platforms {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintControl(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- return i, nil
-}
-
func encodeVarintControl(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -1984,30 +1898,6 @@
return n
}
-func (m *WorkerRecord) Size() (n int) {
- var l int
- _ = l
- l = len(m.ID)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if len(m.Labels) > 0 {
- for k, v := range m.Labels {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
- }
- }
- if len(m.Platforms) > 0 {
- for _, e := range m.Platforms {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
- return n
-}
-
func sovControl(x uint64) (n int) {
for {
n++
@@ -4487,7 +4377,7 @@
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Record = append(m.Record, &WorkerRecord{})
+ m.Record = append(m.Record, &moby_buildkit_v1_types.WorkerRecord{})
if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
@@ -4513,234 +4403,6 @@
}
return nil
}
-func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ID = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Labels == nil {
- m.Labels = make(map[string]string)
- }
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
- m.Labels[mapkey] = mapvalue
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Platforms = append(m.Platforms, pb.Platform{})
- if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func skipControl(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
@@ -4849,81 +4511,79 @@
func init() { proto.RegisterFile("control.proto", fileDescriptorControl) }
var fileDescriptorControl = []byte{
- // 1214 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4f, 0x6f, 0x1b, 0x55,
- 0x10, 0x67, 0x6d, 0xc7, 0xf6, 0x8e, 0x9d, 0x28, 0x3c, 0xa0, 0x5a, 0x2d, 0x90, 0x98, 0x05, 0x24,
- 0xab, 0x6a, 0xd7, 0x69, 0xa0, 0x08, 0x72, 0xa8, 0x5a, 0xc7, 0x45, 0x24, 0x4a, 0x44, 0xd8, 0x34,
- 0x54, 0xe2, 0xb6, 0xb6, 0x5f, 0xdc, 0x55, 0xd6, 0xfb, 0x96, 0xf7, 0x9e, 0xa3, 0x86, 0x4f, 0xc1,
- 0x81, 0x6f, 0xc2, 0x81, 0x33, 0x07, 0xa4, 0xde, 0xe0, 0xcc, 0x21, 0x45, 0xb9, 0xc3, 0x67, 0x40,
- 0xef, 0xcf, 0xda, 0xcf, 0x5e, 0xe7, 0x8f, 0xd3, 0x93, 0xdf, 0xcc, 0xfe, 0xe6, 0xb7, 0xf3, 0x66,
- 0x66, 0x67, 0xc6, 0xb0, 0xdc, 0x23, 0x09, 0xa7, 0x24, 0xf6, 0x53, 0x4a, 0x38, 0x41, 0xab, 0x43,
- 0xd2, 0x3d, 0xf3, 0xbb, 0xa3, 0x28, 0xee, 0x9f, 0x44, 0xdc, 0x3f, 0x7d, 0xe0, 0xde, 0x1f, 0x44,
- 0xfc, 0xc5, 0xa8, 0xeb, 0xf7, 0xc8, 0xb0, 0x35, 0x20, 0x03, 0xd2, 0x92, 0xc0, 0xee, 0xe8, 0x58,
- 0x4a, 0x52, 0x90, 0x27, 0x45, 0xe0, 0xae, 0x0f, 0x08, 0x19, 0xc4, 0x78, 0x82, 0xe2, 0xd1, 0x10,
- 0x33, 0x1e, 0x0e, 0x53, 0x0d, 0xb8, 0x67, 0xf0, 0x89, 0x97, 0xb5, 0xb2, 0x97, 0xb5, 0x18, 0x89,
- 0x4f, 0x31, 0x6d, 0xa5, 0xdd, 0x16, 0x49, 0x99, 0x42, 0x7b, 0x2b, 0x50, 0x3f, 0xa0, 0xa3, 0x04,
- 0x07, 0xf8, 0xc7, 0x11, 0x66, 0xdc, 0xbb, 0x0b, 0xab, 0x9d, 0x88, 0x9d, 0x1c, 0xb1, 0x70, 0x90,
- 0xe9, 0xd0, 0x1d, 0x28, 0x1f, 0x47, 0x31, 0xc7, 0xd4, 0xb1, 0x1a, 0x56, 0xd3, 0x0e, 0xb4, 0xe4,
- 0xed, 0xc2, 0xdb, 0x06, 0x96, 0xa5, 0x24, 0x61, 0x18, 0x3d, 0x84, 0x32, 0xc5, 0x3d, 0x42, 0xfb,
- 0x8e, 0xd5, 0x28, 0x36, 0x6b, 0x9b, 0x1f, 0xfa, 0xb3, 0x37, 0xf6, 0xb5, 0x81, 0x00, 0x05, 0x1a,
- 0xec, 0xfd, 0x5e, 0x80, 0x9a, 0xa1, 0x47, 0x2b, 0x50, 0xd8, 0xe9, 0xe8, 0xf7, 0x15, 0x76, 0x3a,
- 0xc8, 0x81, 0xca, 0xfe, 0x88, 0x87, 0xdd, 0x18, 0x3b, 0x85, 0x86, 0xd5, 0xac, 0x06, 0x99, 0x88,
- 0xde, 0x85, 0xa5, 0x9d, 0xe4, 0x88, 0x61, 0xa7, 0x28, 0xf5, 0x4a, 0x40, 0x08, 0x4a, 0x87, 0xd1,
- 0x4f, 0xd8, 0x29, 0x35, 0xac, 0x66, 0x31, 0x90, 0x67, 0x71, 0x8f, 0x83, 0x90, 0xe2, 0x84, 0x3b,
- 0x4b, 0xea, 0x1e, 0x4a, 0x42, 0x6d, 0xb0, 0xb7, 0x29, 0x0e, 0x39, 0xee, 0x3f, 0xe1, 0x4e, 0xb9,
- 0x61, 0x35, 0x6b, 0x9b, 0xae, 0xaf, 0xc2, 0xec, 0x67, 0x61, 0xf6, 0x9f, 0x65, 0x61, 0x6e, 0x57,
- 0x5f, 0x9d, 0xaf, 0xbf, 0xf5, 0xf3, 0xeb, 0x75, 0x2b, 0x98, 0x98, 0xa1, 0xc7, 0x00, 0x7b, 0x21,
- 0xe3, 0x47, 0x4c, 0x92, 0x54, 0xae, 0x25, 0x29, 0x49, 0x02, 0xc3, 0x06, 0xad, 0x01, 0xc8, 0x00,
- 0x6c, 0x93, 0x51, 0xc2, 0x9d, 0xaa, 0xf4, 0xdb, 0xd0, 0xa0, 0x06, 0xd4, 0x3a, 0x98, 0xf5, 0x68,
- 0x94, 0xf2, 0x88, 0x24, 0x8e, 0x2d, 0xaf, 0x60, 0xaa, 0xbc, 0x5f, 0x4a, 0x50, 0x3f, 0x14, 0x39,
- 0xce, 0x12, 0xb7, 0x0a, 0xc5, 0x00, 0x1f, 0xeb, 0x28, 0x8a, 0x23, 0xf2, 0x01, 0x3a, 0xf8, 0x38,
- 0x4a, 0x22, 0xc9, 0x51, 0x90, 0x6e, 0xae, 0xf8, 0x69, 0xd7, 0x9f, 0x68, 0x03, 0x03, 0x81, 0x5c,
- 0xa8, 0x3e, 0x7d, 0x99, 0x12, 0x2a, 0x92, 0x5f, 0x94, 0x34, 0x63, 0x19, 0x3d, 0x87, 0xe5, 0xec,
- 0xfc, 0x84, 0x73, 0xca, 0x9c, 0x92, 0x4c, 0xf8, 0x83, 0x7c, 0xc2, 0x4d, 0xa7, 0xfc, 0x29, 0x9b,
- 0xa7, 0x09, 0xa7, 0x67, 0xc1, 0x34, 0x8f, 0xc8, 0xf5, 0x21, 0x66, 0x4c, 0x78, 0xa8, 0x12, 0x95,
- 0x89, 0xc2, 0x9d, 0xaf, 0x29, 0x49, 0x38, 0x4e, 0xfa, 0x32, 0x51, 0x76, 0x30, 0x96, 0x85, 0x3b,
- 0xd9, 0x59, 0xb9, 0x53, 0xb9, 0x91, 0x3b, 0x53, 0x36, 0xda, 0x9d, 0x29, 0x1d, 0xda, 0x82, 0xa5,
- 0xed, 0xb0, 0xf7, 0x02, 0xcb, 0x9c, 0xd4, 0x36, 0xd7, 0xf2, 0x84, 0xf2, 0xf1, 0xb7, 0x32, 0x09,
- 0xac, 0x5d, 0x12, 0xe5, 0x11, 0x28, 0x13, 0xf7, 0x31, 0xa0, 0xfc, 0x7d, 0x45, 0x5e, 0x4e, 0xf0,
- 0x59, 0x96, 0x97, 0x13, 0x7c, 0x26, 0x8a, 0xf8, 0x34, 0x8c, 0x47, 0xaa, 0xb8, 0xed, 0x40, 0x09,
- 0x5b, 0x85, 0x2f, 0x2d, 0xc1, 0x90, 0x77, 0x71, 0x11, 0x06, 0xef, 0xb5, 0x05, 0x75, 0xd3, 0x43,
- 0xf4, 0x01, 0xd8, 0xca, 0xa9, 0x49, 0x71, 0x4c, 0x14, 0xa2, 0x0e, 0x77, 0x86, 0x5a, 0x60, 0x4e,
- 0xa1, 0x51, 0x6c, 0xda, 0x81, 0xa1, 0x41, 0xdf, 0x41, 0x4d, 0x81, 0x55, 0x94, 0x8b, 0x32, 0xca,
- 0xad, 0xab, 0x83, 0xe2, 0x1b, 0x16, 0x2a, 0xc6, 0x26, 0x87, 0xfb, 0x08, 0x56, 0x67, 0x01, 0x0b,
- 0xdd, 0xf0, 0x37, 0x0b, 0x96, 0x75, 0x52, 0x75, 0x17, 0x0a, 0x33, 0x46, 0x4c, 0x33, 0x9d, 0xee,
- 0x47, 0x0f, 0x2f, 0xad, 0x07, 0x05, 0xf3, 0x67, 0xed, 0x94, 0xbf, 0x39, 0x3a, 0x77, 0x1b, 0xde,
- 0x9b, 0x0b, 0x5d, 0xc8, 0xf3, 0x8f, 0x60, 0xf9, 0x90, 0x87, 0x7c, 0xc4, 0x2e, 0xfd, 0x64, 0xbd,
- 0x5f, 0x2d, 0x58, 0xc9, 0x30, 0xfa, 0x76, 0x9f, 0x43, 0xf5, 0x14, 0x53, 0x8e, 0x5f, 0x62, 0xa6,
- 0x6f, 0xe5, 0xe4, 0x6f, 0xf5, 0xbd, 0x44, 0x04, 0x63, 0x24, 0xda, 0x82, 0x2a, 0x93, 0x3c, 0x58,
- 0xa5, 0x75, 0x6e, 0x29, 0x2b, 0x2b, 0xfd, 0xbe, 0x31, 0x1e, 0xb5, 0xa0, 0x14, 0x93, 0x41, 0x96,
- 0xed, 0xf7, 0x2f, 0xb3, 0xdb, 0x23, 0x83, 0x40, 0x02, 0xbd, 0xf3, 0x02, 0x94, 0x95, 0x0e, 0xed,
- 0x42, 0xb9, 0x1f, 0x0d, 0x30, 0xe3, 0xea, 0x56, 0xed, 0x4d, 0xf1, 0x81, 0xfc, 0x7d, 0xbe, 0x7e,
- 0xd7, 0x18, 0x54, 0x24, 0xc5, 0x89, 0x18, 0x94, 0x61, 0x94, 0x60, 0xca, 0x5a, 0x03, 0x72, 0x5f,
- 0x99, 0xf8, 0x1d, 0xf9, 0x13, 0x68, 0x06, 0xc1, 0x15, 0x25, 0xe9, 0x88, 0xeb, 0xc2, 0xbc, 0x1d,
- 0x97, 0x62, 0x10, 0x23, 0x22, 0x09, 0x87, 0x58, 0xf7, 0x35, 0x79, 0x16, 0x23, 0xa2, 0x27, 0xea,
- 0xb6, 0x2f, 0x07, 0x47, 0x35, 0xd0, 0x12, 0xda, 0x82, 0x0a, 0xe3, 0x21, 0xe5, 0xb8, 0x2f, 0x5b,
- 0xd2, 0x4d, 0x7a, 0x7b, 0x66, 0x80, 0x1e, 0x81, 0xdd, 0x23, 0xc3, 0x34, 0xc6, 0xc2, 0xba, 0x7c,
- 0x43, 0xeb, 0x89, 0x89, 0xa8, 0x1e, 0x4c, 0x29, 0xa1, 0x72, 0xaa, 0xd8, 0x81, 0x12, 0xbc, 0xff,
- 0x0a, 0x50, 0x37, 0x93, 0x95, 0x9b, 0x98, 0xbb, 0x50, 0x56, 0xa9, 0x57, 0x55, 0x77, 0xbb, 0x50,
- 0x29, 0x86, 0xb9, 0xa1, 0x72, 0xa0, 0xd2, 0x1b, 0x51, 0x39, 0x4e, 0xd5, 0x90, 0xcd, 0x44, 0xe1,
- 0x30, 0x27, 0x3c, 0x8c, 0x65, 0xa8, 0x8a, 0x81, 0x12, 0xc4, 0x94, 0x1d, 0xaf, 0x2a, 0x8b, 0x4d,
- 0xd9, 0xb1, 0x99, 0x99, 0x86, 0xca, 0x1b, 0xa5, 0xa1, 0xba, 0x70, 0x1a, 0xbc, 0x3f, 0x2c, 0xb0,
- 0xc7, 0x55, 0x6e, 0x44, 0xd7, 0x7a, 0xe3, 0xe8, 0x4e, 0x45, 0xa6, 0x70, 0xbb, 0xc8, 0xdc, 0x81,
- 0x32, 0xe3, 0x14, 0x87, 0x43, 0x99, 0xa3, 0x62, 0xa0, 0x25, 0xd1, 0x4f, 0x86, 0x6c, 0x20, 0x33,
- 0x54, 0x0f, 0xc4, 0xd1, 0xf3, 0xa0, 0xde, 0x3e, 0xe3, 0x98, 0xed, 0x63, 0x26, 0x96, 0x0b, 0x91,
- 0xdb, 0x7e, 0xc8, 0x43, 0x79, 0x8f, 0x7a, 0x20, 0xcf, 0xde, 0x3d, 0x40, 0x7b, 0x11, 0xe3, 0xcf,
- 0x09, 0x3d, 0xc1, 0x94, 0xcd, 0xdb, 0x03, 0x8b, 0xc6, 0x1e, 0xb8, 0x0f, 0xef, 0x4c, 0xa1, 0x75,
- 0x97, 0xfa, 0x62, 0x66, 0x13, 0x9c, 0xd3, 0x6d, 0x94, 0xc9, 0xcc, 0x2a, 0xf8, 0xa7, 0x05, 0x75,
- 0xf3, 0x41, 0xae, 0xb2, 0xdb, 0x50, 0xde, 0x0b, 0xbb, 0x38, 0xce, 0xda, 0xd8, 0xdd, 0xab, 0x89,
- 0x7d, 0x05, 0x56, 0x7d, 0x5c, 0x5b, 0xa2, 0x0d, 0xb0, 0xd3, 0x38, 0xe4, 0xc7, 0x84, 0x0e, 0xb3,
- 0xae, 0x56, 0x17, 0x7b, 0xd0, 0x81, 0x56, 0xea, 0x31, 0x3e, 0x01, 0xb9, 0x5f, 0x41, 0xcd, 0x20,
- 0x5a, 0xa4, 0xcb, 0x6f, 0xfe, 0x5b, 0x84, 0xca, 0xb6, 0xfa, 0x1b, 0x80, 0x9e, 0x81, 0x3d, 0x5e,
- 0x9a, 0x91, 0x97, 0xf7, 0x7c, 0x76, 0xfb, 0x76, 0x3f, 0xbe, 0x12, 0xa3, 0x63, 0xfd, 0x0d, 0x2c,
- 0xc9, 0x35, 0x1e, 0xcd, 0x09, 0xb2, 0xb9, 0xdf, 0xbb, 0x57, 0xaf, 0xe3, 0x1b, 0x96, 0x60, 0x92,
- 0xf3, 0x70, 0x1e, 0x93, 0xb9, 0x38, 0xb9, 0xeb, 0xd7, 0x0c, 0x52, 0xb4, 0x0f, 0x65, 0xdd, 0x9a,
- 0xe6, 0x41, 0xcd, 0xa9, 0xe7, 0x36, 0x2e, 0x07, 0x28, 0xb2, 0x0d, 0x0b, 0xed, 0x8f, 0xb7, 0xc2,
- 0x79, 0xae, 0x99, 0x25, 0xed, 0x5e, 0xf3, 0xbc, 0x69, 0x6d, 0x58, 0xe8, 0x07, 0xa8, 0x19, 0x45,
- 0x8b, 0x3e, 0xc9, 0x9b, 0xe4, 0xbf, 0x00, 0xf7, 0xd3, 0x6b, 0x50, 0xca, 0xd9, 0x76, 0xfd, 0xd5,
- 0xc5, 0x9a, 0xf5, 0xd7, 0xc5, 0x9a, 0xf5, 0xcf, 0xc5, 0x9a, 0xd5, 0x2d, 0xcb, 0x6f, 0xf8, 0xb3,
- 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x86, 0xd4, 0x0f, 0xa1, 0x0a, 0x0e, 0x00, 0x00,
+ // 1176 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x6f, 0x23, 0x45,
+ 0x13, 0x7e, 0xc7, 0x76, 0xfc, 0x51, 0x76, 0xa2, 0xbc, 0x0d, 0xac, 0x46, 0x03, 0x24, 0x66, 0x00,
+ 0xc9, 0x5a, 0xed, 0xce, 0x64, 0x03, 0x2b, 0xa1, 0x08, 0xad, 0x76, 0x1d, 0x2f, 0x22, 0x51, 0x22,
+ 0x96, 0xce, 0x86, 0x95, 0xb8, 0x8d, 0xed, 0x8e, 0x77, 0x14, 0x7b, 0x7a, 0xe8, 0xee, 0x09, 0x6b,
+ 0x7e, 0x05, 0x07, 0xfe, 0x09, 0x07, 0xce, 0x1c, 0x90, 0xf6, 0xc8, 0x99, 0x43, 0x16, 0xe5, 0x0e,
+ 0xbf, 0x01, 0xf5, 0xc7, 0xd8, 0xed, 0xd8, 0xf9, 0xdc, 0x53, 0xba, 0x2a, 0x4f, 0x3d, 0x53, 0x5d,
+ 0x4f, 0xb9, 0xab, 0x60, 0xb9, 0x47, 0x13, 0xc1, 0xe8, 0x30, 0x48, 0x19, 0x15, 0x14, 0xad, 0x8e,
+ 0x68, 0x77, 0x1c, 0x74, 0xb3, 0x78, 0xd8, 0x3f, 0x8e, 0x45, 0x70, 0xf2, 0xc0, 0xbb, 0x3f, 0x88,
+ 0xc5, 0xcb, 0xac, 0x1b, 0xf4, 0xe8, 0x28, 0x1c, 0xd0, 0x01, 0x0d, 0x15, 0xb0, 0x9b, 0x1d, 0x29,
+ 0x4b, 0x19, 0xea, 0xa4, 0x09, 0xbc, 0xf5, 0x01, 0xa5, 0x83, 0x21, 0x99, 0xa2, 0x44, 0x3c, 0x22,
+ 0x5c, 0x44, 0xa3, 0xd4, 0x00, 0xee, 0x59, 0x7c, 0xf2, 0x63, 0x61, 0xfe, 0xb1, 0x90, 0xd3, 0xe1,
+ 0x09, 0x61, 0x61, 0xda, 0x0d, 0x69, 0xca, 0x0d, 0x3a, 0xbc, 0x10, 0x1d, 0xa5, 0x71, 0x28, 0xc6,
+ 0x29, 0xe1, 0xe1, 0x8f, 0x94, 0x1d, 0x13, 0xa6, 0x03, 0xfc, 0x15, 0x68, 0x3c, 0x63, 0x59, 0x42,
+ 0x30, 0xf9, 0x21, 0x23, 0x5c, 0xf8, 0x77, 0x61, 0xb5, 0x13, 0xf3, 0xe3, 0x43, 0x1e, 0x0d, 0x72,
+ 0x1f, 0xba, 0x03, 0xe5, 0xa3, 0x78, 0x28, 0x08, 0x73, 0x9d, 0xa6, 0xd3, 0xaa, 0x61, 0x63, 0xf9,
+ 0xbb, 0xf0, 0x7f, 0x0b, 0xcb, 0x53, 0x9a, 0x70, 0x82, 0x1e, 0x42, 0x99, 0x91, 0x1e, 0x65, 0x7d,
+ 0xd7, 0x69, 0x16, 0x5b, 0xf5, 0xcd, 0x0f, 0x83, 0xf3, 0x25, 0x0a, 0x4c, 0x80, 0x04, 0x61, 0x03,
+ 0xf6, 0x7f, 0x2f, 0x40, 0xdd, 0xf2, 0xa3, 0x15, 0x28, 0xec, 0x74, 0xcc, 0xf7, 0x0a, 0x3b, 0x1d,
+ 0xe4, 0x42, 0x65, 0x3f, 0x13, 0x51, 0x77, 0x48, 0xdc, 0x42, 0xd3, 0x69, 0x55, 0x71, 0x6e, 0xa2,
+ 0x77, 0x61, 0x69, 0x27, 0x39, 0xe4, 0xc4, 0x2d, 0x2a, 0xbf, 0x36, 0x10, 0x82, 0xd2, 0x41, 0xfc,
+ 0x13, 0x71, 0x4b, 0x4d, 0xa7, 0x55, 0xc4, 0xea, 0x2c, 0xef, 0xf1, 0x2c, 0x62, 0x24, 0x11, 0xee,
+ 0x92, 0xbe, 0x87, 0xb6, 0x50, 0x1b, 0x6a, 0xdb, 0x8c, 0x44, 0x82, 0xf4, 0x9f, 0x08, 0xb7, 0xdc,
+ 0x74, 0x5a, 0xf5, 0x4d, 0x2f, 0xd0, 0xba, 0x04, 0xb9, 0x2e, 0xc1, 0xf3, 0x5c, 0x97, 0x76, 0xf5,
+ 0xf5, 0xe9, 0xfa, 0xff, 0x7e, 0x7e, 0xb3, 0xee, 0xe0, 0x69, 0x18, 0x7a, 0x0c, 0xb0, 0x17, 0x71,
+ 0x71, 0xc8, 0x15, 0x49, 0xe5, 0x4a, 0x92, 0x92, 0x22, 0xb0, 0x62, 0xd0, 0x1a, 0x80, 0x2a, 0xc0,
+ 0x36, 0xcd, 0x12, 0xe1, 0x56, 0x55, 0xde, 0x96, 0x07, 0x35, 0xa1, 0xde, 0x21, 0xbc, 0xc7, 0xe2,
+ 0x54, 0xc4, 0x34, 0x71, 0x6b, 0xea, 0x0a, 0xb6, 0xcb, 0xff, 0xa5, 0x04, 0x8d, 0x03, 0xd9, 0x14,
+ 0xb9, 0x70, 0xab, 0x50, 0xc4, 0xe4, 0xc8, 0x54, 0x51, 0x1e, 0x51, 0x00, 0xd0, 0x21, 0x47, 0x71,
+ 0x12, 0x2b, 0x8e, 0x82, 0x4a, 0x73, 0x25, 0x48, 0xbb, 0xc1, 0xd4, 0x8b, 0x2d, 0x04, 0xf2, 0xa0,
+ 0xfa, 0xf4, 0x55, 0x4a, 0x99, 0x14, 0xbf, 0xa8, 0x68, 0x26, 0x36, 0x7a, 0x01, 0xcb, 0xf9, 0xf9,
+ 0x89, 0x10, 0x8c, 0xbb, 0x25, 0x25, 0xf8, 0x83, 0x79, 0xc1, 0xed, 0xa4, 0x82, 0x99, 0x98, 0xa7,
+ 0x89, 0x60, 0x63, 0x3c, 0xcb, 0x23, 0xb5, 0x3e, 0x20, 0x9c, 0xcb, 0x0c, 0xb5, 0x50, 0xb9, 0x29,
+ 0xd3, 0xf9, 0x8a, 0xd1, 0x44, 0x90, 0xa4, 0xaf, 0x84, 0xaa, 0xe1, 0x89, 0x2d, 0xd3, 0xc9, 0xcf,
+ 0x3a, 0x9d, 0xca, 0xb5, 0xd2, 0x99, 0x89, 0x31, 0xe9, 0xcc, 0xf8, 0xd0, 0x16, 0x2c, 0x6d, 0x47,
+ 0xbd, 0x97, 0x44, 0x69, 0x52, 0xdf, 0x5c, 0x9b, 0x27, 0x54, 0xff, 0xfe, 0x46, 0x89, 0xc0, 0xdb,
+ 0x25, 0xd9, 0x1e, 0x58, 0x87, 0x78, 0x8f, 0x01, 0xcd, 0xdf, 0x57, 0xea, 0x72, 0x4c, 0xc6, 0xb9,
+ 0x2e, 0xc7, 0x64, 0x2c, 0x9b, 0xf8, 0x24, 0x1a, 0x66, 0xba, 0xb9, 0x6b, 0x58, 0x1b, 0x5b, 0x85,
+ 0x2f, 0x1c, 0xc9, 0x30, 0x9f, 0xe2, 0x4d, 0x18, 0xfc, 0x37, 0x0e, 0x34, 0xec, 0x0c, 0xd1, 0x07,
+ 0x50, 0xd3, 0x49, 0x4d, 0x9b, 0x63, 0xea, 0x90, 0x7d, 0xb8, 0x33, 0x32, 0x06, 0x77, 0x0b, 0xcd,
+ 0x62, 0xab, 0x86, 0x2d, 0x0f, 0xfa, 0x16, 0xea, 0x1a, 0xac, 0xab, 0x5c, 0x54, 0x55, 0x0e, 0x2f,
+ 0x2f, 0x4a, 0x60, 0x45, 0xe8, 0x1a, 0xdb, 0x1c, 0xde, 0x23, 0x58, 0x3d, 0x0f, 0xb8, 0xd1, 0x0d,
+ 0x7f, 0x73, 0x60, 0xd9, 0x88, 0x6a, 0x5e, 0xa1, 0x28, 0x67, 0x24, 0x2c, 0xf7, 0x99, 0xf7, 0xe8,
+ 0xe1, 0x85, 0xfd, 0xa0, 0x61, 0xc1, 0xf9, 0x38, 0x9d, 0xef, 0x1c, 0x9d, 0xb7, 0x0d, 0xef, 0x2d,
+ 0x84, 0xde, 0x28, 0xf3, 0x8f, 0x60, 0xf9, 0x40, 0x44, 0x22, 0xe3, 0x17, 0xfe, 0x64, 0xfd, 0x5f,
+ 0x1d, 0x58, 0xc9, 0x31, 0xe6, 0x76, 0x9f, 0x43, 0xf5, 0x84, 0x30, 0x41, 0x5e, 0x11, 0x6e, 0x6e,
+ 0xe5, 0xce, 0xdf, 0xea, 0x3b, 0x85, 0xc0, 0x13, 0x24, 0xda, 0x82, 0x2a, 0x57, 0x3c, 0x44, 0xcb,
+ 0xba, 0xb0, 0x95, 0x75, 0x94, 0xf9, 0xde, 0x04, 0x8f, 0x42, 0x28, 0x0d, 0xe9, 0x20, 0x57, 0xfb,
+ 0xfd, 0x8b, 0xe2, 0xf6, 0xe8, 0x00, 0x2b, 0xa0, 0x7f, 0x5a, 0x80, 0xb2, 0xf6, 0xa1, 0x5d, 0x28,
+ 0xf7, 0xe3, 0x01, 0xe1, 0x42, 0xdf, 0xaa, 0xbd, 0x29, 0x7f, 0x20, 0x7f, 0x9d, 0xae, 0xdf, 0xb5,
+ 0x66, 0x15, 0x4d, 0x49, 0x22, 0x27, 0x6b, 0x14, 0x27, 0x84, 0xf1, 0x70, 0x40, 0xef, 0xeb, 0x90,
+ 0xa0, 0xa3, 0xfe, 0x60, 0xc3, 0x20, 0xb9, 0xe2, 0x24, 0xcd, 0x84, 0x69, 0xcc, 0xdb, 0x71, 0x69,
+ 0x06, 0x39, 0x22, 0x92, 0x68, 0x44, 0xcc, 0xbb, 0xa6, 0xce, 0x72, 0x44, 0xf4, 0x64, 0xdf, 0xf6,
+ 0xd5, 0xe0, 0xa8, 0x62, 0x63, 0xa1, 0x2d, 0xa8, 0x70, 0x11, 0x31, 0x41, 0xfa, 0xea, 0x49, 0xba,
+ 0xce, 0xdb, 0x9e, 0x07, 0xa0, 0x47, 0x50, 0xeb, 0xd1, 0x51, 0x3a, 0x24, 0x32, 0xba, 0x7c, 0xcd,
+ 0xe8, 0x69, 0x88, 0xec, 0x1e, 0xc2, 0x18, 0x65, 0x6a, 0xaa, 0xd4, 0xb0, 0x36, 0xfc, 0x7f, 0x0b,
+ 0xd0, 0xb0, 0xc5, 0x9a, 0x9b, 0x98, 0xbb, 0x50, 0xd6, 0xd2, 0xeb, 0xae, 0xbb, 0x5d, 0xa9, 0x34,
+ 0xc3, 0xc2, 0x52, 0xb9, 0x50, 0xe9, 0x65, 0x4c, 0x8d, 0x53, 0x3d, 0x64, 0x73, 0x53, 0x26, 0x2c,
+ 0xa8, 0x88, 0x86, 0xaa, 0x54, 0x45, 0xac, 0x0d, 0x39, 0x65, 0x27, 0xbb, 0xcd, 0xcd, 0xa6, 0xec,
+ 0x24, 0xcc, 0x96, 0xa1, 0xf2, 0x56, 0x32, 0x54, 0x6f, 0x2c, 0x83, 0xff, 0x87, 0x03, 0xb5, 0x49,
+ 0x97, 0x5b, 0xd5, 0x75, 0xde, 0xba, 0xba, 0x33, 0x95, 0x29, 0xdc, 0xae, 0x32, 0x77, 0xa0, 0xcc,
+ 0x05, 0x23, 0xd1, 0x48, 0x69, 0x54, 0xc4, 0xc6, 0x92, 0xef, 0xc9, 0x88, 0x0f, 0x94, 0x42, 0x0d,
+ 0x2c, 0x8f, 0xbe, 0x0f, 0x8d, 0xf6, 0x58, 0x10, 0xbe, 0x4f, 0xb8, 0x5c, 0x2e, 0xa4, 0xb6, 0xfd,
+ 0x48, 0x44, 0xea, 0x1e, 0x0d, 0xac, 0xce, 0xfe, 0x3d, 0x40, 0x7b, 0x31, 0x17, 0x2f, 0xd4, 0xa6,
+ 0xc8, 0x17, 0xed, 0x81, 0x45, 0x6b, 0x0f, 0x3c, 0x80, 0x77, 0x66, 0xd0, 0xe6, 0x95, 0xfa, 0xf2,
+ 0xdc, 0x26, 0xf8, 0xc9, 0xfc, 0xab, 0xa1, 0x16, 0xd2, 0x40, 0x07, 0xce, 0x2e, 0x84, 0x9b, 0xff,
+ 0x14, 0xa1, 0xb2, 0xad, 0x77, 0x6d, 0xf4, 0x1c, 0x6a, 0x93, 0x45, 0x13, 0xf9, 0xf3, 0x34, 0xe7,
+ 0x37, 0x56, 0xef, 0xe3, 0x4b, 0x31, 0x26, 0xbf, 0xaf, 0x61, 0x49, 0xad, 0xbe, 0x68, 0xc1, 0x33,
+ 0x68, 0xef, 0xc4, 0xde, 0xe5, 0x2b, 0xec, 0x86, 0x23, 0x99, 0xd4, 0x0c, 0x59, 0xc4, 0x64, 0x2f,
+ 0x1b, 0xde, 0xfa, 0x15, 0xc3, 0x07, 0xed, 0x43, 0xd9, 0xfc, 0x9c, 0x17, 0x41, 0xed, 0x49, 0xe1,
+ 0x35, 0x2f, 0x06, 0x68, 0xb2, 0x0d, 0x07, 0xed, 0x4f, 0x36, 0xa9, 0x45, 0xa9, 0xd9, 0x6d, 0xe0,
+ 0x5d, 0xf1, 0xff, 0x96, 0xb3, 0xe1, 0xa0, 0xef, 0xa1, 0x6e, 0x09, 0x8d, 0x16, 0x08, 0x3a, 0xdf,
+ 0x35, 0xde, 0xa7, 0x57, 0xa0, 0x74, 0xb2, 0xed, 0xc6, 0xeb, 0xb3, 0x35, 0xe7, 0xcf, 0xb3, 0x35,
+ 0xe7, 0xef, 0xb3, 0x35, 0xa7, 0x5b, 0x56, 0x7d, 0xff, 0xd9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff,
+ 0xe1, 0xef, 0xcc, 0xf5, 0x6f, 0x0d, 0x00, 0x00,
}
diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto
index 9768920..8d61c7e 100644
--- a/vendor/github.com/moby/buildkit/api/services/control/control.proto
+++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto
@@ -2,9 +2,13 @@
package moby.buildkit.v1;
+// The control API is currently considered experimental and may break in a backwards
+// incompatible way.
+
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
+import "github.com/moby/buildkit/api/types/worker.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
@@ -17,6 +21,7 @@
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Session(stream BytesMessage) returns (stream BytesMessage);
rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
+ // rpc Info(InfoRequest) returns (InfoResponse);
}
message PruneRequest {
@@ -112,11 +117,5 @@
}
message ListWorkersResponse {
- repeated WorkerRecord record = 1;
-}
-
-message WorkerRecord {
- string ID = 1;
- map<string, string> Labels = 2;
- repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
-}
+ repeated moby.buildkit.v1.types.WorkerRecord record = 1;
+}
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/api/types/generate.go b/vendor/github.com/moby/buildkit/api/types/generate.go
new file mode 100644
index 0000000..84007df
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/types/generate.go
@@ -0,0 +1,3 @@
+package moby_buildkit_v1_types
+
+//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/vendor/github.com/moby/buildkit/api/types/worker.pb.go
new file mode 100644
index 0000000..46c8538
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/types/worker.pb.go
@@ -0,0 +1,523 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: worker.proto
+
+/*
+ Package moby_buildkit_v1_types is a generated protocol buffer package.
+
+ It is generated from these files:
+ worker.proto
+
+ It has these top-level messages:
+ WorkerRecord
+*/
+package moby_buildkit_v1_types
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import pb "github.com/moby/buildkit/solver/pb"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type WorkerRecord struct {
+ ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
+}
+
+func (m *WorkerRecord) Reset() { *m = WorkerRecord{} }
+func (m *WorkerRecord) String() string { return proto.CompactTextString(m) }
+func (*WorkerRecord) ProtoMessage() {}
+func (*WorkerRecord) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{0} }
+
+func (m *WorkerRecord) GetID() string {
+ if m != nil {
+ return m.ID
+ }
+ return ""
+}
+
+func (m *WorkerRecord) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func (m *WorkerRecord) GetPlatforms() []pb.Platform {
+ if m != nil {
+ return m.Platforms
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord")
+}
+func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintWorker(dAtA, i, uint64(len(m.ID)))
+ i += copy(dAtA[i:], m.ID)
+ }
+ if len(m.Labels) > 0 {
+ for k, _ := range m.Labels {
+ dAtA[i] = 0x12
+ i++
+ v := m.Labels[k]
+ mapSize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v)))
+ i = encodeVarintWorker(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintWorker(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintWorker(dAtA, i, uint64(len(v)))
+ i += copy(dAtA[i:], v)
+ }
+ }
+ if len(m.Platforms) > 0 {
+ for _, msg := range m.Platforms {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintWorker(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ return i, nil
+}
+
+func encodeVarintWorker(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *WorkerRecord) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovWorker(uint64(l))
+ }
+ if len(m.Labels) > 0 {
+ for k, v := range m.Labels {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v)))
+ n += mapEntrySize + 1 + sovWorker(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Platforms) > 0 {
+ for _, e := range m.Platforms {
+ l = e.Size()
+ n += 1 + l + sovWorker(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovWorker(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozWorker(x uint64) (n int) {
+ return sovWorker(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Labels == nil {
+ m.Labels = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipWorker(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Labels[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Platforms = append(m.Platforms, pb.Platform{})
+ if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipWorker(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipWorker(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthWorker
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipWorker(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthWorker = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowWorker = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("worker.proto", fileDescriptorWorker) }
+
+var fileDescriptorWorker = []byte{
+ // 273 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x41, 0x4b, 0xf3, 0x40,
+ 0x10, 0x86, 0xbf, 0x4d, 0x3e, 0x0b, 0xdd, 0x06, 0x91, 0x45, 0x24, 0xe4, 0x10, 0x8b, 0xa7, 0x1e,
+ 0x74, 0xb6, 0xea, 0x45, 0x3d, 0x96, 0x0a, 0x16, 0x3c, 0x48, 0x2e, 0x9e, 0xb3, 0xed, 0x36, 0x86,
+ 0x24, 0xce, 0xb2, 0xd9, 0x44, 0xf2, 0x0f, 0x7b, 0xf4, 0xe2, 0x55, 0x24, 0xbf, 0x44, 0xba, 0x89,
+ 0x98, 0x83, 0xb7, 0x79, 0x87, 0x67, 0x1e, 0xde, 0xa1, 0xde, 0x1b, 0xea, 0x4c, 0x6a, 0x50, 0x1a,
+ 0x0d, 0xb2, 0x93, 0x02, 0x45, 0x03, 0xa2, 0x4a, 0xf3, 0x4d, 0x96, 0x1a, 0xa8, 0x2f, 0xc1, 0x34,
+ 0x4a, 0x96, 0xc1, 0x45, 0x92, 0x9a, 0x97, 0x4a, 0xc0, 0x1a, 0x0b, 0x9e, 0x60, 0x82, 0xdc, 0xe2,
+ 0xa2, 0xda, 0xda, 0x64, 0x83, 0x9d, 0x3a, 0x4d, 0x70, 0x3e, 0xc0, 0xf7, 0x46, 0xfe, 0x63, 0xe4,
+ 0x25, 0xe6, 0xb5, 0xd4, 0x5c, 0x09, 0x8e, 0xaa, 0xec, 0xe8, 0xb3, 0x0f, 0x42, 0xbd, 0x67, 0xdb,
+ 0x22, 0x92, 0x6b, 0xd4, 0x1b, 0x76, 0x48, 0x9d, 0xd5, 0xd2, 0x27, 0x53, 0x32, 0x1b, 0x47, 0xce,
+ 0x6a, 0xc9, 0x1e, 0xe8, 0xe8, 0x31, 0x16, 0x32, 0x2f, 0x7d, 0x67, 0xea, 0xce, 0x26, 0x57, 0x73,
+ 0xf8, 0xbb, 0x26, 0x0c, 0x2d, 0xd0, 0x9d, 0xdc, 0xbf, 0x1a, 0xdd, 0x44, 0xfd, 0x3d, 0x9b, 0xd3,
+ 0xb1, 0xca, 0x63, 0xb3, 0x45, 0x5d, 0x94, 0xbe, 0x6b, 0x65, 0x1e, 0x28, 0x01, 0x4f, 0xfd, 0x72,
+ 0xf1, 0x7f, 0xf7, 0x79, 0xfa, 0x2f, 0xfa, 0x85, 0x82, 0x5b, 0x3a, 0x19, 0x88, 0xd8, 0x11, 0x75,
+ 0x33, 0xd9, 0xf4, 0xdd, 0xf6, 0x23, 0x3b, 0xa6, 0x07, 0x75, 0x9c, 0x57, 0xd2, 0x77, 0xec, 0xae,
+ 0x0b, 0x77, 0xce, 0x0d, 0x59, 0x78, 0xbb, 0x36, 0x24, 0xef, 0x6d, 0x48, 0xbe, 0xda, 0x90, 0x88,
+ 0x91, 0x7d, 0xf6, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x5c, 0x8f, 0x26, 0x71, 0x01, 0x00,
+ 0x00,
+}
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.proto b/vendor/github.com/moby/buildkit/api/types/worker.proto
new file mode 100644
index 0000000..b1376b7
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/types/worker.proto
@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+package moby.buildkit.v1.types;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/moby/buildkit/solver/pb/ops.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+message WorkerRecord {
+ string ID = 1;
+ map<string, string> Labels = 2;
+ repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go
index a10cb9e..cf096e5 100644
--- a/vendor/github.com/moby/buildkit/cache/manager.go
+++ b/vendor/github.com/moby/buildkit/cache/manager.go
@@ -225,7 +225,7 @@
if err != nil {
return nil, err
}
- if err := parent.Finalize(ctx); err != nil {
+ if err := parent.Finalize(ctx, true); err != nil {
return nil, err
}
parentID = parent.ID()
diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go
index 060d91e..af92b80 100644
--- a/vendor/github.com/moby/buildkit/cache/refs.go
+++ b/vendor/github.com/moby/buildkit/cache/refs.go
@@ -25,7 +25,7 @@
type ImmutableRef interface {
Ref
Parent() ImmutableRef
- Finalize(ctx context.Context) error // Make sure reference is flushed to driver
+ Finalize(ctx context.Context, commit bool) error // Make sure reference is flushed to driver
Clone() ImmutableRef
}
@@ -148,7 +148,7 @@
return setReadonly(m), nil
}
- if err := cr.finalize(ctx); err != nil {
+ if err := cr.finalize(ctx, true); err != nil {
return nil, err
}
if cr.viewMount == nil { // TODO: handle this better
@@ -233,22 +233,29 @@
return nil
}
-func (sr *immutableRef) Finalize(ctx context.Context) error {
+func (sr *immutableRef) Finalize(ctx context.Context, b bool) error {
sr.mu.Lock()
defer sr.mu.Unlock()
- return sr.finalize(ctx)
+ return sr.finalize(ctx, b)
}
func (cr *cacheRecord) Metadata() *metadata.StorageItem {
return cr.md
}
-func (cr *cacheRecord) finalize(ctx context.Context) error {
+func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error {
mutable := cr.equalMutable
if mutable == nil {
return nil
}
+ if !commit {
+ if HasCachePolicyRetain(mutable) {
+ CachePolicyRetain(mutable)
+ return mutable.Metadata().Commit()
+ }
+ return nil
+ }
err := cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID())
if err != nil {
return errors.Wrapf(err, "failed to commit %s", mutable.ID())
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go
index 0536ba2..e40b8de 100644
--- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go
@@ -10,35 +10,54 @@
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
- "github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
- "github.com/moby/buildkit/util/push"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
-type ExporterOpt struct {
- SessionManager *session.Manager
+type ResolveCacheExporterFunc func(ctx context.Context, typ, target string) (Exporter, error)
+
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
+ pw, _, _ := progress.FromContext(ctx)
+ now := time.Now()
+ st := progress.Status{
+ Started: &now,
+ }
+ pw.Write(id, st)
+ return func(err error) error {
+ now := time.Now()
+ st.Completed = &now
+ pw.Write(id, st)
+ pw.Close()
+ return err
+ }
}
-func NewCacheExporter(opt ExporterOpt) *CacheExporter {
- return &CacheExporter{opt: opt}
+type Exporter interface {
+ solver.CacheExporterTarget
+ Finalize(ctx context.Context) error
}
-type CacheExporter struct {
- opt ExporterOpt
+type contentCacheExporter struct {
+ solver.CacheExporterTarget
+ chains *v1.CacheChains
+ ingester content.Ingester
}
-func (ce *CacheExporter) ExporterForTarget(target string) *RegistryCacheExporter {
+func NewExporter(ingester content.Ingester) Exporter {
cc := v1.NewCacheChains()
- return &RegistryCacheExporter{target: target, CacheExporterTarget: cc, chains: cc, exporter: ce}
+ return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester}
}
-func (ce *CacheExporter) Finalize(ctx context.Context, cc *v1.CacheChains, target string) error {
+func (ce *contentCacheExporter) Finalize(ctx context.Context) error {
+ return export(ctx, ce.ingester, ce.chains)
+}
+
+func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) error {
config, descs, err := cc.Marshal()
if err != nil {
return err
@@ -58,19 +77,16 @@
mfst.SchemaVersion = 2
mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
- allBlobs := map[digest.Digest]struct{}{}
- mp := contentutil.NewMultiProvider(nil)
for _, l := range config.Layers {
- if _, ok := allBlobs[l.Blob]; ok {
- continue
- }
dgstPair, ok := descs[l.Blob]
if !ok {
return errors.Errorf("missing blob %s", l.Blob)
}
- allBlobs[l.Blob] = struct{}{}
- mp.Add(l.Blob, dgstPair.Provider)
-
+ layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+ if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil {
+ return layerDone(errors.Wrap(err, "error writing layer blob"))
+ }
+ layerDone(nil)
mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
}
@@ -85,13 +101,11 @@
MediaType: v1.CacheConfigMediaTypeV0,
}
configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
- buf := contentutil.NewBuffer()
- if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
+ if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return configDone(errors.Wrap(err, "error writing config blob"))
}
configDone(nil)
- mp.Add(dgst, buf)
mfst.Manifests = append(mfst.Manifests, desc)
dt, err = json.Marshal(mfst)
@@ -100,44 +114,15 @@
}
dgst = digest.FromBytes(dt)
- buf = contentutil.NewBuffer()
desc = ocispec.Descriptor{
- Digest: dgst,
- Size: int64(len(dt)),
+ Digest: dgst,
+ Size: int64(len(dt)),
+ MediaType: mfst.MediaType,
}
mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
- if err := content.WriteBlob(ctx, buf, dgst.String(), bytes.NewReader(dt), desc); err != nil {
+ if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return mfstDone(errors.Wrap(err, "error writing manifest blob"))
}
mfstDone(nil)
- mp.Add(dgst, buf)
-
- return push.Push(ctx, ce.opt.SessionManager, mp, dgst, target, false)
-}
-
-type RegistryCacheExporter struct {
- solver.CacheExporterTarget
- chains *v1.CacheChains
- target string
- exporter *CacheExporter
-}
-
-func (ce *RegistryCacheExporter) Finalize(ctx context.Context) error {
- return ce.exporter.Finalize(ctx, ce.chains, ce.target)
-}
-
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.FromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
- }
+ return nil
}
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go
index fa17091..a76762e 100644
--- a/vendor/github.com/moby/buildkit/cache/remotecache/import.go
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/import.go
@@ -3,77 +3,34 @@
import (
"context"
"encoding/json"
- "net/http"
- "time"
+ "io"
"github.com/containerd/containerd/content"
- "github.com/containerd/containerd/remotes"
- "github.com/containerd/containerd/remotes/docker"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
- "github.com/moby/buildkit/session"
- "github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/solver"
- "github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/worker"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
-type ImportOpt struct {
- SessionManager *session.Manager
- Worker worker.Worker // TODO: remove. This sets the worker where the cache is imported to. Should be passed on load instead.
+// ResolveCacheImporterFunc returns importer and descriptor.
+// Currently typ needs to be an empty string.
+type ResolveCacheImporterFunc func(ctx context.Context, typ, ref string) (Importer, ocispec.Descriptor, error)
+
+type Importer interface {
+ Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error)
}
-func NewCacheImporter(opt ImportOpt) *CacheImporter {
- return &CacheImporter{opt: opt}
+func NewImporter(provider content.Provider) Importer {
+ return &contentCacheImporter{provider: provider}
}
-type CacheImporter struct {
- opt ImportOpt
+type contentCacheImporter struct {
+ provider content.Provider
}
-func (ci *CacheImporter) getCredentialsFromSession(ctx context.Context) func(string) (string, string, error) {
- id := session.FromContext(ctx)
- if id == "" {
- return nil
- }
-
- return func(host string) (string, string, error) {
- timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- caller, err := ci.opt.SessionManager.Get(timeoutCtx, id)
- if err != nil {
- return "", "", err
- }
-
- return auth.CredentialsFunc(context.TODO(), caller)(host)
- }
-}
-
-func (ci *CacheImporter) Resolve(ctx context.Context, ref string) (solver.CacheManager, error) {
- resolver := docker.NewResolver(docker.ResolverOptions{
- Client: http.DefaultClient,
- Credentials: ci.getCredentialsFromSession(ctx),
- })
-
- ref, desc, err := resolver.Resolve(ctx, ref)
- if err != nil {
- return nil, err
- }
-
- fetcher, err := resolver.Fetcher(ctx, ref)
- if err != nil {
- return nil, err
- }
-
- b := contentutil.NewBuffer()
-
- if _, err := remotes.FetchHandler(b, fetcher)(ctx, desc); err != nil {
- return nil, err
- }
-
- dt, err := content.ReadBlob(ctx, b, desc)
+func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+ dt, err := readBlob(ctx, ci.provider, desc)
if err != nil {
return nil, err
}
@@ -94,19 +51,15 @@
}
allLayers[m.Digest] = v1.DescriptorProviderPair{
Descriptor: m,
- Provider: contentutil.FromFetcher(fetcher, m),
+ Provider: ci.provider,
}
}
if configDesc.Digest == "" {
- return nil, errors.Errorf("invalid build cache from %s", ref)
+ return nil, errors.Errorf("invalid build cache from %+v", desc)
}
- if _, err := remotes.FetchHandler(b, fetcher)(ctx, configDesc); err != nil {
- return nil, err
- }
-
- dt, err = content.ReadBlob(ctx, b, configDesc)
+ dt, err = readBlob(ctx, ci.provider, configDesc)
if err != nil {
return nil, err
}
@@ -116,9 +69,30 @@
return nil, err
}
- keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, ci.opt.Worker)
+ keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
if err != nil {
return nil, err
}
- return solver.NewCacheManager(ref, keysStorage, resultStorage), nil
+ return solver.NewCacheManager(id, keysStorage, resultStorage), nil
+}
+
+func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) {
+ maxBlobSize := int64(1 << 20)
+ if desc.Size > maxBlobSize {
+ return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize)
+ }
+ dt, err := content.ReadBlob(ctx, provider, desc)
+ if err != nil {
+ // NOTE: even if err == EOF, we might have got expected dt here.
+ // For instance, http.Response.Body is known to return non-zero bytes with EOF.
+ if err == io.EOF {
+ if dtDigest := desc.Digest.Algorithm().FromBytes(dt); dtDigest != desc.Digest {
+ err = errors.Wrapf(err, "got EOF, expected %s (%d bytes), got %s (%d bytes)",
+ desc.Digest, desc.Size, dtDigest, len(dt))
+ } else {
+ err = nil
+ }
+ }
+ }
+ return dt, err
}
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go
new file mode 100644
index 0000000..fa9dc1a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go
@@ -0,0 +1,73 @@
+package registry
+
+import (
+ "context"
+ "time"
+
+ "github.com/containerd/containerd/remotes"
+ "github.com/containerd/containerd/remotes/docker"
+ "github.com/moby/buildkit/cache/remotecache"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/session/auth"
+ "github.com/moby/buildkit/util/contentutil"
+ "github.com/moby/buildkit/util/tracing"
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc {
+ return func(ctx context.Context, typ, ref string) (remotecache.Exporter, error) {
+ if typ != "" {
+ return nil, errors.Errorf("unsupported cache exporter type: %s", typ)
+ }
+ remote := newRemoteResolver(ctx, sm)
+ pusher, err := remote.Pusher(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil
+ }
+}
+
+func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc {
+ return func(ctx context.Context, typ, ref string) (remotecache.Importer, specs.Descriptor, error) {
+ if typ != "" {
+ return nil, specs.Descriptor{}, errors.Errorf("unsupported cache importer type: %s", typ)
+ }
+ remote := newRemoteResolver(ctx, sm)
+ xref, desc, err := remote.Resolve(ctx, ref)
+ if err != nil {
+ return nil, specs.Descriptor{}, err
+ }
+ fetcher, err := remote.Fetcher(ctx, xref)
+ if err != nil {
+ return nil, specs.Descriptor{}, err
+ }
+ return remotecache.NewImporter(contentutil.FromFetcher(fetcher)), desc, nil
+ }
+}
+
+func newRemoteResolver(ctx context.Context, sm *session.Manager) remotes.Resolver {
+ return docker.NewResolver(docker.ResolverOptions{
+ Client: tracing.DefaultClient,
+ Credentials: getCredentialsFunc(ctx, sm),
+ })
+}
+
+func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
+ id := session.FromContext(ctx)
+ if id == "" {
+ return nil
+ }
+ return func(host string) (string, string, error) {
+ timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ caller, err := sm.Get(timeoutCtx, id)
+ if err != nil {
+ return "", "", err
+ }
+
+ return auth.CredentialsFunc(context.TODO(), caller)(host)
+ }
+}
diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go
index f0f2968..5b4aa9b 100644
--- a/vendor/github.com/moby/buildkit/client/llb/source.go
+++ b/vendor/github.com/moby/buildkit/client/llb/source.go
@@ -51,6 +51,15 @@
return "", nil, nil, err
}
+ if strings.HasPrefix(s.id, "local://") {
+ if _, hasSession := s.attrs[pb.AttrLocalSessionID]; !hasSession {
+ uid := s.constraints.LocalUniqueID
+ if uid == "" {
+ uid = constraints.LocalUniqueID
+ }
+ s.attrs[pb.AttrLocalUniqueID] = uid
+ }
+ }
proto, md := MarshalConstraints(constraints, &s.constraints)
proto.Op = &pb.Op_Source{
diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go
index f9e2138..a0a9850 100644
--- a/vendor/github.com/moby/buildkit/client/llb/state.go
+++ b/vendor/github.com/moby/buildkit/client/llb/state.go
@@ -4,6 +4,7 @@
"context"
"github.com/containerd/containerd/platforms"
+ "github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
@@ -78,7 +79,8 @@
defaultPlatform := platforms.Normalize(platforms.DefaultSpec())
c := &Constraints{
- Platform: &defaultPlatform,
+ Platform: &defaultPlatform,
+ LocalUniqueID: identity.NewID(),
}
for _, o := range append(s.opts, co...) {
o.SetConstraintsOption(c)
@@ -358,6 +360,7 @@
Platform *specs.Platform
WorkerConstraints []string
Metadata pb.OpMetadata
+ LocalUniqueID string
}
func Platform(p specs.Platform) ConstraintsOpt {
@@ -366,6 +369,12 @@
})
}
+func LocalUniqueID(v string) ConstraintsOpt {
+ return constraintsOptFunc(func(c *Constraints) {
+ c.LocalUniqueID = v
+ })
+}
+
var (
LinuxAmd64 = Platform(specs.Platform{OS: "linux", Architecture: "amd64"})
LinuxArmhf = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})
diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go
index 2645a59..e70dcbc 100644
--- a/vendor/github.com/moby/buildkit/client/workers.go
+++ b/vendor/github.com/moby/buildkit/client/workers.go
@@ -33,7 +33,7 @@
wi = append(wi, &WorkerInfo{
ID: w.ID,
Labels: w.Labels,
- Platforms: toClientPlatforms(w.Platforms),
+ Platforms: pb.ToSpecPlatforms(w.Platforms),
})
}
@@ -51,17 +51,3 @@
wi.Filter = f
}
}
-
-func toClientPlatforms(p []pb.Platform) []specs.Platform {
- out := make([]specs.Platform, 0, len(p))
- for _, pp := range p {
- out = append(out, specs.Platform{
- OS: pp.OS,
- Architecture: pp.Architecture,
- Variant: pp.Variant,
- OSVersion: pp.OSVersion,
- OSFeatures: pp.OSFeatures,
- })
- }
- return out
-}
diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go
index 2460df8..b59c1ee 100644
--- a/vendor/github.com/moby/buildkit/control/control.go
+++ b/vendor/github.com/moby/buildkit/control/control.go
@@ -5,6 +5,7 @@
"github.com/docker/distribution/reference"
controlapi "github.com/moby/buildkit/api/services/control"
+ apitypes "github.com/moby/buildkit/api/types"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/exporter"
@@ -15,20 +16,21 @@
"github.com/moby/buildkit/solver/llbsolver"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/worker"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
+type ResolveCacheExporterFunc func(ctx context.Context, typ, target string) (remotecache.Exporter, error)
+
type Opt struct {
- SessionManager *session.Manager
- WorkerController *worker.Controller
- Frontends map[string]frontend.Frontend
- CacheKeyStorage solver.CacheKeyStorage
- CacheExporter *remotecache.CacheExporter
- CacheImporter *remotecache.CacheImporter
+ SessionManager *session.Manager
+ WorkerController *worker.Controller
+ Frontends map[string]frontend.Frontend
+ CacheKeyStorage solver.CacheKeyStorage
+ ResolveCacheExporterFunc remotecache.ResolveCacheExporterFunc
+ ResolveCacheImporterFunc remotecache.ResolveCacheImporterFunc
}
type Controller struct { // TODO: ControlService
@@ -37,7 +39,7 @@
}
func NewController(opt Opt) (*Controller, error) {
- solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.CacheImporter)
+ solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, opt.CacheKeyStorage, opt.ResolveCacheImporterFunc)
if err != nil {
return nil, errors.Wrap(err, "failed to create solver")
}
@@ -103,7 +105,7 @@
}(w)
}
- eg2, ctx := errgroup.WithContext(stream.Context())
+ eg2, _ := errgroup.WithContext(stream.Context())
eg2.Go(func() error {
defer close(ch)
@@ -154,14 +156,18 @@
}
}
- var cacheExporter *remotecache.RegistryCacheExporter
- if ref := req.Cache.ExportRef; ref != "" {
+ var cacheExporter remotecache.Exporter
+ if ref := req.Cache.ExportRef; ref != "" && c.opt.ResolveCacheExporterFunc != nil {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, err
}
exportCacheRef := reference.TagNameOnly(parsed).String()
- cacheExporter = c.opt.CacheExporter.ExporterForTarget(exportCacheRef)
+ typ := "" // unimplemented yet (typically registry)
+ cacheExporter, err = c.opt.ResolveCacheExporterFunc(ctx, typ, exportCacheRef)
+ if err != nil {
+ return nil, err
+ }
}
var importCacheRefs []string
@@ -269,10 +275,10 @@
return nil, err
}
for _, w := range workers {
- resp.Record = append(resp.Record, &controlapi.WorkerRecord{
+ resp.Record = append(resp.Record, &apitypes.WorkerRecord{
ID: w.ID(),
Labels: w.Labels(),
- Platforms: toPBPlatforms(w.Platforms()),
+ Platforms: pb.PlatformsFromSpec(w.Platforms()),
})
}
return resp, nil
@@ -296,17 +302,3 @@
}
return solver.CacheExportModeMin
}
-
-func toPBPlatforms(p []specs.Platform) []pb.Platform {
- out := make([]pb.Platform, 0, len(p))
- for _, pp := range p {
- out = append(out, pb.Platform{
- OS: pp.OS,
- Architecture: pp.Architecture,
- Variant: pp.Variant,
- OSVersion: pp.OSVersion,
- OSFeatures: pp.OSFeatures,
- })
- }
- return out
-}
diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
index 97eb343..5225d5f 100644
--- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
+++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
@@ -21,7 +21,7 @@
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/identity"
- "github.com/moby/buildkit/util/libcontainer_specconv"
+ rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
"github.com/moby/buildkit/util/system"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
@@ -84,6 +84,8 @@
LogFormat: runc.JSON,
PdeathSignal: syscall.SIGKILL,
Setpgid: true,
+ // we don't execute runc with --rootless=(true|false) explicitly,
+ // so as to support non-runc runtimes
}
w := &runcExecutor{
@@ -169,13 +171,11 @@
return errors.Wrapf(err, "failed to create working directory %s", newp)
}
+ if err := setOOMScoreAdj(spec); err != nil {
+ return err
+ }
if w.rootless {
- specconv.ToRootless(spec, nil)
- // TODO(AkihiroSuda): keep Cgroups enabled if /sys/fs/cgroup/cpuset/buildkit exists and writable
- spec.Linux.CgroupsPath = ""
- // TODO(AkihiroSuda): ToRootless removes netns, but we should readd netns here
- // if either SUID or userspace NAT is configured on the host.
- if err := setOOMScoreAdj(spec); err != nil {
+ if err := rootlessspecconv.ToRootless(spec); err != nil {
return err
}
}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
index c618f38..947eef4 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go
@@ -36,17 +36,21 @@
var httpPrefix = regexp.MustCompile("^https?://")
var gitUrlPathWithFragmentSuffix = regexp.MustCompile("\\.git(?:#.+)?$")
-func Build(ctx context.Context, c client.Client) error {
- opts := c.Opts()
+func Build(ctx context.Context, c client.Client) (*client.Result, error) {
+ opts := c.BuildOpts().Opts
- // TODO: read buildPlatforms from workers
- buildPlatforms := []specs.Platform{platforms.DefaultSpec()}
+ defaultBuildPlatform := platforms.DefaultSpec()
+ if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 {
+ defaultBuildPlatform = workers[0].Platforms[0]
+ }
+
+ buildPlatforms := []specs.Platform{defaultBuildPlatform}
targetPlatform := platforms.DefaultSpec()
if v := opts[keyTargetPlatform]; v != "" {
var err error
targetPlatform, err = platforms.Parse(v)
if err != nil {
- return errors.Wrapf(err, "failed to parse target platform %s", v)
+ return nil, errors.Wrapf(err, "failed to parse target platform %s", v)
}
}
@@ -66,7 +70,7 @@
src := llb.Local(LocalNameDockerfile,
llb.IncludePatterns([]string{filename}),
- llb.SessionID(c.SessionID()),
+ llb.SessionID(c.BuildOpts().SessionID),
llb.SharedKeyHint(defaultDockerfileName),
)
var buildContext *llb.State
@@ -78,13 +82,18 @@
httpContext := llb.HTTP(opts[LocalNameContext], llb.Filename("context"))
def, err := httpContext.Marshal()
if err != nil {
- return err
+ return nil, errors.Wrapf(err, "failed to marshal httpcontext")
}
- ref, err := c.Solve(ctx, client.SolveRequest{
+ res, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
- }, nil, false)
+ })
if err != nil {
- return err
+ return nil, errors.Wrapf(err, "failed to resolve httpcontext")
+ }
+
+ ref, err := res.SingleRef()
+ if err != nil {
+ return nil, err
}
dt, err := ref.ReadFile(ctx, client.ReadRequest{
@@ -94,7 +103,7 @@
},
})
if err != nil {
- return err
+ return nil, errors.Errorf("failed to read downloaded context")
}
if isArchive(dt) {
unpack := llb.Image(dockerfile2llb.CopyImage).
@@ -112,15 +121,20 @@
def, err := src.Marshal()
if err != nil {
- return err
+ return nil, errors.Wrapf(err, "failed to marshal local source")
}
eg, ctx2 := errgroup.WithContext(ctx)
var dtDockerfile []byte
eg.Go(func() error {
- ref, err := c.Solve(ctx2, client.SolveRequest{
+ res, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
- }, nil, false)
+ })
+ if err != nil {
+ return errors.Wrapf(err, "failed to resolve dockerfile")
+ }
+
+ ref, err := res.SingleRef()
if err != nil {
return err
}
@@ -129,7 +143,7 @@
Filename: filename,
})
if err != nil {
- return err
+ return errors.Wrapf(err, "failed to read dockerfile")
}
return nil
})
@@ -139,7 +153,7 @@
dockerignoreState := buildContext
if dockerignoreState == nil {
st := llb.Local(LocalNameContext,
- llb.SessionID(c.SessionID()),
+ llb.SessionID(c.BuildOpts().SessionID),
llb.IncludePatterns([]string{dockerignoreFilename}),
llb.SharedKeyHint(dockerignoreFilename),
)
@@ -149,9 +163,13 @@
if err != nil {
return err
}
- ref, err := c.Solve(ctx2, client.SolveRequest{
+ res, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
- }, nil, false)
+ })
+ if err != nil {
+ return err
+ }
+ ref, err := res.SingleRef()
if err != nil {
return err
}
@@ -169,10 +187,10 @@
}
if err := eg.Wait(); err != nil {
- return err
+ return nil, err
}
- if _, ok := c.Opts()["cmdline"]; !ok {
+ if _, ok := opts["cmdline"]; !ok {
ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile))
if ok {
return forwardGateway(ctx, c, ref, cmdline)
@@ -184,7 +202,7 @@
MetaResolver: c,
BuildArgs: filter(opts, buildArgPrefix),
Labels: filter(opts, labelPrefix),
- SessionID: c.SessionID(),
+ SessionID: c.BuildOpts().SessionID,
BuildContext: buildContext,
Excludes: excludes,
IgnoreCache: ignoreCache,
@@ -193,17 +211,17 @@
})
if err != nil {
- return err
+ return nil, errors.Wrapf(err, "failed to create LLB definition")
}
def, err = st.Marshal()
if err != nil {
- return err
+ return nil, errors.Wrapf(err, "failed to marshal LLB definition")
}
config, err := json.Marshal(img)
if err != nil {
- return err
+ return nil, errors.Wrapf(err, "failed to marshal image config")
}
var cacheFrom []string
@@ -211,30 +229,30 @@
cacheFrom = strings.Split(cacheFromStr, ",")
}
- _, err = c.Solve(ctx, client.SolveRequest{
+ res, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
ImportCacheRefs: cacheFrom,
- }, map[string][]byte{
- exporterImageConfig: config,
- }, true)
+ })
if err != nil {
- return err
+ return nil, err
}
- return nil
+
+ res.AddMeta(exporterImageConfig, config)
+
+ return res, nil
}
-func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) error {
- opts := c.Opts()
+func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) {
+ opts := c.BuildOpts().Opts
if opts == nil {
opts = map[string]string{}
}
opts["cmdline"] = cmdline
opts["source"] = ref
- _, err := c.Solve(ctx, client.SolveRequest{
+ return c.Solve(ctx, client.SolveRequest{
Frontend: "gateway.v0",
FrontendOpt: opts,
- }, nil, true)
- return err
+ })
}
func filter(opt map[string]string, key string) map[string]string {
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go
deleted file mode 100644
index 7c0e192..0000000
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package dockerfile
-
-import (
- "context"
-
- "github.com/moby/buildkit/frontend"
- "github.com/moby/buildkit/frontend/dockerfile/builder"
- "github.com/moby/buildkit/solver"
-)
-
-func NewDockerfileFrontend() frontend.Frontend {
- return &dfFrontend{}
-}
-
-type dfFrontend struct{}
-
-func (f *dfFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) {
-
- c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts)
- if err != nil {
- return nil, nil, err
- }
-
- defer func() {
- for _, r := range c.refs {
- if r != nil && (c.final != r || retErr != nil) {
- r.Release(context.TODO())
- }
- }
- }()
-
- if err := builder.Build(ctx, c); err != nil {
- return nil, nil, err
- }
-
- if c.final == nil || c.final.CachedResult == nil {
- return nil, c.exporterAttr, nil
- }
-
- return c.final, c.exporterAttr, nil
-}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
index ac70fb8..983672f 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go
@@ -80,8 +80,9 @@
return nil, nil, err
}
- for i := range metaArgs {
- metaArgs[i] = setBuildArgValue(metaArgs[i], opt.BuildArgs)
+ optMetaArgs := []instructions.KeyValuePairOptional{}
+ for _, metaArg := range metaArgs {
+ optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs))
}
shlex := shell.NewLex(dockerfile.EscapeToken)
@@ -95,7 +96,7 @@
// set base state for every image
for _, st := range stages {
- name, err := shlex.ProcessWord(st.BaseName, toEnvList(metaArgs, nil))
+ name, err := shlex.ProcessWord(st.BaseName, toEnvList(optMetaArgs, nil))
if err != nil {
return nil, nil, err
}
@@ -111,7 +112,7 @@
}
if v := st.Platform; v != "" {
- v, err := shlex.ProcessWord(v, toEnvList(metaArgs, nil))
+ v, err := shlex.ProcessWord(v, toEnvList(optMetaArgs, nil))
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v)
}
@@ -268,7 +269,7 @@
opt := dispatchOpt{
allDispatchStates: allDispatchStates,
- metaArgs: metaArgs,
+ metaArgs: optMetaArgs,
buildArgValues: opt.BuildArgs,
shlex: shlex,
sessionID: opt.SessionID,
@@ -359,7 +360,7 @@
type dispatchOpt struct {
allDispatchStates *dispatchStates
- metaArgs []instructions.ArgCommand
+ metaArgs []instructions.KeyValuePairOptional
buildArgValues map[string]string
shlex *shell.Lex
sessionID string
@@ -442,7 +443,7 @@
stage instructions.Stage
base *dispatchState
deps map[*dispatchState]struct{}
- buildArgs []instructions.ArgCommand
+ buildArgs []instructions.KeyValuePairOptional
commands []command
ctxPaths map[string]struct{}
ignoreCache bool
@@ -538,7 +539,7 @@
}
opt := []llb.RunOption{llb.Args(args)}
for _, arg := range d.buildArgs {
- opt = append(opt, llb.AddEnv(arg.Key, getArgValue(arg)))
+ opt = append(opt, llb.AddEnv(arg.Key, arg.ValueString()))
}
opt = append(opt, dfCmd(c))
if d.ignoreCache {
@@ -770,20 +771,22 @@
return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil)
}
-func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.ArgCommand, buildArgValues map[string]string) error {
+func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error {
commitStr := "ARG " + c.Key
+ buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues)
+
if c.Value != nil {
commitStr += "=" + *c.Value
}
- if c.Value == nil {
+ if buildArg.Value == nil {
for _, ma := range metaArgs {
- if ma.Key == c.Key {
- c.Value = ma.Value
+ if ma.Key == buildArg.Key {
+ buildArg.Value = ma.Value
}
}
}
- d.buildArgs = append(d.buildArgs, setBuildArgValue(*c, buildArgValues))
+ d.buildArgs = append(d.buildArgs, buildArg)
return commitToHistory(&d.image, commitStr, false, nil)
}
@@ -834,28 +837,20 @@
return env
}
-func setBuildArgValue(c instructions.ArgCommand, values map[string]string) instructions.ArgCommand {
- if v, ok := values[c.Key]; ok {
- c.Value = &v
+func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional {
+ if v, ok := values[kvpo.Key]; ok {
+ kvpo.Value = &v
}
- return c
+ return kvpo
}
-func toEnvList(args []instructions.ArgCommand, env []string) []string {
+func toEnvList(args []instructions.KeyValuePairOptional, env []string) []string {
for _, arg := range args {
- env = addEnv(env, arg.Key, getArgValue(arg), false)
+ env = addEnv(env, arg.Key, arg.ValueString(), false)
}
return env
}
-func getArgValue(arg instructions.ArgCommand) string {
- v := ""
- if arg.Value != nil {
- v = *arg.Value
- }
- return v
-}
-
func dfCmd(cmd interface{}) llb.ConstraintsOpt {
// TODO: add fmt.Stringer to instructions.Command to remove interface{}
var cmdStr string
@@ -870,10 +865,10 @@
})
}
-func runCommandString(args []string, buildArgs []instructions.ArgCommand) string {
+func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional) string {
var tmpBuildEnv []string
for _, arg := range buildArgs {
- tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+getArgValue(arg))
+ tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+arg.ValueString())
}
if len(tmpBuildEnv) > 0 {
tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...)
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go
deleted file mode 100644
index aee1f02..0000000
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/forward.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package dockerfile
-
-import (
- "context"
-
- "github.com/moby/buildkit/cache"
- "github.com/moby/buildkit/frontend"
- "github.com/moby/buildkit/frontend/gateway/client"
- "github.com/moby/buildkit/session"
- "github.com/moby/buildkit/solver"
- "github.com/moby/buildkit/worker"
- "github.com/pkg/errors"
-)
-
-func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (*bridgeClient, error) {
- return &bridgeClient{opts: opts, FrontendLLBBridge: llbBridge, sid: session.FromContext(ctx)}, nil
-}
-
-type bridgeClient struct {
- frontend.FrontendLLBBridge
- opts map[string]string
- final *ref
- sid string
- exporterAttr map[string][]byte
- refs []*ref
-}
-
-func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest, exporterAttr map[string][]byte, final bool) (client.Reference, error) {
- r, exporterAttrRes, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{
- Definition: req.Definition,
- Frontend: req.Frontend,
- FrontendOpt: req.FrontendOpt,
- ImportCacheRefs: req.ImportCacheRefs,
- })
- if err != nil {
- return nil, err
- }
- rr := &ref{r}
- c.refs = append(c.refs, rr)
- if final {
- c.final = rr
- if exporterAttr == nil {
- exporterAttr = make(map[string][]byte)
- }
- for k, v := range exporterAttrRes {
- exporterAttr[k] = v
- }
- c.exporterAttr = exporterAttr
- }
- return rr, nil
-}
-func (c *bridgeClient) Opts() map[string]string {
- return c.opts
-}
-func (c *bridgeClient) SessionID() string {
- return c.sid
-}
-
-type ref struct {
- solver.CachedResult
-}
-
-func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
- ref, err := r.getImmutableRef()
- if err != nil {
- return nil, err
- }
- newReq := cache.ReadRequest{
- Filename: req.Filename,
- }
- if r := req.Range; r != nil {
- newReq.Range = &cache.FileRange{
- Offset: r.Offset,
- Length: r.Length,
- }
- }
- return cache.ReadFile(ctx, ref, newReq)
-}
-
-func (r *ref) getImmutableRef() (cache.ImmutableRef, error) {
- ref, ok := r.CachedResult.Sys().(*worker.WorkerRef)
- if !ok {
- return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys())
- }
- return ref.ImmutableRef, nil
-}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
index 903353e..28b34f6 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go
@@ -18,6 +18,20 @@
return kvp.Key + "=" + kvp.Value
}
+// KeyValuePairOptional is the same as KeyValuePair but Value is optional
+type KeyValuePairOptional struct {
+ Key string
+ Value *string
+}
+
+func (kvpo *KeyValuePairOptional) ValueString() string {
+ v := ""
+ if kvpo.Value != nil {
+ v = *kvpo.Value
+ }
+ return v
+}
+
// Command is implemented by every command present in a dockerfile
type Command interface {
Name() string
@@ -346,8 +360,7 @@
// Dockerfile author may optionally set a default value of this variable.
type ArgCommand struct {
withNameAndCode
- Key string
- Value *string
+ KeyValuePairOptional
}
// Expand variables
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go
index d84bb43..ef17457 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go
@@ -580,10 +580,7 @@
return nil, errExactlyOneArgument("ARG")
}
- var (
- name string
- newValue *string
- )
+ kvpo := KeyValuePairOptional{}
arg := req.args[0]
// 'arg' can just be a name or name-value pair. Note that this is different
@@ -597,16 +594,15 @@
return nil, errBlankCommandNames("ARG")
}
- name = parts[0]
- newValue = &parts[1]
+ kvpo.Key = parts[0]
+ kvpo.Value = &parts[1]
} else {
- name = arg
+ kvpo.Key = arg
}
return &ArgCommand{
- Key: name,
- Value: newValue,
- withNameAndCode: newWithNameAndCode(req),
+ KeyValuePairOptional: kvpo,
+ withNameAndCode: newWithNameAndCode(req),
}, nil
}
diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go
index d76742d..5bae3d2 100644
--- a/vendor/github.com/moby/buildkit/frontend/frontend.go
+++ b/vendor/github.com/moby/buildkit/frontend/frontend.go
@@ -5,26 +5,25 @@
"io"
"github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/client"
"github.com/moby/buildkit/executor"
- "github.com/moby/buildkit/solver"
- "github.com/moby/buildkit/solver/pb"
+ gatewayclient "github.com/moby/buildkit/frontend/gateway/client"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
type Frontend interface {
- Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (solver.CachedResult, map[string][]byte, error)
+ Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string) (*Result, error)
}
type FrontendLLBBridge interface {
- Solve(ctx context.Context, req SolveRequest) (solver.CachedResult, map[string][]byte, error)
+ Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error
}
-type SolveRequest struct {
- Definition *pb.Definition
- Frontend string
- FrontendOpt map[string]string
- ImportCacheRefs []string
+type SolveRequest = gatewayclient.SolveRequest
+
+type WorkerInfos interface {
+ WorkerInfos() []client.WorkerInfo
}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go
index 51c11d6..2198a56 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go
@@ -8,12 +8,10 @@
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
-// TODO: make this take same options as LLBBridge. Add Return()
type Client interface {
- Solve(ctx context.Context, req SolveRequest, exporterAttr map[string][]byte, final bool) (Reference, error)
+ Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, platform *specs.Platform) (digest.Digest, []byte, error)
- Opts() map[string]string
- SessionID() string
+ BuildOpts() BuildOpts
}
type Reference interface {
@@ -39,3 +37,16 @@
FrontendOpt map[string]string
ImportCacheRefs []string
}
+
+type WorkerInfo struct {
+ ID string
+ Labels map[string]string
+ Platforms []specs.Platform
+}
+
+type BuildOpts struct {
+ Opts map[string]string
+ SessionID string
+ Workers []WorkerInfo
+ Product string
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go
new file mode 100644
index 0000000..bd54228
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go
@@ -0,0 +1,54 @@
+package client
+
+import (
+ "context"
+ "sync"
+
+ "github.com/pkg/errors"
+)
+
+type BuildFunc func(context.Context, Client) (*Result, error)
+
+type Result struct {
+ mu sync.Mutex
+ Ref Reference
+ Refs map[string]Reference
+ Metadata map[string][]byte
+}
+
+func NewResult() *Result {
+ return &Result{}
+}
+
+func (r *Result) AddMeta(k string, v []byte) {
+ r.mu.Lock()
+ if r.Metadata == nil {
+ r.Metadata = map[string][]byte{}
+ }
+ r.Metadata[k] = v
+ r.mu.Unlock()
+}
+
+func (r *Result) AddRef(k string, ref Reference) {
+ r.mu.Lock()
+ if r.Refs == nil {
+ r.Refs = map[string]Reference{}
+ }
+ r.Refs[k] = ref
+ r.mu.Unlock()
+}
+
+func (r *Result) SetRef(ref Reference) {
+ r.Ref = ref
+}
+
+func (r *Result) SingleRef() (Reference, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if r.Refs != nil && r.Ref == nil {
+ return nil, errors.Errorf("invalid map result")
+ }
+
+ return r.Ref, nil
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go
new file mode 100644
index 0000000..65597e5
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go
@@ -0,0 +1,149 @@
+package forwarder
+
+import (
+ "context"
+ "sync"
+
+ "github.com/moby/buildkit/cache"
+ clienttypes "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/frontend"
+ "github.com/moby/buildkit/frontend/gateway/client"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/util/apicaps"
+ "github.com/moby/buildkit/worker"
+ "github.com/pkg/errors"
+)
+
+func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) {
+ return &bridgeClient{
+ opts: opts,
+ FrontendLLBBridge: llbBridge,
+ sid: session.FromContext(ctx),
+ workerInfos: workerInfos,
+ final: map[*ref]struct{}{},
+ }, nil
+}
+
+type bridgeClient struct {
+ frontend.FrontendLLBBridge
+ mu sync.Mutex
+ opts map[string]string
+ final map[*ref]struct{}
+ sid string
+ exporterAttr map[string][]byte
+ refs []*ref
+ workerInfos []clienttypes.WorkerInfo
+}
+
+func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) {
+ res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{
+ Definition: req.Definition,
+ Frontend: req.Frontend,
+ FrontendOpt: req.FrontendOpt,
+ ImportCacheRefs: req.ImportCacheRefs,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ cRes := &client.Result{}
+ c.mu.Lock()
+ for k, r := range res.Refs {
+ rr := &ref{r}
+ c.refs = append(c.refs, rr)
+ cRes.AddRef(k, rr)
+ }
+ if r := res.Ref; r != nil {
+ rr := &ref{r}
+ c.refs = append(c.refs, rr)
+ cRes.SetRef(rr)
+ }
+ c.mu.Unlock()
+ cRes.Metadata = res.Metadata
+
+ return cRes, nil
+}
+func (c *bridgeClient) BuildOpts() client.BuildOpts {
+ workers := make([]client.WorkerInfo, 0, len(c.workerInfos))
+ for _, w := range c.workerInfos {
+ workers = append(workers, client.WorkerInfo(w))
+ }
+
+ return client.BuildOpts{
+ Opts: c.opts,
+ SessionID: c.sid,
+ Workers: workers,
+ Product: apicaps.ExportedProduct,
+ }
+}
+
+func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) {
+ if r == nil {
+ return nil, nil
+ }
+
+ res := &frontend.Result{}
+
+ if r.Refs != nil {
+ res.Refs = make(map[string]solver.CachedResult, len(r.Refs))
+ for k, r := range r.Refs {
+ rr, ok := r.(*ref)
+ if !ok {
+ return nil, errors.Errorf("invalid reference type for forward %T", r)
+ }
+ c.final[rr] = struct{}{}
+ res.Refs[k] = rr.CachedResult
+ }
+ }
+ if r := r.Ref; r != nil {
+ rr, ok := r.(*ref)
+ if !ok {
+ return nil, errors.Errorf("invalid reference type for forward %T", r)
+ }
+ c.final[rr] = struct{}{}
+ res.Ref = rr.CachedResult
+ }
+ res.Metadata = r.Metadata
+
+ return res, nil
+}
+
+func (c *bridgeClient) discard(err error) {
+ for _, r := range c.refs {
+ if r != nil {
+ if _, ok := c.final[r]; !ok || err != nil {
+ r.Release(context.TODO())
+ }
+ }
+ }
+}
+
+type ref struct {
+ solver.CachedResult
+}
+
+func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
+ ref, err := r.getImmutableRef()
+ if err != nil {
+ return nil, err
+ }
+ newReq := cache.ReadRequest{
+ Filename: req.Filename,
+ }
+ if r := req.Range; r != nil {
+ newReq.Range = &cache.FileRange{
+ Offset: r.Offset,
+ Length: r.Length,
+ }
+ }
+ return cache.ReadFile(ctx, ref, newReq)
+}
+
+func (r *ref) getImmutableRef() (cache.ImmutableRef, error) {
+ ref, ok := r.CachedResult.Sys().(*worker.WorkerRef)
+ if !ok {
+ return nil, errors.Errorf("invalid ref: %T", r.CachedResult.Sys())
+ }
+ return ref.ImmutableRef, nil
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go
new file mode 100644
index 0000000..61a187d
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go
@@ -0,0 +1,38 @@
+package forwarder
+
+import (
+ "context"
+
+ "github.com/moby/buildkit/frontend"
+ "github.com/moby/buildkit/frontend/gateway/client"
+)
+
+func NewGatewayForwarder(w frontend.WorkerInfos, f client.BuildFunc) frontend.Frontend {
+ return &GatewayForwarder{
+ workers: w,
+ f: f,
+ }
+}
+
+type GatewayForwarder struct {
+ workers frontend.WorkerInfos
+ f client.BuildFunc
+}
+
+func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRes *frontend.Result, retErr error) {
+ c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, gf.workers.WorkerInfos())
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ c.discard(retErr)
+ }()
+
+ res, err := gf.f(ctx, c)
+ if err != nil {
+ return nil, err
+ }
+
+ return c.toFrontendResult(res)
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go
index bc7698a..7f6e2c1 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go
@@ -12,6 +12,7 @@
"time"
"github.com/docker/distribution/reference"
+ apitypes "github.com/moby/buildkit/api/types"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
@@ -20,15 +21,19 @@
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver"
+ opspb "github.com/moby/buildkit/solver/pb"
+ "github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/tracing"
"github.com/moby/buildkit/worker"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
+ spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/status"
)
const (
@@ -37,11 +42,14 @@
exporterImageConfig = "containerimage.config"
)
-func NewGatewayFrontend() frontend.Frontend {
- return &gatewayFrontend{}
+func NewGatewayFrontend(w frontend.WorkerInfos) frontend.Frontend {
+ return &gatewayFrontend{
+ workers: w,
+ }
}
type gatewayFrontend struct {
+ workers frontend.WorkerInfos
}
func filterPrefix(opts map[string]string, pfx string) map[string]string {
@@ -54,10 +62,10 @@
return m
}
-func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (retRef solver.CachedResult, exporterAttr map[string][]byte, retErr error) {
+func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string) (ret *frontend.Result, retErr error) {
source, ok := opts[keySource]
if !ok {
- return nil, nil, errors.Errorf("no source specified for gateway")
+ return nil, errors.Errorf("no source specified for gateway")
}
sid := session.FromContext(ctx)
@@ -68,46 +76,52 @@
var readonly bool // TODO: try to switch to read-only by default.
if isDevel {
- ref, exp, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
+ devRes, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid),
frontend.SolveRequest{
Frontend: source,
FrontendOpt: filterPrefix(opts, "gateway-"),
})
if err != nil {
- return nil, nil, err
+ return nil, err
}
- defer ref.Release(context.TODO())
-
- workerRef, ok := ref.Sys().(*worker.WorkerRef)
+ defer func() {
+ devRes.EachRef(func(ref solver.CachedResult) error {
+ return ref.Release(context.TODO())
+ })
+ }()
+ if devRes.Ref == nil {
+ return nil, errors.Errorf("development gateway didn't return default result")
+ }
+ workerRef, ok := devRes.Ref.Sys().(*worker.WorkerRef)
if !ok {
- return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys())
+ return nil, errors.Errorf("invalid ref: %T", devRes.Ref.Sys())
}
rootFS = workerRef.ImmutableRef
- config, ok := exp[exporterImageConfig]
+ config, ok := devRes.Metadata[exporterImageConfig]
if ok {
if err := json.Unmarshal(config, &img); err != nil {
- return nil, nil, err
+ return nil, err
}
}
} else {
sourceRef, err := reference.ParseNormalizedNamed(source)
if err != nil {
- return nil, nil, err
+ return nil, err
}
dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), nil) // TODO:
if err != nil {
- return nil, nil, err
+ return nil, err
}
if err := json.Unmarshal(config, &img); err != nil {
- return nil, nil, err
+ return nil, err
}
if dgst != "" {
sourceRef, err = reference.WithDigest(sourceRef, dgst)
if err != nil {
- return nil, nil, err
+ return nil, err
}
}
@@ -115,27 +129,35 @@
def, err := src.Marshal()
if err != nil {
- return nil, nil, err
+ return nil, err
}
- ref, _, err := llbBridge.Solve(ctx, frontend.SolveRequest{
+ res, err := llbBridge.Solve(ctx, frontend.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
- return nil, nil, err
+ return nil, err
}
- defer ref.Release(context.TODO())
- workerRef, ok := ref.Sys().(*worker.WorkerRef)
+ defer func() {
+ res.EachRef(func(ref solver.CachedResult) error {
+ return ref.Release(context.TODO())
+ })
+ }()
+ if res.Ref == nil {
+ return nil, errors.Errorf("gateway source didn't return default result")
+
+ }
+ workerRef, ok := res.Ref.Sys().(*worker.WorkerRef)
if !ok {
- return nil, nil, errors.Errorf("invalid ref: %T", ref.Sys())
+ return nil, errors.Errorf("invalid ref: %T", res.Ref.Sys())
}
rootFS = workerRef.ImmutableRef
}
- lbf, err := newLLBBridgeForwarder(ctx, llbBridge)
+ lbf, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers)
defer lbf.conn.Close()
if err != nil {
- return nil, nil, err
+ return nil, err
}
args := []string{"/run"}
@@ -158,14 +180,32 @@
env = append(env, "BUILDKIT_SESSION_ID="+sid)
+ dt, err := json.Marshal(gf.workers.WorkerInfos())
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal workers array")
+ }
+ env = append(env, "BUILDKIT_WORKERS="+string(dt))
+
defer func() {
for _, r := range lbf.refs {
- if r != nil && (lbf.lastRef != r || retErr != nil) {
- r.Release(context.TODO())
+ if retErr == nil && lbf.result != nil {
+ keep := false
+ lbf.result.EachRef(func(r2 solver.CachedResult) error {
+ if r == r2 {
+ keep = true
+ }
+ return nil
+ })
+ if keep {
+ continue
+ }
}
+ r.Release(context.TODO())
}
}()
+ env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct)
+
err = llbBridge.Exec(ctx, executor.Meta{
Env: env,
Args: args,
@@ -173,19 +213,24 @@
ReadonlyRootFS: readonly,
}, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr)
- if err != nil {
- return nil, nil, err
+ if lbf.err != nil {
+ return nil, lbf.err
}
- return lbf.lastRef, lbf.exporterAttr, nil
+ if err != nil {
+ return nil, err
+ }
+
+ return lbf.result, nil
}
-func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge) (*llbBridgeForwarder, error) {
+func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos) (*llbBridgeForwarder, error) {
lbf := &llbBridgeForwarder{
callCtx: ctx,
llbBridge: llbBridge,
- refs: map[string]solver.Result{},
+ refs: map[string]solver.CachedResult{},
pipe: newPipe(),
+ workers: workers,
}
server := grpc.NewServer()
@@ -251,12 +296,17 @@
}
type llbBridgeForwarder struct {
- mu sync.Mutex
- callCtx context.Context
- llbBridge frontend.FrontendLLBBridge
- refs map[string]solver.Result
- lastRef solver.CachedResult
+ mu sync.Mutex
+ callCtx context.Context
+ llbBridge frontend.FrontendLLBBridge
+ refs map[string]solver.CachedResult
+ // lastRef solver.CachedResult
+ // lastRefs map[string]solver.CachedResult
+ // err error
+ result *frontend.Result
+ err error
exporterAttr map[string][]byte
+ workers frontend.WorkerInfos
*pipe
}
@@ -284,7 +334,7 @@
func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
- ref, expResp, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
+ res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{
Definition: req.Definition,
Frontend: req.Frontend,
FrontendOpt: req.FrontendOpt,
@@ -294,29 +344,65 @@
return nil, err
}
- exp := map[string][]byte{}
- if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil {
- return nil, err
+ if len(res.Refs) > 0 && !req.AllowResultReturn {
+ // this should never happen because old client shouldn't make a map request
+ return nil, errors.Errorf("solve did not return default result")
}
- if expResp != nil {
- for k, v := range expResp {
+ pbRes := &pb.Result{}
+ var defaultID string
+
+ lbf.mu.Lock()
+ if res.Refs != nil {
+ ids := make(map[string]string, len(res.Refs))
+ for k, ref := range res.Refs {
+ id := identity.NewID()
+ if ref == nil {
+ id = ""
+ } else {
+ lbf.refs[id] = ref
+ }
+ ids[k] = id
+ }
+ pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: ids}}
+ } else {
+ id := identity.NewID()
+ if res.Ref == nil {
+ id = ""
+ } else {
+ lbf.refs[id] = res.Ref
+ }
+ defaultID = id
+ pbRes.Result = &pb.Result_Ref{Ref: id}
+ }
+ lbf.mu.Unlock()
+
+ // compatibility mode for older clients
+ if req.Final {
+ exp := map[string][]byte{}
+ if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil {
+ return nil, err
+ }
+
+ for k, v := range res.Metadata {
exp[k] = v
}
+
+ lbf.result = &frontend.Result{
+ Ref: lbf.refs[defaultID],
+ Metadata: exp,
+ }
}
- id := identity.NewID()
- lbf.mu.Lock()
- lbf.refs[id] = ref
- lbf.mu.Unlock()
- if req.Final {
- lbf.lastRef = ref
- lbf.exporterAttr = exp
+ resp := &pb.SolveResponse{
+ Result: pbRes,
}
- if ref == nil {
- id = ""
+
+ if !req.AllowResultReturn {
+ resp.Ref = defaultID
}
- return &pb.SolveResponse{Ref: id}, nil
+
+ return resp, nil
}
func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) {
ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx)
@@ -353,7 +439,68 @@
}
func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) {
- return &pb.PongResponse{}, nil
+
+ workers := lbf.workers.WorkerInfos()
+ pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers))
+ for _, w := range workers {
+ pbWorkers = append(pbWorkers, &apitypes.WorkerRecord{
+ ID: w.ID,
+ Labels: w.Labels,
+ Platforms: opspb.PlatformsFromSpec(w.Platforms),
+ })
+ }
+
+ return &pb.PongResponse{
+ FrontendAPICaps: pb.Caps.All(),
+ Workers: pbWorkers,
+ // TODO: add LLB info
+ }, nil
+}
+
+func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) (*pb.ReturnResponse, error) {
+ if in.Error != nil {
+ lbf.err = status.ErrorProto(&spb.Status{
+ Code: in.Error.Code,
+ Message: in.Error.Message,
+ // Details: in.Error.Details,
+ })
+ } else {
+ lbf.result = &frontend.Result{
+ Metadata: in.Result.Metadata,
+ }
+
+ switch res := in.Result.Result.(type) {
+ case *pb.Result_Ref:
+ ref, err := lbf.convertRef(res.Ref)
+ if err != nil {
+ return nil, err
+ }
+ lbf.result.Ref = ref
+ case *pb.Result_Refs:
+ m := map[string]solver.CachedResult{}
+ for k, v := range res.Refs.Refs {
+ ref, err := lbf.convertRef(v)
+ if err != nil {
+ return nil, err
+ }
+ m[k] = ref
+ }
+ lbf.result.Refs = m
+ }
+ }
+
+ return &pb.ReturnResponse{}, nil
+}
+
+func (lbf *llbBridgeForwarder) convertRef(id string) (solver.CachedResult, error) {
+ if id == "" {
+ return nil, nil
+ }
+ r, ok := lbf.refs[id]
+ if !ok {
+ return nil, errors.Errorf("return reference %s not found", id)
+ }
+ return r, nil
}
func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go
new file mode 100644
index 0000000..b5e902b
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go
@@ -0,0 +1,64 @@
+package moby_buildkit_v1_frontend
+
+import "github.com/moby/buildkit/util/apicaps"
+
+var Caps apicaps.CapList
+
+// Every backwards or forwards non-compatible change needs to add a new capability row.
+// By default new capabilities should be experimental. After merge a capability is
+// considered immutable. After a capability is marked stable it should not be disabled.
+
+const (
+ CapSolveBase apicaps.CapID = "solve.base"
+ CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn"
+ CapResolveImage apicaps.CapID = "resolveimage"
+ CapReadFile apicaps.CapID = "readfile"
+ CapReturnResult apicaps.CapID = "return"
+ CapReturnMap apicaps.CapID = "returnmap"
+)
+
+func init() {
+
+ Caps.Init(apicaps.Cap{
+ ID: CapSolveBase,
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+ Caps.Init(apicaps.Cap{
+ ID: CapSolveInlineReturn,
+ Name: "inline return from solve",
+ Enabled: true,
+ Deprecated: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+ Caps.Init(apicaps.Cap{
+ ID: CapResolveImage,
+ Name: "resolve remote image config",
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+ Caps.Init(apicaps.Cap{
+ ID: CapReadFile,
+ Name: "read static file",
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+ Caps.Init(apicaps.Cap{
+ ID: CapReturnResult,
+ Name: "return solve result",
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+ Caps.Init(apicaps.Cap{
+ ID: CapReturnMap,
+ Name: "return reference map",
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go
index bbd8531..27789c6 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go
@@ -8,6 +8,10 @@
gateway.proto
It has these top-level messages:
+ Result
+ RefMap
+ ReturnRequest
+ ReturnResponse
ResolveImageConfigRequest
ResolveImageConfigResponse
SolveRequest
@@ -24,7 +28,10 @@
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
+import google_rpc "github.com/gogo/googleapis/google/rpc"
import pb "github.com/moby/buildkit/solver/pb"
+import moby_buildkit_v1_types "github.com/moby/buildkit/api/types"
+import moby_buildkit_v1_apicaps "github.com/moby/buildkit/util/apicaps/pb"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
@@ -44,6 +51,181 @@
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+type Result struct {
+ // Types that are valid to be assigned to Result:
+ // *Result_Ref
+ // *Result_Refs
+ Result isResult_Result `protobuf_oneof:"result"`
+ Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Result) Reset() { *m = Result{} }
+func (m *Result) String() string { return proto.CompactTextString(m) }
+func (*Result) ProtoMessage() {}
+func (*Result) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{0} }
+
+type isResult_Result interface {
+ isResult_Result()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Result_Ref struct {
+ Ref string `protobuf:"bytes,1,opt,name=ref,proto3,oneof"`
+}
+type Result_Refs struct {
+ Refs *RefMap `protobuf:"bytes,2,opt,name=refs,oneof"`
+}
+
+func (*Result_Ref) isResult_Result() {}
+func (*Result_Refs) isResult_Result() {}
+
+func (m *Result) GetResult() isResult_Result {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *Result) GetRef() string {
+ if x, ok := m.GetResult().(*Result_Ref); ok {
+ return x.Ref
+ }
+ return ""
+}
+
+func (m *Result) GetRefs() *RefMap {
+ if x, ok := m.GetResult().(*Result_Refs); ok {
+ return x.Refs
+ }
+ return nil
+}
+
+func (m *Result) GetMetadata() map[string][]byte {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Result) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _Result_OneofMarshaler, _Result_OneofUnmarshaler, _Result_OneofSizer, []interface{}{
+ (*Result_Ref)(nil),
+ (*Result_Refs)(nil),
+ }
+}
+
+func _Result_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*Result)
+ // result
+ switch x := m.Result.(type) {
+ case *Result_Ref:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ _ = b.EncodeStringBytes(x.Ref)
+ case *Result_Refs:
+ _ = b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Refs); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("Result.Result has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _Result_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*Result)
+ switch tag {
+ case 1: // result.ref
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ x, err := b.DecodeStringBytes()
+ m.Result = &Result_Ref{x}
+ return true, err
+ case 2: // result.refs
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RefMap)
+ err := b.DecodeMessage(msg)
+ m.Result = &Result_Refs{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _Result_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*Result)
+ // result
+ switch x := m.Result.(type) {
+ case *Result_Ref:
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(len(x.Ref)))
+ n += len(x.Ref)
+ case *Result_Refs:
+ s := proto.Size(x.Refs)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type RefMap struct {
+ Refs map[string]string `protobuf:"bytes,1,rep,name=refs" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *RefMap) Reset() { *m = RefMap{} }
+func (m *RefMap) String() string { return proto.CompactTextString(m) }
+func (*RefMap) ProtoMessage() {}
+func (*RefMap) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{1} }
+
+func (m *RefMap) GetRefs() map[string]string {
+ if m != nil {
+ return m.Refs
+ }
+ return nil
+}
+
+type ReturnRequest struct {
+ Result *Result `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
+ Error *google_rpc.Status `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"`
+}
+
+func (m *ReturnRequest) Reset() { *m = ReturnRequest{} }
+func (m *ReturnRequest) String() string { return proto.CompactTextString(m) }
+func (*ReturnRequest) ProtoMessage() {}
+func (*ReturnRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{2} }
+
+func (m *ReturnRequest) GetResult() *Result {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *ReturnRequest) GetError() *google_rpc.Status {
+ if m != nil {
+ return m.Error
+ }
+ return nil
+}
+
+type ReturnResponse struct {
+}
+
+func (m *ReturnResponse) Reset() { *m = ReturnResponse{} }
+func (m *ReturnResponse) String() string { return proto.CompactTextString(m) }
+func (*ReturnResponse) ProtoMessage() {}
+func (*ReturnResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{3} }
+
type ResolveImageConfigRequest struct {
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform" json:"Platform,omitempty"`
@@ -52,7 +234,7 @@
func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} }
func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) }
func (*ResolveImageConfigRequest) ProtoMessage() {}
-func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{0} }
+func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{4} }
func (m *ResolveImageConfigRequest) GetRef() string {
if m != nil {
@@ -77,7 +259,7 @@
func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) }
func (*ResolveImageConfigResponse) ProtoMessage() {}
func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) {
- return fileDescriptorGateway, []int{1}
+ return fileDescriptorGateway, []int{5}
}
func (m *ResolveImageConfigResponse) GetConfig() []byte {
@@ -88,18 +270,20 @@
}
type SolveRequest struct {
- Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition" json:"Definition,omitempty"`
- Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
- FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- ImportCacheRefs []string `protobuf:"bytes,4,rep,name=ImportCacheRefs" json:"ImportCacheRefs,omitempty"`
- Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"`
- ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"`
+ Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition" json:"Definition,omitempty"`
+ Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
+ FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ ImportCacheRefs []string `protobuf:"bytes,4,rep,name=ImportCacheRefs" json:"ImportCacheRefs,omitempty"`
+ AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"`
+ // apicaps.CapSolveInlineReturn deprecated
+ Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"`
+ ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"`
}
func (m *SolveRequest) Reset() { *m = SolveRequest{} }
func (m *SolveRequest) String() string { return proto.CompactTextString(m) }
func (*SolveRequest) ProtoMessage() {}
-func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{2} }
+func (*SolveRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{6} }
func (m *SolveRequest) GetDefinition() *pb.Definition {
if m != nil {
@@ -129,6 +313,13 @@
return nil
}
+func (m *SolveRequest) GetAllowResultReturn() bool {
+ if m != nil {
+ return m.AllowResultReturn
+ }
+ return false
+}
+
func (m *SolveRequest) GetFinal() bool {
if m != nil {
return m.Final
@@ -144,14 +335,16 @@
}
type SolveResponse struct {
- Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
- ExporterAttr []byte `protobuf:"bytes,2,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"`
+ // deprecated
+ Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+ // these fields are returned when allowMapReturn was set
+ Result *Result `protobuf:"bytes,3,opt,name=result" json:"result,omitempty"`
}
func (m *SolveResponse) Reset() { *m = SolveResponse{} }
func (m *SolveResponse) String() string { return proto.CompactTextString(m) }
func (*SolveResponse) ProtoMessage() {}
-func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{3} }
+func (*SolveResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{7} }
func (m *SolveResponse) GetRef() string {
if m != nil {
@@ -160,9 +353,9 @@
return ""
}
-func (m *SolveResponse) GetExporterAttr() []byte {
+func (m *SolveResponse) GetResult() *Result {
if m != nil {
- return m.ExporterAttr
+ return m.Result
}
return nil
}
@@ -176,7 +369,7 @@
func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} }
func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) }
func (*ReadFileRequest) ProtoMessage() {}
-func (*ReadFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{4} }
+func (*ReadFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{8} }
func (m *ReadFileRequest) GetRef() string {
if m != nil {
@@ -207,7 +400,7 @@
func (m *FileRange) Reset() { *m = FileRange{} }
func (m *FileRange) String() string { return proto.CompactTextString(m) }
func (*FileRange) ProtoMessage() {}
-func (*FileRange) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{5} }
+func (*FileRange) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{9} }
func (m *FileRange) GetOffset() int64 {
if m != nil {
@@ -230,7 +423,7 @@
func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} }
func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) }
func (*ReadFileResponse) ProtoMessage() {}
-func (*ReadFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{6} }
+func (*ReadFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{10} }
func (m *ReadFileResponse) GetData() []byte {
if m != nil {
@@ -245,17 +438,45 @@
func (m *PingRequest) Reset() { *m = PingRequest{} }
func (m *PingRequest) String() string { return proto.CompactTextString(m) }
func (*PingRequest) ProtoMessage() {}
-func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{7} }
+func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{11} }
type PongResponse struct {
+ FrontendAPICaps []moby_buildkit_v1_apicaps.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps" json:"FrontendAPICaps"`
+ LLBCaps []moby_buildkit_v1_apicaps.APICap `protobuf:"bytes,2,rep,name=LLBCaps" json:"LLBCaps"`
+ Workers []*moby_buildkit_v1_types.WorkerRecord `protobuf:"bytes,3,rep,name=Workers" json:"Workers,omitempty"`
}
func (m *PongResponse) Reset() { *m = PongResponse{} }
func (m *PongResponse) String() string { return proto.CompactTextString(m) }
func (*PongResponse) ProtoMessage() {}
-func (*PongResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{8} }
+func (*PongResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{12} }
+
+func (m *PongResponse) GetFrontendAPICaps() []moby_buildkit_v1_apicaps.APICap {
+ if m != nil {
+ return m.FrontendAPICaps
+ }
+ return nil
+}
+
+func (m *PongResponse) GetLLBCaps() []moby_buildkit_v1_apicaps.APICap {
+ if m != nil {
+ return m.LLBCaps
+ }
+ return nil
+}
+
+func (m *PongResponse) GetWorkers() []*moby_buildkit_v1_types.WorkerRecord {
+ if m != nil {
+ return m.Workers
+ }
+ return nil
+}
func init() {
+ proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result")
+ proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap")
+ proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest")
+ proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse")
proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest")
proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse")
proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.frontend.SolveRequest")
@@ -278,10 +499,14 @@
// Client API for LLBBridge service
type LLBBridgeClient interface {
+ // apicaps:CapResolveImage
ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error)
+ // apicaps:CapSolveBase
Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
+ // apicaps:CapReadFile
ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error)
Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error)
+ Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error)
}
type lLBBridgeClient struct {
@@ -328,13 +553,26 @@
return out, nil
}
+func (c *lLBBridgeClient) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) {
+ out := new(ReturnResponse)
+ err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Return", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// Server API for LLBBridge service
type LLBBridgeServer interface {
+ // apicaps:CapResolveImage
ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error)
+ // apicaps:CapSolveBase
Solve(context.Context, *SolveRequest) (*SolveResponse, error)
+ // apicaps:CapReadFile
ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error)
Ping(context.Context, *PingRequest) (*PongResponse, error)
+ Return(context.Context, *ReturnRequest) (*ReturnResponse, error)
}
func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) {
@@ -413,6 +651,24 @@
return interceptor(ctx, in, info, handler)
}
+func _LLBBridge_Return_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ReturnRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LLBBridgeServer).Return(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Return",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LLBBridgeServer).Return(ctx, req.(*ReturnRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
var _LLBBridge_serviceDesc = grpc.ServiceDesc{
ServiceName: "moby.buildkit.v1.frontend.LLBBridge",
HandlerType: (*LLBBridgeServer)(nil),
@@ -433,11 +689,176 @@
MethodName: "Ping",
Handler: _LLBBridge_Ping_Handler,
},
+ {
+ MethodName: "Return",
+ Handler: _LLBBridge_Return_Handler,
+ },
},
Streams: []grpc.StreamDesc{},
Metadata: "gateway.proto",
}
+func (m *Result) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Result) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Result != nil {
+ nn1, err := m.Result.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += nn1
+ }
+ if len(m.Metadata) > 0 {
+ for k, _ := range m.Metadata {
+ dAtA[i] = 0x52
+ i++
+ v := m.Metadata[k]
+ byteSize := 0
+ if len(v) > 0 {
+ byteSize = 1 + len(v) + sovGateway(uint64(len(v)))
+ }
+ mapSize := 1 + len(k) + sovGateway(uint64(len(k))) + byteSize
+ i = encodeVarintGateway(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ if len(v) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(len(v)))
+ i += copy(dAtA[i:], v)
+ }
+ }
+ }
+ return i, nil
+}
+
+func (m *Result_Ref) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref)))
+ i += copy(dAtA[i:], m.Ref)
+ return i, nil
+}
+func (m *Result_Refs) MarshalTo(dAtA []byte) (int, error) {
+ i := 0
+ if m.Refs != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(m.Refs.Size()))
+ n2, err := m.Refs.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+func (m *RefMap) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RefMap) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Refs) > 0 {
+ for k, _ := range m.Refs {
+ dAtA[i] = 0xa
+ i++
+ v := m.Refs[k]
+ mapSize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v)))
+ i = encodeVarintGateway(dAtA, i, uint64(mapSize))
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(len(k)))
+ i += copy(dAtA[i:], k)
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(len(v)))
+ i += copy(dAtA[i:], v)
+ }
+ }
+ return i, nil
+}
+
+func (m *ReturnRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Result != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(m.Result.Size()))
+ n3, err := m.Result.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ if m.Error != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(m.Error.Size()))
+ n4, err := m.Error.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ return i, nil
+}
+
+func (m *ReturnResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -463,11 +884,11 @@
dAtA[i] = 0x12
i++
i = encodeVarintGateway(dAtA, i, uint64(m.Platform.Size()))
- n1, err := m.Platform.MarshalTo(dAtA[i:])
+ n5, err := m.Platform.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n1
+ i += n5
}
return i, nil
}
@@ -521,11 +942,11 @@
dAtA[i] = 0xa
i++
i = encodeVarintGateway(dAtA, i, uint64(m.Definition.Size()))
- n2, err := m.Definition.MarshalTo(dAtA[i:])
+ n6, err := m.Definition.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n2
+ i += n6
}
if len(m.Frontend) > 0 {
dAtA[i] = 0x12
@@ -565,6 +986,16 @@
i += copy(dAtA[i:], s)
}
}
+ if m.AllowResultReturn {
+ dAtA[i] = 0x28
+ i++
+ if m.AllowResultReturn {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
if m.Final {
dAtA[i] = 0x50
i++
@@ -605,11 +1036,15 @@
i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref)))
i += copy(dAtA[i:], m.Ref)
}
- if len(m.ExporterAttr) > 0 {
- dAtA[i] = 0x12
+ if m.Result != nil {
+ dAtA[i] = 0x1a
i++
- i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr)))
- i += copy(dAtA[i:], m.ExporterAttr)
+ i = encodeVarintGateway(dAtA, i, uint64(m.Result.Size()))
+ n7, err := m.Result.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
}
return i, nil
}
@@ -645,11 +1080,11 @@
dAtA[i] = 0x1a
i++
i = encodeVarintGateway(dAtA, i, uint64(m.Range.Size()))
- n3, err := m.Range.MarshalTo(dAtA[i:])
+ n8, err := m.Range.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n3
+ i += n8
}
return i, nil
}
@@ -739,6 +1174,42 @@
_ = i
var l int
_ = l
+ if len(m.FrontendAPICaps) > 0 {
+ for _, msg := range m.FrontendAPICaps {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.LLBCaps) > 0 {
+ for _, msg := range m.LLBCaps {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
+ if len(m.Workers) > 0 {
+ for _, msg := range m.Workers {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintGateway(dAtA, i, uint64(msg.Size()))
+ n, err := msg.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n
+ }
+ }
return i, nil
}
@@ -751,6 +1222,77 @@
dAtA[offset] = uint8(v)
return offset + 1
}
+func (m *Result) Size() (n int) {
+ var l int
+ _ = l
+ if m.Result != nil {
+ n += m.Result.Size()
+ }
+ if len(m.Metadata) > 0 {
+ for k, v := range m.Metadata {
+ _ = k
+ _ = v
+ l = 0
+ if len(v) > 0 {
+ l = 1 + len(v) + sovGateway(uint64(len(v)))
+ }
+ mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l
+ n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *Result_Ref) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Ref)
+ n += 1 + l + sovGateway(uint64(l))
+ return n
+}
+func (m *Result_Refs) Size() (n int) {
+ var l int
+ _ = l
+ if m.Refs != nil {
+ l = m.Refs.Size()
+ n += 1 + l + sovGateway(uint64(l))
+ }
+ return n
+}
+func (m *RefMap) Size() (n int) {
+ var l int
+ _ = l
+ if len(m.Refs) > 0 {
+ for k, v := range m.Refs {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize))
+ }
+ }
+ return n
+}
+
+func (m *ReturnRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Result != nil {
+ l = m.Result.Size()
+ n += 1 + l + sovGateway(uint64(l))
+ }
+ if m.Error != nil {
+ l = m.Error.Size()
+ n += 1 + l + sovGateway(uint64(l))
+ }
+ return n
+}
+
+func (m *ReturnResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
func (m *ResolveImageConfigRequest) Size() (n int) {
var l int
_ = l
@@ -804,6 +1346,9 @@
n += 1 + l + sovGateway(uint64(l))
}
}
+ if m.AllowResultReturn {
+ n += 2
+ }
if m.Final {
n += 2
}
@@ -821,8 +1366,8 @@
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
- l = len(m.ExporterAttr)
- if l > 0 {
+ if m.Result != nil {
+ l = m.Result.Size()
n += 1 + l + sovGateway(uint64(l))
}
return n
@@ -877,6 +1422,24 @@
func (m *PongResponse) Size() (n int) {
var l int
_ = l
+ if len(m.FrontendAPICaps) > 0 {
+ for _, e := range m.FrontendAPICaps {
+ l = e.Size()
+ n += 1 + l + sovGateway(uint64(l))
+ }
+ }
+ if len(m.LLBCaps) > 0 {
+ for _, e := range m.LLBCaps {
+ l = e.Size()
+ n += 1 + l + sovGateway(uint64(l))
+ }
+ }
+ if len(m.Workers) > 0 {
+ for _, e := range m.Workers {
+ l = e.Size()
+ n += 1 + l + sovGateway(uint64(l))
+ }
+ }
return n
}
@@ -893,6 +1456,570 @@
func sozGateway(x uint64) (n int) {
return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (m *Result) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Result: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Result: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Result = &Result_Ref{string(dAtA[iNdEx:postIndex])}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RefMap{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Result = &Result_Refs{v}
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Metadata == nil {
+ m.Metadata = make(map[string][]byte)
+ }
+ var mapkey string
+ mapvalue := []byte{}
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapbyteLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapbyteLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intMapbyteLen := int(mapbyteLen)
+ if intMapbyteLen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postbytesIndex := iNdEx + intMapbyteLen
+ if postbytesIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = make([]byte, mapbyteLen)
+ copy(mapvalue, dAtA[iNdEx:postbytesIndex])
+ iNdEx = postbytesIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGateway(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGateway
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Metadata[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGateway(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGateway
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RefMap) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RefMap: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Refs == nil {
+ m.Refs = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGateway(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGateway
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Refs[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGateway(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGateway
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReturnRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Result == nil {
+ m.Result = &Result{}
+ }
+ if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Error == nil {
+ m.Error = &google_rpc.Status{}
+ }
+ if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGateway(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGateway
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ReturnResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ReturnResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ReturnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGateway(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthGateway
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -1353,6 +2480,26 @@
}
m.ImportCacheRefs = append(m.ImportCacheRefs, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AllowResultReturn = bool(v != 0)
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Final", wireType)
@@ -1483,11 +2630,11 @@
}
m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
}
- var byteLen int
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
@@ -1497,21 +2644,23 @@
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= (int(b) & 0x7F) << shift
+ msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthGateway
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...)
- if m.ExporterAttr == nil {
- m.ExporterAttr = []byte{}
+ if m.Result == nil {
+ m.Result = &Result{}
+ }
+ if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
iNdEx = postIndex
default:
@@ -1924,6 +3073,99 @@
return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FrontendAPICaps", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.FrontendAPICaps = append(m.FrontendAPICaps, moby_buildkit_v1_apicaps.APICap{})
+ if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LLBCaps", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.LLBCaps = append(m.LLBCaps, moby_buildkit_v1_apicaps.APICap{})
+ if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGateway
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGateway
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Workers = append(m.Workers, &moby_buildkit_v1_types.WorkerRecord{})
+ if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGateway(dAtA[iNdEx:])
@@ -2053,46 +3295,66 @@
func init() { proto.RegisterFile("gateway.proto", fileDescriptorGateway) }
var fileDescriptorGateway = []byte{
- // 652 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4d, 0x4f, 0xdb, 0x4a,
- 0x14, 0x7d, 0x8e, 0x01, 0x25, 0x37, 0xe6, 0x43, 0xa3, 0xa7, 0x27, 0xe3, 0x05, 0x44, 0xd6, 0x13,
- 0xcf, 0xe2, 0x15, 0x5b, 0x4d, 0x5b, 0x09, 0x51, 0xa9, 0x52, 0xc3, 0x87, 0x44, 0x85, 0x44, 0x34,
- 0x5d, 0x20, 0xb1, 0x1b, 0x27, 0x63, 0x33, 0xc2, 0x99, 0x71, 0xed, 0x09, 0x6d, 0xd4, 0x4d, 0xfb,
- 0x73, 0xfa, 0x4f, 0x58, 0x76, 0xcd, 0x02, 0x55, 0xfc, 0x92, 0xca, 0xe3, 0x71, 0x30, 0x50, 0x52,
- 0xba, 0x9b, 0x73, 0x7d, 0xef, 0x99, 0x73, 0xe7, 0xdc, 0x6b, 0x58, 0x8c, 0x89, 0xa4, 0x1f, 0xc9,
- 0xc4, 0x4f, 0x33, 0x21, 0x05, 0x5a, 0x1d, 0x89, 0x70, 0xe2, 0x87, 0x63, 0x96, 0x0c, 0xcf, 0x99,
- 0xf4, 0x2f, 0x9e, 0xfb, 0x51, 0x26, 0xb8, 0xa4, 0x7c, 0xe8, 0x6c, 0xc5, 0x4c, 0x9e, 0x8d, 0x43,
- 0x7f, 0x20, 0x46, 0x41, 0x2c, 0x62, 0x11, 0xa8, 0x8a, 0x70, 0x1c, 0x29, 0xa4, 0x80, 0x3a, 0x95,
- 0x4c, 0xce, 0xb3, 0x5a, 0x7a, 0x41, 0x1a, 0x54, 0xa4, 0x41, 0x2e, 0x92, 0x0b, 0x9a, 0x05, 0x69,
- 0x18, 0x88, 0x34, 0x2f, 0xb3, 0xdd, 0x13, 0x58, 0xc5, 0x54, 0x7d, 0x38, 0x1c, 0x91, 0x98, 0xee,
- 0x0a, 0x1e, 0xb1, 0x18, 0xd3, 0x0f, 0x63, 0x9a, 0x4b, 0xb4, 0x02, 0x26, 0xa6, 0x91, 0x6d, 0x74,
- 0x0c, 0xaf, 0x85, 0x8b, 0x23, 0xf2, 0xa0, 0xd9, 0x4f, 0x88, 0x8c, 0x44, 0x36, 0xb2, 0x1b, 0x1d,
- 0xc3, 0x6b, 0x77, 0x2d, 0x3f, 0x0d, 0xfd, 0x2a, 0x86, 0xa7, 0x5f, 0xdd, 0x2f, 0x06, 0x38, 0xbf,
- 0x62, 0xce, 0x53, 0xc1, 0x73, 0x8a, 0xde, 0xc1, 0xc2, 0x1e, 0x8b, 0x69, 0x2e, 0x4b, 0xf6, 0x5e,
- 0xf7, 0xf2, 0x7a, 0xfd, 0xaf, 0xab, 0xeb, 0xf5, 0xcd, 0x9a, 0x7a, 0x91, 0x52, 0x3e, 0x10, 0x5c,
- 0x12, 0xc6, 0x69, 0x96, 0x07, 0xb1, 0xd8, 0x1a, 0xaa, 0x12, 0xbf, 0xac, 0xc4, 0x9a, 0x01, 0xfd,
- 0x03, 0x0b, 0x25, 0xbb, 0x92, 0x64, 0x61, 0x8d, 0xdc, 0xab, 0x06, 0x58, 0xef, 0x0b, 0x01, 0x55,
- 0x3f, 0x3e, 0xc0, 0x1e, 0x8d, 0x18, 0x67, 0x92, 0x09, 0xae, 0x2e, 0x6e, 0x77, 0x97, 0x0a, 0xfd,
- 0xb7, 0x51, 0x5c, 0xcb, 0x40, 0x0e, 0x34, 0x0f, 0xb4, 0x0b, 0x8a, 0xba, 0x85, 0xa7, 0x18, 0x9d,
- 0x42, 0xbb, 0x3a, 0x1f, 0xa7, 0xd2, 0x36, 0x3b, 0xa6, 0xd7, 0xee, 0x6e, 0xfb, 0x8f, 0xda, 0xe8,
- 0xd7, 0x95, 0xf8, 0xb5, 0xd2, 0x7d, 0x2e, 0xb3, 0x09, 0xae, 0x93, 0x21, 0x0f, 0x96, 0x0f, 0x47,
- 0xa9, 0xc8, 0xe4, 0x2e, 0x19, 0x9c, 0x51, 0x4c, 0xa3, 0xdc, 0x9e, 0xeb, 0x98, 0x5e, 0x0b, 0xdf,
- 0x0f, 0xa3, 0xbf, 0x61, 0xfe, 0x80, 0x71, 0x92, 0xd8, 0xd0, 0x31, 0xbc, 0x26, 0x2e, 0x01, 0x72,
- 0xc1, 0xda, 0xff, 0x54, 0x24, 0xd2, 0xec, 0xad, 0x94, 0x99, 0xdd, 0x56, 0xcf, 0x72, 0x27, 0xe6,
- 0xbc, 0x81, 0x95, 0xfb, 0x22, 0x0a, 0xbf, 0xcf, 0xe9, 0xa4, 0xf2, 0xfb, 0x9c, 0x4e, 0x0a, 0xfe,
- 0x0b, 0x92, 0x8c, 0xa9, 0x6e, 0xbf, 0x04, 0x3b, 0x8d, 0x6d, 0xc3, 0xdd, 0x87, 0x45, 0xdd, 0x91,
- 0x76, 0xf4, 0xe1, 0xb0, 0xdc, 0x97, 0xd1, 0x78, 0x28, 0xc3, 0xfd, 0x0c, 0xcb, 0x98, 0x92, 0xe1,
- 0x01, 0x4b, 0xe8, 0xe3, 0x53, 0x57, 0xf8, 0xc0, 0x12, 0xda, 0x27, 0xf2, 0x6c, 0xea, 0x83, 0xc6,
- 0x68, 0x07, 0xe6, 0x31, 0xe1, 0x31, 0xb5, 0x4d, 0x65, 0xe7, 0xbf, 0x33, 0x1c, 0x50, 0x97, 0x14,
- 0xb9, 0xb8, 0x2c, 0x71, 0x5f, 0x43, 0x6b, 0x1a, 0x2b, 0xa6, 0xe8, 0x38, 0x8a, 0x72, 0x5a, 0x4e,
- 0xa4, 0x89, 0x35, 0x2a, 0xe2, 0x47, 0x94, 0xc7, 0xfa, 0x6a, 0x13, 0x6b, 0xe4, 0x6e, 0xc0, 0xca,
- 0xad, 0x72, 0xfd, 0x06, 0x08, 0xe6, 0xf6, 0x88, 0x24, 0x8a, 0xc1, 0xc2, 0xea, 0xec, 0x2e, 0x42,
- 0xbb, 0xcf, 0x78, 0xb5, 0x53, 0xee, 0x12, 0x58, 0x7d, 0xc1, 0xa7, 0x8b, 0xd0, 0xfd, 0x66, 0x42,
- 0xeb, 0xe8, 0xa8, 0xd7, 0xcb, 0xd8, 0x30, 0xa6, 0xe8, 0xab, 0x01, 0xe8, 0xe1, 0xd6, 0xa0, 0x97,
- 0x33, 0xba, 0x7a, 0x74, 0x7d, 0x9d, 0x57, 0x7f, 0x58, 0xa5, 0x9b, 0x38, 0x85, 0x79, 0xe5, 0x2c,
- 0xfa, 0xef, 0x89, 0xd3, 0xec, 0x78, 0xbf, 0x4f, 0xd4, 0xdc, 0x03, 0x68, 0x56, 0x8f, 0x86, 0x36,
- 0x67, 0xca, 0xbb, 0x33, 0x13, 0xce, 0xff, 0x4f, 0xca, 0xd5, 0x97, 0x9c, 0xc0, 0x5c, 0xf1, 0xe2,
- 0x68, 0x63, 0x46, 0x51, 0xcd, 0x12, 0x67, 0x56, 0x9f, 0x75, 0xaf, 0x7a, 0xd6, 0xe5, 0xcd, 0x9a,
- 0xf1, 0xfd, 0x66, 0xcd, 0xf8, 0x71, 0xb3, 0x66, 0x84, 0x0b, 0xea, 0x0f, 0xfa, 0xe2, 0x67, 0x00,
- 0x00, 0x00, 0xff, 0xff, 0xbc, 0x68, 0x1b, 0xf0, 0xca, 0x05, 0x00, 0x00,
+ // 969 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x4b, 0x6f, 0xdb, 0x46,
+ 0x10, 0x0e, 0x4d, 0x49, 0x91, 0x46, 0x52, 0xac, 0x2e, 0x8a, 0x42, 0xe1, 0xc1, 0x51, 0x89, 0x22,
+ 0x65, 0xf3, 0x20, 0x51, 0xa5, 0x45, 0xd2, 0x04, 0x48, 0x1b, 0xd9, 0x31, 0xe2, 0x56, 0x41, 0x84,
+ 0xcd, 0xc1, 0x40, 0xd0, 0x1e, 0x56, 0xd2, 0x92, 0x26, 0x4c, 0x71, 0xd9, 0xe5, 0xca, 0xae, 0xd0,
+ 0x4b, 0xdb, 0x53, 0x7e, 0x5a, 0x8e, 0x3d, 0xf7, 0x10, 0x14, 0xbe, 0xf5, 0x5f, 0x14, 0xfb, 0xa0,
+ 0x4c, 0xbf, 0x64, 0xfb, 0xa4, 0x9d, 0xe5, 0x7c, 0x33, 0xdf, 0xce, 0x7c, 0xb3, 0x2b, 0x68, 0x47,
+ 0x44, 0xd0, 0x43, 0xb2, 0xf0, 0x33, 0xce, 0x04, 0x43, 0xb7, 0x67, 0x6c, 0xbc, 0xf0, 0xc7, 0xf3,
+ 0x38, 0x99, 0xee, 0xc7, 0xc2, 0x3f, 0xf8, 0xda, 0x0f, 0x39, 0x4b, 0x05, 0x4d, 0xa7, 0xce, 0xc3,
+ 0x28, 0x16, 0x7b, 0xf3, 0xb1, 0x3f, 0x61, 0xb3, 0x20, 0x62, 0x11, 0x0b, 0x14, 0x62, 0x3c, 0x0f,
+ 0x95, 0xa5, 0x0c, 0xb5, 0xd2, 0x91, 0x9c, 0xfe, 0x69, 0xf7, 0x88, 0xb1, 0x28, 0xa1, 0x24, 0x8b,
+ 0x73, 0xb3, 0x0c, 0x78, 0x36, 0x09, 0x72, 0x41, 0xc4, 0x3c, 0x37, 0x98, 0x07, 0x25, 0x8c, 0x24,
+ 0x12, 0x14, 0x44, 0x82, 0x9c, 0x25, 0x07, 0x94, 0x07, 0xd9, 0x38, 0x60, 0x59, 0xe1, 0x1d, 0x5c,
+ 0xe8, 0x4d, 0xb2, 0x38, 0x10, 0x8b, 0x8c, 0xe6, 0xc1, 0x21, 0xe3, 0xfb, 0x94, 0x1b, 0xc0, 0xa3,
+ 0x0b, 0x01, 0x73, 0x11, 0x27, 0x12, 0x35, 0x21, 0x59, 0x2e, 0x93, 0xc8, 0x5f, 0x0d, 0x72, 0xff,
+ 0xb3, 0xa0, 0x86, 0x69, 0x3e, 0x4f, 0x04, 0x42, 0x60, 0x73, 0x1a, 0x76, 0xad, 0x9e, 0xe5, 0x35,
+ 0x5e, 0xdd, 0xc0, 0xd2, 0x40, 0x8f, 0xa1, 0xc2, 0x69, 0x98, 0x77, 0xd7, 0x7a, 0x96, 0xd7, 0xec,
+ 0x7f, 0xee, 0x5f, 0x58, 0x3f, 0x1f, 0xd3, 0xf0, 0x35, 0xc9, 0x5e, 0xdd, 0xc0, 0x0a, 0x80, 0x7e,
+ 0x82, 0xfa, 0x8c, 0x0a, 0x32, 0x25, 0x82, 0x74, 0xa1, 0x67, 0x7b, 0xcd, 0x7e, 0xb0, 0x12, 0x2c,
+ 0x19, 0xf8, 0xaf, 0x0d, 0xe2, 0x65, 0x2a, 0xf8, 0x02, 0x2f, 0x03, 0x38, 0xcf, 0xa0, 0x7d, 0xe2,
+ 0x13, 0xea, 0x80, 0xbd, 0x4f, 0x17, 0x9a, 0x2a, 0x96, 0x4b, 0xf4, 0x29, 0x54, 0x0f, 0x48, 0x32,
+ 0xa7, 0x8a, 0x69, 0x0b, 0x6b, 0xe3, 0xe9, 0xda, 0x13, 0x6b, 0x50, 0x87, 0x1a, 0x57, 0xe1, 0xdd,
+ 0xbf, 0xd4, 0x59, 0x25, 0x4d, 0xf4, 0xbd, 0x39, 0x97, 0xa5, 0xa8, 0xdd, 0xbf, 0xf4, 0x5c, 0xf2,
+ 0x27, 0xd7, 0xb4, 0x14, 0xd0, 0x79, 0x0c, 0x8d, 0xe5, 0xd6, 0x65, 0x74, 0x1a, 0x25, 0x3a, 0xae,
+ 0x80, 0x36, 0xa6, 0x62, 0xce, 0x53, 0x4c, 0x7f, 0x9d, 0xd3, 0x5c, 0xa0, 0xef, 0x0a, 0x7e, 0x0a,
+ 0x7f, 0x59, 0x91, 0xa5, 0x23, 0x36, 0x00, 0xe4, 0x41, 0x95, 0x72, 0xce, 0xb8, 0x69, 0x0f, 0xf2,
+ 0xb5, 0xf2, 0x7c, 0x9e, 0x4d, 0xfc, 0xb7, 0x4a, 0x79, 0x58, 0x3b, 0xb8, 0x1d, 0xb8, 0x55, 0x64,
+ 0xcd, 0x33, 0x96, 0xe6, 0xd4, 0xdd, 0x85, 0xdb, 0x98, 0x2a, 0xdd, 0xed, 0xcc, 0x48, 0x44, 0x37,
+ 0x59, 0x1a, 0xc6, 0x51, 0xc1, 0xa9, 0x03, 0x36, 0x2e, 0xa4, 0x80, 0xe5, 0x12, 0x79, 0x50, 0x1f,
+ 0x25, 0x44, 0x84, 0x8c, 0xcf, 0x4c, 0xb6, 0x96, 0x9f, 0x8d, 0xfd, 0x62, 0x0f, 0x2f, 0xbf, 0xba,
+ 0x7f, 0x58, 0xe0, 0x9c, 0x17, 0x59, 0xe7, 0x45, 0x3f, 0x42, 0x6d, 0x2b, 0x8e, 0x68, 0xae, 0x8f,
+ 0xdb, 0x18, 0xf4, 0x3f, 0x7c, 0xbc, 0x73, 0xe3, 0x9f, 0x8f, 0x77, 0xee, 0x95, 0xd4, 0xcb, 0x32,
+ 0x9a, 0x4e, 0x58, 0x2a, 0x48, 0x9c, 0x52, 0x2e, 0xe7, 0xe9, 0xe1, 0x54, 0x41, 0x7c, 0x8d, 0xc4,
+ 0x26, 0x02, 0xfa, 0x0c, 0x6a, 0x3a, 0xba, 0xe9, 0xba, 0xb1, 0xdc, 0xf7, 0x36, 0xb4, 0xde, 0x4a,
+ 0x02, 0xc5, 0x79, 0x7c, 0x80, 0x2d, 0x1a, 0xc6, 0x69, 0x2c, 0x62, 0x96, 0x9a, 0x3a, 0xdf, 0x92,
+ 0xfc, 0x8f, 0x77, 0x71, 0xc9, 0x03, 0x39, 0x50, 0xdf, 0x36, 0x35, 0x37, 0x1d, 0x5c, 0xda, 0xe8,
+ 0x1d, 0x34, 0x8b, 0xf5, 0x9b, 0x4c, 0x74, 0x6d, 0xa5, 0xa0, 0x27, 0x2b, 0x9a, 0x56, 0x66, 0xe2,
+ 0x97, 0xa0, 0x5a, 0x4e, 0xe5, 0x60, 0xc8, 0x83, 0xf5, 0x9d, 0x59, 0xc6, 0xb8, 0xd8, 0x24, 0x93,
+ 0x3d, 0x2a, 0x05, 0xd6, 0xad, 0xf4, 0x6c, 0xaf, 0x81, 0x4f, 0x6f, 0xa3, 0x07, 0xf0, 0x09, 0x49,
+ 0x12, 0x76, 0x68, 0x14, 0xa1, 0x7a, 0xdb, 0xad, 0xf6, 0x2c, 0xaf, 0x8e, 0xcf, 0x7e, 0x90, 0x72,
+ 0xdc, 0x8e, 0x53, 0x92, 0x74, 0x41, 0x79, 0x68, 0x03, 0xb9, 0xd0, 0x7a, 0xf9, 0x9b, 0x0c, 0x4b,
+ 0xf9, 0x0b, 0x21, 0x78, 0xb7, 0xa9, 0x8a, 0x78, 0x62, 0xcf, 0x79, 0x0e, 0x9d, 0xd3, 0x94, 0xaf,
+ 0x25, 0xf7, 0x9f, 0xa1, 0x6d, 0xce, 0x6f, 0xfa, 0xdf, 0x29, 0xdd, 0x32, 0xfa, 0x8e, 0x39, 0x1e,
+ 0x00, 0xfb, 0x9a, 0x03, 0xe0, 0xfe, 0x0e, 0xeb, 0x98, 0x92, 0xe9, 0x76, 0x9c, 0xd0, 0x8b, 0xa5,
+ 0x2b, 0x9b, 0x19, 0x27, 0x74, 0x44, 0xc4, 0xde, 0xb2, 0x99, 0xc6, 0x46, 0x4f, 0xa1, 0x8a, 0x49,
+ 0x1a, 0x51, 0x93, 0xfa, 0x8b, 0x15, 0xa9, 0x55, 0x12, 0xe9, 0x8b, 0x35, 0xc4, 0x7d, 0x06, 0x8d,
+ 0xe5, 0x9e, 0x94, 0xe2, 0x9b, 0x30, 0xcc, 0xa9, 0x96, 0xb5, 0x8d, 0x8d, 0x25, 0xf7, 0x87, 0x34,
+ 0x8d, 0x4c, 0x6a, 0x1b, 0x1b, 0xcb, 0xbd, 0x0b, 0x9d, 0x63, 0xe6, 0xa6, 0x34, 0x08, 0x2a, 0x5b,
+ 0xf2, 0xbe, 0xb4, 0x54, 0x1f, 0xd4, 0xda, 0x6d, 0x43, 0x73, 0x14, 0xa7, 0xc5, 0x60, 0xba, 0x47,
+ 0x16, 0xb4, 0x46, 0x2c, 0x3d, 0x1e, 0xa7, 0x11, 0xac, 0x17, 0xfd, 0x79, 0x31, 0xda, 0xd9, 0x24,
+ 0x59, 0x71, 0xa7, 0xf5, 0xce, 0x1e, 0xc5, 0xbc, 0x00, 0xbe, 0x76, 0x1c, 0x54, 0xe4, 0xe4, 0xe1,
+ 0xd3, 0x70, 0xf4, 0x03, 0xdc, 0x1c, 0x0e, 0x07, 0x2a, 0xd2, 0xda, 0xb5, 0x22, 0x15, 0x30, 0xf4,
+ 0x1c, 0x6e, 0xee, 0xaa, 0x87, 0x29, 0x37, 0xd3, 0x71, 0x4e, 0x59, 0xd5, 0xfb, 0xe5, 0x6b, 0x37,
+ 0x4c, 0x27, 0x8c, 0x4f, 0x71, 0x01, 0xea, 0xbf, 0xaf, 0x40, 0x63, 0x38, 0x1c, 0x0c, 0x78, 0x3c,
+ 0x8d, 0x28, 0xfa, 0xd3, 0x02, 0x74, 0xf6, 0x3e, 0x41, 0xdf, 0xac, 0x56, 0xc9, 0xf9, 0x17, 0x9b,
+ 0xf3, 0xed, 0x35, 0x51, 0xa6, 0xca, 0xef, 0xa0, 0xaa, 0x54, 0x8c, 0xbe, 0xbc, 0xe2, 0x9c, 0x3b,
+ 0xde, 0xe5, 0x8e, 0x26, 0xf6, 0x04, 0xea, 0x85, 0x12, 0xd0, 0xbd, 0x95, 0xf4, 0x4e, 0x08, 0xdd,
+ 0xb9, 0x7f, 0x25, 0x5f, 0x93, 0x64, 0x17, 0x2a, 0x52, 0x46, 0xe8, 0xee, 0x0a, 0x50, 0x49, 0x67,
+ 0xce, 0xaa, 0x73, 0x9e, 0xd0, 0xdf, 0x2f, 0xf2, 0x49, 0x55, 0x77, 0x8c, 0xb7, 0x92, 0x4f, 0xe9,
+ 0xc5, 0x73, 0xbe, 0xba, 0x82, 0xa7, 0x0e, 0x3f, 0x68, 0x7d, 0x38, 0xda, 0xb0, 0xfe, 0x3e, 0xda,
+ 0xb0, 0xfe, 0x3d, 0xda, 0xb0, 0xc6, 0x35, 0xf5, 0x9f, 0xe5, 0xd1, 0xff, 0x01, 0x00, 0x00, 0xff,
+ 0xff, 0xb8, 0xcb, 0x8c, 0xfa, 0xd6, 0x09, 0x00, 0x00,
}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
index 9ba88bc..8f40a33 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto
@@ -3,17 +3,44 @@
package moby.buildkit.v1.frontend;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/gogo/googleapis/google/rpc/status.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
+import "github.com/moby/buildkit/api/types/worker.proto";
+import "github.com/moby/buildkit/util/apicaps/pb/caps.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
service LLBBridge {
+ // apicaps:CapResolveImage
rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
+ // apicaps:CapSolveBase
rpc Solve(SolveRequest) returns (SolveResponse);
+ // apicaps:CapReadFile
rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
rpc Ping(PingRequest) returns (PongResponse);
+ rpc Return(ReturnRequest) returns (ReturnResponse);
+}
+
+message Result {
+ oneof result {
+ string ref = 1;
+ RefMap refs = 2;
+ }
+ map<string, bytes> metadata = 10;
+}
+
+message RefMap {
+ map<string, string> refs = 1;
+}
+
+message ReturnRequest {
+ Result result = 1;
+ google.rpc.Status error = 2;
+}
+
+message ReturnResponse {
}
message ResolveImageConfigRequest {
@@ -31,13 +58,21 @@
string Frontend = 2;
map<string, string> FrontendOpt = 3;
repeated string ImportCacheRefs = 4;
+ bool allowResultReturn = 5;
+
+ // apicaps.CapSolveInlineReturn deprecated
bool Final = 10;
bytes ExporterAttr = 11;
}
message SolveResponse {
- string Ref = 1; // can be used by readfile request
- bytes ExporterAttr = 2;
+ // deprecated
+ string ref = 1; // can be used by readfile request
+ // deprecated
+/* bytes ExporterAttr = 2;*/
+
+ // these fields are returned when allowMapReturn was set
+ Result result = 3;
}
message ReadFileRequest {
@@ -58,4 +93,7 @@
message PingRequest{
}
message PongResponse{
+ repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false];
+ repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
+ repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
}
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/frontend/result.go b/vendor/github.com/moby/buildkit/frontend/result.go
new file mode 100644
index 0000000..37715de
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/result.go
@@ -0,0 +1,23 @@
+package frontend
+
+import "github.com/moby/buildkit/solver"
+
+type Result struct {
+ Ref solver.CachedResult
+ Refs map[string]solver.CachedResult
+ Metadata map[string][]byte
+}
+
+func (r *Result) EachRef(fn func(solver.CachedResult) error) (err error) {
+ if r.Ref != nil {
+ err = fn(r.Ref)
+ }
+ for _, r := range r.Refs {
+ if r != nil {
+ if err1 := fn(r); err1 != nil && err == nil {
+ err = err1
+ }
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
index 8d5f46e..b568d3c 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
@@ -20,16 +20,20 @@
)
type llbBridge struct {
- builder solver.Builder
- frontends map[string]frontend.Frontend
- resolveWorker func() (worker.Worker, error)
- ci *remotecache.CacheImporter
- cms map[string]solver.CacheManager
- cmsMu sync.Mutex
- platforms []specs.Platform
+ builder solver.Builder
+ frontends map[string]frontend.Frontend
+ resolveWorker func() (worker.Worker, error)
+ resolveCacheImporter remotecache.ResolveCacheImporterFunc
+ cms map[string]solver.CacheManager
+ cmsMu sync.Mutex
+ platforms []specs.Platform
}
-func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res solver.CachedResult, exp map[string][]byte, err error) {
+func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {
+ w, err := b.resolveWorker()
+ if err != nil {
+ return nil, err
+ }
var cms []solver.CacheManager
for _, ref := range req.ImportCacheRefs {
b.cmsMu.Lock()
@@ -37,14 +41,22 @@
if prevCm, ok := b.cms[ref]; !ok {
r, err := reference.ParseNormalizedNamed(ref)
if err != nil {
- return nil, nil, err
+ return nil, err
}
ref = reference.TagNameOnly(r).String()
func(ref string) {
cm = newLazyCacheManager(ref, func() (solver.CacheManager, error) {
var cmNew solver.CacheManager
if err := b.builder.Call(ctx, "importing cache manifest from "+ref, func(ctx context.Context) error {
- cmNew, err = b.ci.Resolve(ctx, ref)
+ if b.resolveCacheImporter == nil {
+ return errors.New("no cache importer is available")
+ }
+ typ := "" // TODO: support non-registry type
+ ci, desc, err := b.resolveCacheImporter(ctx, typ, ref)
+ if err != nil {
+ return err
+ }
+ cmNew, err = ci.Resolve(ctx, desc, ref, w)
return err
}); err != nil {
return nil, err
@@ -63,38 +75,43 @@
if req.Definition != nil && req.Definition.Def != nil {
edge, err := Load(req.Definition, WithCacheSources(cms), RuntimePlatforms(b.platforms))
if err != nil {
- return nil, nil, err
+ return nil, err
}
- res, err = b.builder.Build(ctx, edge)
+ ref, err := b.builder.Build(ctx, edge)
if err != nil {
- return nil, nil, err
+ return nil, err
}
+
+ res = &frontend.Result{Ref: ref}
}
if req.Frontend != "" {
f, ok := b.frontends[req.Frontend]
if !ok {
- return nil, nil, errors.Errorf("invalid frontend: %s", req.Frontend)
+ return nil, errors.Errorf("invalid frontend: %s", req.Frontend)
}
- res, exp, err = f.Solve(ctx, b, req.FrontendOpt)
+ res, err = f.Solve(ctx, b, req.FrontendOpt)
if err != nil {
- return nil, nil, err
+ return nil, err
}
} else {
if req.Definition == nil || req.Definition.Def == nil {
- return nil, nil, nil
+ return &frontend.Result{}, nil
}
}
- if res != nil {
- wr, ok := res.Sys().(*worker.WorkerRef)
+ if err := res.EachRef(func(r solver.CachedResult) error {
+ wr, ok := r.Sys().(*worker.WorkerRef)
if !ok {
- return nil, nil, errors.Errorf("invalid reference for exporting: %T", res.Sys())
+ return errors.Errorf("invalid reference for exporting: %T", r.Sys())
}
if wr.ImmutableRef != nil {
- if err := wr.ImmutableRef.Finalize(ctx); err != nil {
- return nil, nil, err
+ if err := wr.ImmutableRef.Finalize(ctx, false); err != nil {
+ return err
}
}
+ return nil
+ }); err != nil {
+ return nil, err
}
return
}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go
index 9f035e1..4b03004 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go
@@ -117,12 +117,16 @@
lm.Unmount()
lm = nil
- newref, _, err := b.b.Solve(ctx, frontend.SolveRequest{
+ newRes, err := b.b.Solve(ctx, frontend.SolveRequest{
Definition: def.ToPB(),
})
if err != nil {
return nil, err
}
- return []solver.Result{newref}, err
+ for _, r := range newRes.Refs {
+ r.Release(context.TODO())
+ }
+
+ return []solver.Result{newRes.Ref}, err
}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
index 1a03222..2e27f53 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
@@ -19,7 +19,7 @@
type ExporterRequest struct {
Exporter exporter.ExporterInstance
- CacheExporter *remotecache.RegistryCacheExporter
+ CacheExporter remotecache.Exporter
CacheExportMode solver.CacheExportMode
}
@@ -27,18 +27,18 @@
type ResolveWorkerFunc func() (worker.Worker, error)
type Solver struct {
- solver *solver.Solver
- resolveWorker ResolveWorkerFunc
- frontends map[string]frontend.Frontend
- ci *remotecache.CacheImporter
- platforms []specs.Platform
+ solver *solver.Solver
+ resolveWorker ResolveWorkerFunc
+ frontends map[string]frontend.Frontend
+ resolveCacheImporter remotecache.ResolveCacheImporterFunc
+ platforms []specs.Platform
}
-func New(wc *worker.Controller, f map[string]frontend.Frontend, cacheStore solver.CacheKeyStorage, ci *remotecache.CacheImporter) (*Solver, error) {
+func New(wc *worker.Controller, f map[string]frontend.Frontend, cacheStore solver.CacheKeyStorage, resolveCI remotecache.ResolveCacheImporterFunc) (*Solver, error) {
s := &Solver{
- resolveWorker: defaultResolver(wc),
- frontends: f,
- ci: ci,
+ resolveWorker: defaultResolver(wc),
+ frontends: f,
+ resolveCacheImporter: resolveCI,
}
results := newCacheResultStorage(wc)
@@ -71,12 +71,12 @@
func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge {
return &llbBridge{
- builder: b,
- frontends: s.frontends,
- resolveWorker: s.resolveWorker,
- ci: s.ci,
- cms: map[string]solver.CacheManager{},
- platforms: s.platforms,
+ builder: b,
+ frontends: s.frontends,
+ resolveWorker: s.resolveWorker,
+ resolveCacheImporter: s.resolveCacheImporter,
+ cms: map[string]solver.CacheManager{},
+ platforms: s.platforms,
}
}
@@ -90,21 +90,22 @@
j.SessionID = session.FromContext(ctx)
- res, exporterOpt, err := s.Bridge(j).Solve(ctx, req)
+ res, err := s.Bridge(j).Solve(ctx, req)
if err != nil {
return nil, err
}
defer func() {
- if res != nil {
- go res.Release(context.TODO())
- }
+ res.EachRef(func(ref solver.CachedResult) error {
+ go ref.Release(context.TODO())
+ return nil
+ })
}()
var exporterResponse map[string]string
if exp := exp.Exporter; exp != nil {
var immutable cache.ImmutableRef
- if res != nil {
+ if res := res.Ref; res != nil { // FIXME(tonistiigi):
workerRef, ok := res.Sys().(*worker.WorkerRef)
if !ok {
return nil, errors.Errorf("invalid reference: %T", res.Sys())
@@ -113,7 +114,7 @@
}
if err := j.Call(ctx, exp.Name(), func(ctx context.Context) error {
- exporterResponse, err = exp.Export(ctx, immutable, exporterOpt)
+ exporterResponse, err = exp.Export(ctx, immutable, res.Metadata)
return err
}); err != nil {
return nil, err
@@ -123,14 +124,16 @@
if e := exp.CacheExporter; e != nil {
if err := j.Call(ctx, "exporting cache", func(ctx context.Context) error {
prepareDone := oneOffProgress(ctx, "preparing build cache for export")
- if _, err := res.CacheKey().Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
- Convert: workerRefConverter,
- Mode: exp.CacheExportMode,
+ if err := res.EachRef(func(res solver.CachedResult) error {
+ _, err := res.CacheKey().Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
+ Convert: workerRefConverter,
+ Mode: exp.CacheExportMode,
+ })
+ return err
}); err != nil {
return prepareDone(err)
}
prepareDone(nil)
-
return e.Finalize(ctx)
}); err != nil {
return nil, err
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go
index 64551ca..bd04e1e 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go
@@ -51,8 +51,9 @@
func RuntimePlatforms(p []specs.Platform) LoadOpt {
var defaultPlatform *pb.Platform
+ pp := make([]specs.Platform, len(p))
for i := range p {
- p[i] = platforms.Normalize(p[i])
+ pp[i] = platforms.Normalize(p[i])
}
return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
if op.Platform == nil {
@@ -67,7 +68,7 @@
}
if _, ok := op.Op.(*pb.Op_Exec); ok {
var found bool
- for _, pp := range p {
+ for _, pp := range pp {
if pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant {
found = true
break
diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go
index cee4f1b..6f5f77f 100644
--- a/vendor/github.com/moby/buildkit/solver/pb/attr.go
+++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go
@@ -3,6 +3,7 @@
const AttrKeepGitDir = "git.keepgitdir"
const AttrFullRemoteURL = "git.fullurl"
const AttrLocalSessionID = "local.session"
+const AttrLocalUniqueID = "local.unique"
const AttrIncludePatterns = "local.includepattern"
const AttrFollowPaths = "local.followpaths"
const AttrExcludePatterns = "local.excludepatterns"
diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go
index 5806eb7..881e87c 100644
--- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go
+++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go
@@ -609,6 +609,7 @@
}
// BuildOp is used for nested build invocation.
+// BuildOp is experimental and can break without backwards compatibility
type BuildOp struct {
Builder InputIndex `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"`
Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto
index 6f0a524..db44c5a 100644
--- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto
+++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto
@@ -114,6 +114,7 @@
}
// BuildOp is used for nested build invocation.
+// BuildOp is experimental and can break without backwards compatibility
message BuildOp {
int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
map<string, BuildInput> inputs = 2;
diff --git a/vendor/github.com/moby/buildkit/solver/pb/platform.go b/vendor/github.com/moby/buildkit/solver/pb/platform.go
new file mode 100644
index 0000000..a434aa7
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/solver/pb/platform.go
@@ -0,0 +1,41 @@
+package pb
+
+import (
+ specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func (p *Platform) Spec() specs.Platform {
+ return specs.Platform{
+ OS: p.OS,
+ Architecture: p.Architecture,
+ Variant: p.Variant,
+ OSVersion: p.OSVersion,
+ OSFeatures: p.OSFeatures,
+ }
+}
+
+func PlatformFromSpec(p specs.Platform) Platform {
+ return Platform{
+ OS: p.OS,
+ Architecture: p.Architecture,
+ Variant: p.Variant,
+ OSVersion: p.OSVersion,
+ OSFeatures: p.OSFeatures,
+ }
+}
+
+func ToSpecPlatforms(p []Platform) []specs.Platform {
+ out := make([]specs.Platform, 0, len(p))
+ for _, pp := range p {
+ out = append(out, pp.Spec())
+ }
+ return out
+}
+
+func PlatformsFromSpec(p []specs.Platform) []Platform {
+ out := make([]Platform, 0, len(p))
+ for _, pp := range p {
+ out = append(out, PlatformFromSpec(pp))
+ }
+ return out
+}
diff --git a/vendor/github.com/moby/buildkit/util/apicaps/caps.go b/vendor/github.com/moby/buildkit/util/apicaps/caps.go
new file mode 100644
index 0000000..2509b53
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/apicaps/caps.go
@@ -0,0 +1,161 @@
+package apicaps
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ pb "github.com/moby/buildkit/util/apicaps/pb"
+ "github.com/pkg/errors"
+)
+
+type PBCap = pb.APICap
+
+// ExportedProduct is the name of the product using this package.
+// Users vendoring this library may override it to provide better versioning hints
+// for their users (or set it with a flag to buildkitd).
+var ExportedProduct string
+
+// CapStatus defines the stability properties of a capability
+type CapStatus int
+
+const (
+ // CapStatusStable refers to a capability that should never be changed in
+ // backwards incompatible manner unless there is a serious security issue.
+ CapStatusStable CapStatus = iota
+ // CapStatusExperimental refers to a capability that may be removed in the future.
+ // If incompatible changes are made the previous ID is disabled and new is added.
+ CapStatusExperimental
+ // CapStatusPrerelease is same as CapStatusExperimental that can be used for new
+ // features before they move to stable.
+ CapStatusPrerelease
+)
+
+// CapID is type for capability identifier
+type CapID string
+
+// Cap describes an API feature
+type Cap struct {
+ ID CapID
+ Name string // readable name, may contain spaces but keep in one sentence
+ Status CapStatus
+ Enabled bool
+ Deprecated bool
+ SupportedHint map[string]string
+ DisabledReason string
+ DisabledReasonMsg string
+ DisabledAlternative string
+}
+
+// CapList is a collection of capability definitions
+type CapList struct {
+ m map[CapID]Cap
+}
+
+// Init initializes definition for a new capability.
+// Not safe to be called concurrently with other methods.
+func (l *CapList) Init(cc ...Cap) {
+ if l.m == nil {
+ l.m = make(map[CapID]Cap, len(cc))
+ }
+ for _, c := range cc {
+ l.m[c.ID] = c
+ }
+}
+
+// All reports the configuration of all known capabilities
+func (l *CapList) All() []pb.APICap {
+ out := make([]pb.APICap, 0, len(l.m))
+ for _, c := range l.m {
+ out = append(out, pb.APICap{
+ ID: string(c.ID),
+ Enabled: c.Enabled,
+ Deprecated: c.Deprecated,
+ DisabledReason: c.DisabledReason,
+ DisabledReasonMsg: c.DisabledReasonMsg,
+ DisabledAlternative: c.DisabledAlternative,
+ })
+ }
+ sort.Slice(out, func(i, j int) bool {
+ return out[i].ID < out[j].ID
+ })
+ return out
+}
+
+// CapSet returns a CapSet for an capability configuration
+func (l *CapList) CapSet(caps []pb.APICap) CapSet {
+ m := make(map[string]*pb.APICap, len(caps))
+ for _, c := range caps {
+ if c.ID != "" {
+ m[c.ID] = &c
+ }
+ }
+ return CapSet{
+ list: l,
+ set: m,
+ }
+}
+
+// CapSet is a configuration for detecting supported capabilities
+type CapSet struct {
+ list *CapList
+ set map[string]*pb.APICap
+}
+
+// Supports returns an error if capability is not supported
+func (s *CapSet) Supports(id CapID) error {
+ err := &CapError{ID: id}
+ c, ok := s.list.m[id]
+ if !ok {
+ return errors.WithStack(err)
+ }
+ err.Definition = &c
+ state, ok := s.set[string(id)]
+ if !ok {
+ return errors.WithStack(err)
+ }
+ err.State = state
+ if !state.Enabled {
+ return errors.WithStack(err)
+ }
+ return nil
+}
+
+// CapError is an error for unsupported capability
+type CapError struct {
+ ID CapID
+ Definition *Cap
+ State *pb.APICap
+}
+
+func (e CapError) Error() string {
+ if e.Definition == nil {
+ return fmt.Sprintf("unknown API capability %s", e.ID)
+ }
+ typ := ""
+ if e.Definition.Status == CapStatusExperimental {
+ typ = "experimental "
+ }
+ if e.Definition.Status == CapStatusPrerelease {
+ typ = "prerelease "
+ }
+ name := ""
+ if e.Definition.Name != "" {
+ name = "(" + e.Definition.Name + ")"
+ }
+ b := &strings.Builder{}
+ fmt.Fprintf(b, "requested %sfeature %s %s", typ, e.ID, name)
+ if e.State == nil {
+ fmt.Fprint(b, " is not supported by build server")
+ if hint, ok := e.Definition.SupportedHint[ExportedProduct]; ok {
+ fmt.Fprintf(b, " (added in %s)", hint)
+ }
+ fmt.Fprintf(b, ", please update %s", ExportedProduct)
+ } else {
+ fmt.Fprint(b, " has been disabled on the build server")
+ if e.State.DisabledReasonMsg != "" {
+ fmt.Fprintf(b, ": %s", e.State.DisabledReasonMsg)
+ }
+ }
+ return b.String()
+}
diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go
new file mode 100644
index 0000000..9d4d488
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go
@@ -0,0 +1,535 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: caps.proto
+
+/*
+ Package moby_buildkit_v1_apicaps is a generated protocol buffer package.
+
+ It is generated from these files:
+ caps.proto
+
+ It has these top-level messages:
+ APICap
+*/
+package moby_buildkit_v1_apicaps
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// APICap defines a capability supported by the service
+type APICap struct {
+ ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"`
+ Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"`
+ DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"`
+ DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"`
+ DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"`
+}
+
+func (m *APICap) Reset() { *m = APICap{} }
+func (m *APICap) String() string { return proto.CompactTextString(m) }
+func (*APICap) ProtoMessage() {}
+func (*APICap) Descriptor() ([]byte, []int) { return fileDescriptorCaps, []int{0} }
+
+func (m *APICap) GetID() string {
+ if m != nil {
+ return m.ID
+ }
+ return ""
+}
+
+func (m *APICap) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *APICap) GetDeprecated() bool {
+ if m != nil {
+ return m.Deprecated
+ }
+ return false
+}
+
+func (m *APICap) GetDisabledReason() string {
+ if m != nil {
+ return m.DisabledReason
+ }
+ return ""
+}
+
+func (m *APICap) GetDisabledReasonMsg() string {
+ if m != nil {
+ return m.DisabledReasonMsg
+ }
+ return ""
+}
+
+func (m *APICap) GetDisabledAlternative() string {
+ if m != nil {
+ return m.DisabledAlternative
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*APICap)(nil), "moby.buildkit.v1.apicaps.APICap")
+}
+func (m *APICap) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *APICap) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.ID) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintCaps(dAtA, i, uint64(len(m.ID)))
+ i += copy(dAtA[i:], m.ID)
+ }
+ if m.Enabled {
+ dAtA[i] = 0x10
+ i++
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if m.Deprecated {
+ dAtA[i] = 0x18
+ i++
+ if m.Deprecated {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i++
+ }
+ if len(m.DisabledReason) > 0 {
+ dAtA[i] = 0x22
+ i++
+ i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReason)))
+ i += copy(dAtA[i:], m.DisabledReason)
+ }
+ if len(m.DisabledReasonMsg) > 0 {
+ dAtA[i] = 0x2a
+ i++
+ i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReasonMsg)))
+ i += copy(dAtA[i:], m.DisabledReasonMsg)
+ }
+ if len(m.DisabledAlternative) > 0 {
+ dAtA[i] = 0x32
+ i++
+ i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative)))
+ i += copy(dAtA[i:], m.DisabledAlternative)
+ }
+ return i, nil
+}
+
+func encodeVarintCaps(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *APICap) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovCaps(uint64(l))
+ }
+ if m.Enabled {
+ n += 2
+ }
+ if m.Deprecated {
+ n += 2
+ }
+ l = len(m.DisabledReason)
+ if l > 0 {
+ n += 1 + l + sovCaps(uint64(l))
+ }
+ l = len(m.DisabledReasonMsg)
+ if l > 0 {
+ n += 1 + l + sovCaps(uint64(l))
+ }
+ l = len(m.DisabledAlternative)
+ if l > 0 {
+ n += 1 + l + sovCaps(uint64(l))
+ }
+ return n
+}
+
+func sovCaps(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozCaps(x uint64) (n int) {
+ return sovCaps(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *APICap) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: APICap: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: APICap: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCaps
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Deprecated = bool(v != 0)
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisabledReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCaps
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisabledReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisabledReasonMsg", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCaps
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisabledReasonMsg = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DisabledAlternative", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCaps
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DisabledAlternative = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCaps(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCaps
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipCaps(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthCaps
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowCaps
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipCaps(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthCaps = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowCaps = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("caps.proto", fileDescriptorCaps) }
+
+var fileDescriptorCaps = []byte{
+ // 236 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28,
+ 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd,
+ 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0xd4, 0x4b, 0x2c, 0xc8, 0x04, 0xc9, 0x4b, 0xe9,
+ 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb,
+ 0x83, 0x35, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x48, 0xe9, 0x16, 0x23,
+ 0x17, 0x9b, 0x63, 0x80, 0xa7, 0x73, 0x62, 0x81, 0x10, 0x1f, 0x17, 0x93, 0xa7, 0x8b, 0x04, 0xa3,
+ 0x02, 0xa3, 0x06, 0x67, 0x10, 0x93, 0xa7, 0x8b, 0x90, 0x04, 0x17, 0xbb, 0x6b, 0x5e, 0x62, 0x52,
+ 0x4e, 0x6a, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x8c, 0x2b, 0x24, 0xc7, 0xc5, 0xe5,
+ 0x92, 0x5a, 0x50, 0x94, 0x9a, 0x9c, 0x58, 0x92, 0x9a, 0x22, 0xc1, 0x0c, 0x96, 0x44, 0x12, 0x11,
+ 0x52, 0xe3, 0xe2, 0x73, 0xc9, 0x2c, 0x06, 0xab, 0x0d, 0x4a, 0x4d, 0x2c, 0xce, 0xcf, 0x93, 0x60,
+ 0x01, 0x9b, 0x8a, 0x26, 0x2a, 0xa4, 0xc3, 0x25, 0x88, 0x2a, 0xe2, 0x5b, 0x9c, 0x2e, 0xc1, 0x0a,
+ 0x56, 0x8a, 0x29, 0x21, 0x64, 0xc0, 0x25, 0x0c, 0x13, 0x74, 0xcc, 0x29, 0x49, 0x2d, 0xca, 0x4b,
+ 0x2c, 0xc9, 0x2c, 0x4b, 0x95, 0x60, 0x03, 0xab, 0xc7, 0x26, 0xe5, 0xc4, 0x73, 0xe2, 0x91, 0x1c,
+ 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x26, 0xb1, 0x81, 0x7d, 0x6c, 0x0c, 0x08,
+ 0x00, 0x00, 0xff, 0xff, 0x02, 0x2d, 0x9e, 0x91, 0x48, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto
new file mode 100644
index 0000000..1e8c065
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package moby.buildkit.v1.apicaps;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// APICap defines a capability supported by the service
+message APICap {
+ string ID = 1;
+ bool Enabled = 2;
+ bool Deprecated = 3; // Unused. May be used for warnings in the future
+ string DisabledReason = 4; // Reason key for detection code
+ string DisabledReasonMsg = 5; // Message to the user
+ string DisabledAlternative = 6; // Identifier that updated client could catch.
+}
\ No newline at end of file
diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go
new file mode 100644
index 0000000..281dfab
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go
@@ -0,0 +1,3 @@
+package moby_buildkit_v1_apicaps
+
+//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto
diff --git a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go
index 645b619..0c87e64 100644
--- a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go
+++ b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go
@@ -5,35 +5,28 @@
"io"
"github.com/containerd/containerd/content"
- "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
-func FromFetcher(f remotes.Fetcher, desc ocispec.Descriptor) content.Provider {
+func FromFetcher(f remotes.Fetcher) content.Provider {
return &fetchedProvider{
- f: f,
- desc: desc,
+ f: f,
}
}
type fetchedProvider struct {
- f remotes.Fetcher
- desc ocispec.Descriptor
+ f remotes.Fetcher
}
func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
- if desc.Digest != p.desc.Digest {
- return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest)
- }
-
- rc, err := p.f.Fetch(ctx, p.desc)
+ rc, err := p.f.Fetch(ctx, desc)
if err != nil {
return nil, err
}
- return &readerAt{Reader: rc, Closer: rc, size: p.desc.Size}, nil
+ return &readerAt{Reader: rc, Closer: rc, size: desc.Size}, nil
}
type readerAt struct {
diff --git a/vendor/github.com/moby/buildkit/util/contentutil/pusher.go b/vendor/github.com/moby/buildkit/util/contentutil/pusher.go
new file mode 100644
index 0000000..ab88128
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/contentutil/pusher.go
@@ -0,0 +1,58 @@
+package contentutil
+
+import (
+ "context"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/errdefs"
+ "github.com/containerd/containerd/remotes"
+ "github.com/pkg/errors"
+)
+
+func FromPusher(p remotes.Pusher) content.Ingester {
+ return &pushingIngester{
+ p: p,
+ }
+}
+
+type pushingIngester struct {
+ p remotes.Pusher
+}
+
+// Writer implements content.Ingester. desc.MediaType must be set for manifest blobs.
+func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+ var wOpts content.WriterOpts
+ for _, opt := range opts {
+ if err := opt(&wOpts); err != nil {
+ return nil, err
+ }
+ }
+ if wOpts.Ref == "" {
+ return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+ }
+ // pusher requires desc.MediaType to determine the PUT URL, especially for manifest blobs.
+ contentWriter, err := i.p.Push(ctx, wOpts.Desc)
+ if err != nil {
+ return nil, err
+ }
+ return &writer{
+ Writer: contentWriter,
+ contentWriterRef: wOpts.Ref,
+ }, nil
+}
+
+type writer struct {
+ content.Writer // returned from pusher.Push
+ contentWriterRef string // ref passed for Writer()
+}
+
+func (w *writer) Status() (content.Status, error) {
+ st, err := w.Writer.Status()
+ if err != nil {
+ return st, err
+ }
+ if w.contentWriterRef != "" {
+ st.Ref = w.contentWriterRef
+ }
+ return st, nil
+}
diff --git a/vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md b/vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md
deleted file mode 100644
index 7b985ba..0000000
--- a/vendor/github.com/moby/buildkit/util/libcontainer_specconv/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Temporary forked from https://github.com/opencontainers/runc/pull/1692
diff --git a/vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go b/vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go
deleted file mode 100644
index 352ef45..0000000
--- a/vendor/github.com/moby/buildkit/util/libcontainer_specconv/example.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package specconv
-
-import (
- "os"
- "sort"
- "strings"
-
- "github.com/opencontainers/runc/libcontainer/system"
- "github.com/opencontainers/runc/libcontainer/user"
- "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// RootlessOpts is an optional spec for ToRootless
-type RootlessOpts struct {
- // Add sub{u,g}id to spec.Linux.{U,G}IDMappings.
- // Requires newuidmap(1) and newgidmap(1) with suid bit.
- // Ignored when running in userns.
- MapSubUIDGID bool
-}
-
-// Run-time context for ToRootless.
-type RootlessContext struct {
- EUID uint32
- EGID uint32
- SubUIDs []user.SubID
- SubGIDs []user.SubID
- UIDMap []user.IDMap
- GIDMap []user.IDMap
- InUserNS bool
-}
-
-// ToRootless converts the given spec file into one that should work with
-// rootless containers, by removing incompatible options and adding others that
-// are needed.
-func ToRootless(spec *specs.Spec, opts *RootlessOpts) error {
- var err error
- ctx := RootlessContext{}
- ctx.EUID = uint32(os.Geteuid())
- ctx.EGID = uint32(os.Getegid())
- ctx.SubUIDs, err = user.CurrentUserSubUIDs()
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- ctx.SubGIDs, err = user.CurrentGroupSubGIDs()
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- ctx.UIDMap, err = user.CurrentProcessUIDMap()
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- uidMapExists := !os.IsNotExist(err)
- ctx.GIDMap, err = user.CurrentProcessUIDMap()
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- ctx.InUserNS = uidMapExists && system.UIDMapInUserNS(ctx.UIDMap)
- return ToRootlessWithContext(ctx, spec, opts)
-}
-
-// ToRootlessWithContext converts the spec with the run-time context.
-// ctx can be internally modified for sorting.
-func ToRootlessWithContext(ctx RootlessContext, spec *specs.Spec, opts *RootlessOpts) error {
- if opts == nil {
- opts = &RootlessOpts{}
- }
- var namespaces []specs.LinuxNamespace
-
- // Remove networkns from the spec.
- for _, ns := range spec.Linux.Namespaces {
- switch ns.Type {
- case specs.NetworkNamespace, specs.UserNamespace:
- // Do nothing.
- default:
- namespaces = append(namespaces, ns)
- }
- }
- // Add userns to the spec.
- namespaces = append(namespaces, specs.LinuxNamespace{
- Type: specs.UserNamespace,
- })
- spec.Linux.Namespaces = namespaces
-
- // Add mappings for the current user.
- if ctx.InUserNS {
- uNextContainerID := int64(0)
- sort.Sort(idmapSorter(ctx.UIDMap))
- for _, uidmap := range ctx.UIDMap {
- spec.Linux.UIDMappings = append(spec.Linux.UIDMappings,
- specs.LinuxIDMapping{
- HostID: uint32(uidmap.ID),
- ContainerID: uint32(uNextContainerID),
- Size: uint32(uidmap.Count),
- })
- uNextContainerID += uidmap.Count
- }
- gNextContainerID := int64(0)
- sort.Sort(idmapSorter(ctx.GIDMap))
- for _, gidmap := range ctx.GIDMap {
- spec.Linux.GIDMappings = append(spec.Linux.GIDMappings,
- specs.LinuxIDMapping{
- HostID: uint32(gidmap.ID),
- ContainerID: uint32(gNextContainerID),
- Size: uint32(gidmap.Count),
- })
- gNextContainerID += gidmap.Count
- }
- // opts.MapSubUIDGID is ignored in userns
- } else {
- spec.Linux.UIDMappings = []specs.LinuxIDMapping{{
- HostID: ctx.EUID,
- ContainerID: 0,
- Size: 1,
- }}
- spec.Linux.GIDMappings = []specs.LinuxIDMapping{{
- HostID: ctx.EGID,
- ContainerID: 0,
- Size: 1,
- }}
- if opts.MapSubUIDGID {
- uNextContainerID := int64(1)
- sort.Sort(subIDSorter(ctx.SubUIDs))
- for _, subuid := range ctx.SubUIDs {
- spec.Linux.UIDMappings = append(spec.Linux.UIDMappings,
- specs.LinuxIDMapping{
- HostID: uint32(subuid.SubID),
- ContainerID: uint32(uNextContainerID),
- Size: uint32(subuid.Count),
- })
- uNextContainerID += subuid.Count
- }
- gNextContainerID := int64(1)
- sort.Sort(subIDSorter(ctx.SubGIDs))
- for _, subgid := range ctx.SubGIDs {
- spec.Linux.GIDMappings = append(spec.Linux.GIDMappings,
- specs.LinuxIDMapping{
- HostID: uint32(subgid.SubID),
- ContainerID: uint32(gNextContainerID),
- Size: uint32(subgid.Count),
- })
- gNextContainerID += subgid.Count
- }
- }
- }
-
- // Fix up mounts.
- var mounts []specs.Mount
- for _, mount := range spec.Mounts {
- // Ignore all mounts that are under /sys.
- if strings.HasPrefix(mount.Destination, "/sys") {
- continue
- }
-
- // Remove all gid= and uid= mappings.
- var options []string
- for _, option := range mount.Options {
- if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") {
- options = append(options, option)
- }
- }
-
- mount.Options = options
- mounts = append(mounts, mount)
- }
- // Add the sysfs mount as an rbind.
- mounts = append(mounts, specs.Mount{
- Source: "/sys",
- Destination: "/sys",
- Type: "none",
- Options: []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
- })
- spec.Mounts = mounts
-
- // Remove cgroup settings.
- spec.Linux.Resources = nil
- return nil
-}
-
-// subIDSorter is required for Go <= 1.7
-type subIDSorter []user.SubID
-
-func (x subIDSorter) Len() int { return len(x) }
-func (x subIDSorter) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x subIDSorter) Less(i, j int) bool { return x[i].SubID < x[j].SubID }
-
-type idmapSorter []user.IDMap
-
-func (x idmapSorter) Len() int { return len(x) }
-func (x idmapSorter) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-func (x idmapSorter) Less(i, j int) bool { return x[i].ID < x[j].ID }
diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go
deleted file mode 100644
index 5cc2f01..0000000
--- a/vendor/github.com/moby/buildkit/util/push/push.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package push
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "sync"
- "time"
-
- "github.com/containerd/containerd/content"
- "github.com/containerd/containerd/images"
- "github.com/containerd/containerd/remotes"
- "github.com/containerd/containerd/remotes/docker"
- "github.com/docker/distribution/reference"
- "github.com/moby/buildkit/session"
- "github.com/moby/buildkit/session/auth"
- "github.com/moby/buildkit/util/imageutil"
- "github.com/moby/buildkit/util/progress"
- "github.com/moby/buildkit/util/tracing"
- digest "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/sirupsen/logrus"
-)
-
-func getCredentialsFunc(ctx context.Context, sm *session.Manager) func(string) (string, string, error) {
- id := session.FromContext(ctx)
- if id == "" {
- return nil
- }
- return func(host string) (string, string, error) {
- timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- caller, err := sm.Get(timeoutCtx, id)
- if err != nil {
- return "", "", err
- }
-
- return auth.CredentialsFunc(context.TODO(), caller)(host)
- }
-}
-
-func Push(ctx context.Context, sm *session.Manager, cs content.Provider, dgst digest.Digest, ref string, insecure bool) error {
- desc := ocispec.Descriptor{
- Digest: dgst,
- }
- parsed, err := reference.ParseNormalizedNamed(ref)
- if err != nil {
- return err
- }
- ref = reference.TagNameOnly(parsed).String()
-
- resolver := docker.NewResolver(docker.ResolverOptions{
- Client: tracing.DefaultClient,
- Credentials: getCredentialsFunc(ctx, sm),
- PlainHTTP: insecure,
- })
-
- pusher, err := resolver.Pusher(ctx, ref)
- if err != nil {
- return err
- }
-
- var m sync.Mutex
- manifestStack := []ocispec.Descriptor{}
-
- filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
- switch desc.MediaType {
- case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
- images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
- m.Lock()
- manifestStack = append(manifestStack, desc)
- m.Unlock()
- return nil, images.ErrStopHandler
- default:
- return nil, nil
- }
- })
-
- pushHandler := remotes.PushHandler(pusher, cs)
-
- handlers := append([]images.Handler{},
- childrenHandler(cs),
- filterHandler,
- pushHandler,
- )
-
- ra, err := cs.ReaderAt(ctx, desc)
- if err != nil {
- return err
- }
-
- mtype, err := imageutil.DetectManifestMediaType(ra)
- if err != nil {
- return err
- }
-
- layersDone := oneOffProgress(ctx, "pushing layers")
- err = images.Dispatch(ctx, images.Handlers(handlers...), ocispec.Descriptor{
- Digest: dgst,
- Size: ra.Size(),
- MediaType: mtype,
- })
- layersDone(err)
- if err != nil {
- return err
- }
-
- mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref))
- for i := len(manifestStack) - 1; i >= 0; i-- {
- _, err := pushHandler(ctx, manifestStack[i])
- if err != nil {
- mfstDone(err)
- return err
- }
- }
- mfstDone(nil)
- return nil
-}
-
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.FromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- // TODO: set error on status
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
- }
-}
-
-func childrenHandler(provider content.Provider) images.HandlerFunc {
- return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
- var descs []ocispec.Descriptor
- switch desc.MediaType {
- case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
- p, err := content.ReadBlob(ctx, provider, desc)
- if err != nil {
- return nil, err
- }
-
- // TODO(stevvooe): We just assume oci manifest, for now. There may be
- // subtle differences from the docker version.
- var manifest ocispec.Manifest
- if err := json.Unmarshal(p, &manifest); err != nil {
- return nil, err
- }
-
- descs = append(descs, manifest.Config)
- descs = append(descs, manifest.Layers...)
- case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
- p, err := content.ReadBlob(ctx, provider, desc)
- if err != nil {
- return nil, err
- }
-
- var index ocispec.Index
- if err := json.Unmarshal(p, &index); err != nil {
- return nil, err
- }
-
- for _, m := range index.Manifests {
- if m.Digest != "" {
- descs = append(descs, m)
- }
- }
- case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
- images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
- ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip:
- // childless data types.
- return nil, nil
- default:
- logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
- }
-
- return descs, nil
- }
-}
diff --git a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go
new file mode 100644
index 0000000..1b90219
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go
@@ -0,0 +1,113 @@
+package specconv
+
+import (
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/opencontainers/runc/libcontainer/system"
+ "github.com/opencontainers/runc/libcontainer/user"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+// ToRootless converts spec to be compatible with "rootless" runc.
+// * Adds userns (Note: since we are already in userns, ideally we should not need to do this. runc-side issue is tracked at https://github.com/opencontainers/runc/issues/1837)
+// * Fix up mount flags (same as above)
+// * Replace /sys with bind-mount (FIXME: we don't need to do this if netns is unshared)
+func ToRootless(spec *specs.Spec) error {
+ if !system.RunningInUserNS() {
+ return errors.New("needs to be in user namespace")
+ }
+ uidMap, err := user.CurrentProcessUIDMap()
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+	gidMap, err := user.CurrentProcessGIDMap()
+ if err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return toRootless(spec, uidMap, gidMap)
+}
+
+// toRootless was forked from github.com/opencontainers/runc/libcontainer/specconv
+func toRootless(spec *specs.Spec, uidMap, gidMap []user.IDMap) error {
+ if err := configureUserNS(spec, uidMap, gidMap); err != nil {
+ return err
+ }
+ if err := configureMounts(spec); err != nil {
+ return err
+ }
+
+ // Remove cgroup settings.
+ spec.Linux.Resources = nil
+ spec.Linux.CgroupsPath = ""
+ return nil
+}
+
+// configureUserNS adds userns and the current ID map to the spec.
+// Since we are already in userns, ideally we should not need to add userns.
+// However, currently rootless runc always requires userns to be added.
+// https://github.com/opencontainers/runc/issues/1837
+func configureUserNS(spec *specs.Spec, uidMap, gidMap []user.IDMap) error {
+ spec.Linux.Namespaces = append(spec.Linux.Namespaces, specs.LinuxNamespace{
+ Type: specs.UserNamespace,
+ })
+
+ sort.Slice(uidMap, func(i, j int) bool { return uidMap[i].ID < uidMap[j].ID })
+ uNextContainerID := int64(0)
+ for _, u := range uidMap {
+ spec.Linux.UIDMappings = append(spec.Linux.UIDMappings,
+ specs.LinuxIDMapping{
+ HostID: uint32(u.ID),
+ ContainerID: uint32(uNextContainerID),
+ Size: uint32(u.Count),
+ })
+ uNextContainerID += int64(u.Count)
+ }
+ sort.Slice(gidMap, func(i, j int) bool { return gidMap[i].ID < gidMap[j].ID })
+ gNextContainerID := int64(0)
+ for _, g := range gidMap {
+ spec.Linux.GIDMappings = append(spec.Linux.GIDMappings,
+ specs.LinuxIDMapping{
+ HostID: uint32(g.ID),
+ ContainerID: uint32(gNextContainerID),
+ Size: uint32(g.Count),
+ })
+ gNextContainerID += int64(g.Count)
+ }
+ return nil
+}
+
+func configureMounts(spec *specs.Spec) error {
+ var mounts []specs.Mount
+ for _, mount := range spec.Mounts {
+ // Ignore all mounts that are under /sys, because we add /sys later.
+ if strings.HasPrefix(mount.Destination, "/sys") {
+ continue
+ }
+
+ // Remove all gid= and uid= mappings.
+ // Since we are already in userns, ideally we should not need to do this.
+ // https://github.com/opencontainers/runc/issues/1837
+ var options []string
+ for _, option := range mount.Options {
+ if !strings.HasPrefix(option, "gid=") && !strings.HasPrefix(option, "uid=") {
+ options = append(options, option)
+ }
+ }
+ mount.Options = options
+ mounts = append(mounts, mount)
+ }
+
+ // Add the sysfs mount as an rbind, because we can't mount /sys unless we have netns.
+ // TODO: keep original /sys mount when we have netns.
+ mounts = append(mounts, specs.Mount{
+ Source: "/sys",
+ Destination: "/sys",
+ Type: "none",
+ Options: []string{"rbind", "nosuid", "noexec", "nodev", "ro"},
+ })
+ spec.Mounts = mounts
+ return nil
+}
diff --git a/vendor/github.com/moby/buildkit/vendor.conf b/vendor/github.com/moby/buildkit/vendor.conf
index 26d6a3b..9bde31d 100644
--- a/vendor/github.com/moby/buildkit/vendor.conf
+++ b/vendor/github.com/moby/buildkit/vendor.conf
@@ -6,8 +6,8 @@
github.com/pmezard/go-difflib v1.0.0
golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993
-github.com/containerd/containerd 08f7ee9828af1783dc98cc5cc1739e915697c667
-github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
+github.com/containerd/containerd b41633746ed4833f52c3c071e8edcfa2713e5677
+github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/sirupsen/logrus v1.0.0
google.golang.org/grpc v1.12.0
@@ -23,7 +23,7 @@
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/opencontainers/runtime-spec v1.0.1
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
-github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
+github.com/containerd/console 5d1b48d6114b8c9666f0c8b916f871af97b0a761
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
@@ -46,7 +46,7 @@
github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f
-github.com/tonistiigi/units 29de085e9400559bd68aea2e7bc21566e7b8281d
+github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
github.com/docker/cli 99576756eb3303b7af8102c502f21a912e3c1af6 https://github.com/tonistiigi/docker-cli.git
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/libnetwork 822e5b59d346b7ad0735df2c8e445e9787320e67
@@ -60,8 +60,10 @@
github.com/codahale/hdrhistogram f8ad88b59a584afeee9d334eff879b104439117b
github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc
-github.com/opencontainers/selinux 74a747aeaf2d66097b6908f572794f49f07dda2c
# used by dockerfile tests
gotest.tools v2.1.0
github.com/google/go-cmp v0.2.0
+
+# used by rootless spec conv test
+github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
diff --git a/vendor/github.com/moby/buildkit/worker/workercontroller.go b/vendor/github.com/moby/buildkit/worker/workercontroller.go
index 2e52006..b07db8f 100644
--- a/vendor/github.com/moby/buildkit/worker/workercontroller.go
+++ b/vendor/github.com/moby/buildkit/worker/workercontroller.go
@@ -4,6 +4,7 @@
"sync"
"github.com/containerd/containerd/filters"
+ "github.com/moby/buildkit/client"
"github.com/pkg/errors"
)
@@ -58,3 +59,19 @@
}
// TODO: add Get(Constraint) (*Worker, error)
+
+func (c *Controller) WorkerInfos() []client.WorkerInfo {
+ workers, err := c.List()
+ if err != nil {
+ return nil
+ }
+ out := make([]client.WorkerInfo, 0, len(workers))
+ for _, w := range workers {
+ out = append(out, client.WorkerInfo{
+ ID: w.ID(),
+ Labels: w.Labels(),
+ Platforms: w.Platforms(),
+ })
+ }
+ return out
+}