Merge pull request #42061 from thaJeztah/20.10_backport_bump_buildkit

[20.10 backport] vendor: github.com/moby/buildkit v0.8.2
diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
index e731e3e..8bd9b92 100644
--- a/builder/builder-next/adapters/containerimage/pull.go
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -13,7 +13,9 @@
 
 	"github.com/containerd/containerd/content"
 	containerderrors "github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/gc"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/platforms"
 	ctdreference "github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
@@ -34,6 +36,7 @@
 	"github.com/moby/buildkit/source"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/imageutil"
+	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/resolver"
 	digest "github.com/opencontainers/go-digest"
@@ -54,6 +57,8 @@
 	ImageStore      image.Store
 	RegistryHosts   docker.RegistryHosts
 	LayerStore      layer.Store
+	LeaseManager    leases.Manager
+	GarbageCollect  func(ctx context.Context) (gc.Stats, error)
 }
 
 // Source is the source implementation for accessing container images
@@ -101,7 +106,7 @@
 	key := "getconfig::" + ref + "::" + platforms.Format(p)
 	res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
 		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
-		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, nil, platform)
+		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
 		if err != nil {
 			return nil, err
 		}
@@ -183,6 +188,7 @@
 type puller struct {
 	is               *Source
 	resolveLocalOnce sync.Once
+	g                flightcontrol.Group
 	src              *source.ImageIdentifier
 	desc             ocispec.Descriptor
 	ref              string
@@ -253,9 +259,7 @@
 }
 
 func (p *puller) resolve(ctx context.Context, g session.Group) error {
-	// key is used to synchronize resolutions that can happen in parallel when doing multi-stage.
-	key := "resolve::" + p.ref + "::" + platforms.Format(p.platform)
-	_, err := p.is.g.Do(ctx, key, func(ctx context.Context) (_ interface{}, err error) {
+	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) {
 		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
 		defer func() {
 			resolveProgressDone(err)
@@ -329,6 +333,10 @@
 		return dgst.String(), nil, false, nil
 	}
 
+	if len(p.config) == 0 {
+		return "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
+	}
+
 	k := cacheKeyFromConfig(p.config).String()
 	if k == "" {
 		dgst, err := p.mainManifestKey(p.platform)
@@ -360,8 +368,10 @@
 
 func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) {
 	p.resolveLocal()
-	if err := p.resolve(ctx, g); err != nil {
-		return nil, err
+	if len(p.config) == 0 {
+		if err := p.resolve(ctx, g); err != nil {
+			return nil, err
+		}
 	}
 
 	if p.config != nil {
@@ -384,6 +394,17 @@
 
 	ongoing := newJobs(p.ref)
 
+	ctx, done, err := leaseutil.WithLease(ctx, p.is.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		done(context.TODO())
+		if p.is.GarbageCollect != nil {
+			go p.is.GarbageCollect(context.TODO())
+		}
+	}()
+
 	pctx, stopProgress := context.WithCancel(ctx)
 
 	pw, _, ctx := progress.FromContext(ctx)
@@ -406,6 +427,8 @@
 
 	platform := platforms.Only(p.platform)
 
+	var nonLayers []digest.Digest
+
 	var (
 		schema1Converter *schema1.Converter
 		handlers         []images.Handler
@@ -426,6 +449,7 @@
 			case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
 				images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex,
 				images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+				nonLayers = append(nonLayers, desc.Digest)
 			default:
 				return nil, images.ErrSkipDesc
 			}
@@ -435,8 +459,6 @@
 
 		// Get all the children for a descriptor
 		childrenHandler := images.ChildrenHandler(p.is.ContentStore)
-		// Set any children labels for that content
-		childrenHandler = images.SetChildrenLabels(p.is.ContentStore, childrenHandler)
 		// Filter the children by the platform
 		childrenHandler = images.FilterPlatforms(childrenHandler, platform)
 		// Limit manifests pulled to the best match in an index
@@ -539,9 +561,6 @@
 
 	defer func() {
 		<-progressDone
-		for _, desc := range mfst.Layers {
-			p.is.ContentStore.Delete(context.TODO(), desc.Digest)
-		}
 	}()
 
 	r := image.NewRootFS()
@@ -557,6 +576,16 @@
 		return nil, err
 	}
 
+	// keep manifest blobs until ref is alive for cache
+	for _, nl := range nonLayers {
+		if err := p.is.LeaseManager.AddResource(ctx, leases.Lease{ID: ref.ID()}, leases.Resource{
+			ID:   nl.String(),
+			Type: "content",
+		}); err != nil {
+			return nil, err
+		}
+	}
+
 	// TODO: handle windows layers for cross platform builds
 
 	if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
@@ -801,6 +830,7 @@
 	var img ocispec.Image
 	err := json.Unmarshal(dt, &img)
 	if err != nil {
+		logrus.WithError(err).Error("failed to unmarshal image config for cache key")
 		return digest.FromBytes(dt)
 	}
 	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
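The pull.go hunks above replace manual layer cleanup with containerd lease management. A minimal sketch of that pattern, assuming only the APIs already referenced in the diff (leaseutil.WithLease, leaseutil.MakeTemporary, leases.Manager.AddResource); pinNonLayers and its pull callback are hypothetical names used for illustration:

```go
// Sketch only: mirrors the lease pattern introduced above, using the
// buildkit/containerd APIs referenced in the diff.
package pullsketch

import (
	"context"
	"time"

	"github.com/containerd/containerd/leases"
	"github.com/moby/buildkit/util/leaseutil"
	digest "github.com/opencontainers/go-digest"
)

// pinNonLayers (hypothetical helper) runs pull under a temporary 5-minute lease so
// containerd GC cannot reclaim blobs mid-fetch, then attaches the manifest/config
// ("non-layer") blobs to the lease identified by refID so they outlive the pull.
func pinNonLayers(ctx context.Context, lm leases.Manager, refID string, nonLayers []digest.Digest, pull func(context.Context) error) error {
	ctx, done, err := leaseutil.WithLease(ctx, lm, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
	if err != nil {
		return err
	}
	// Releasing the temporary lease makes anything that was fetched but not pinned
	// eligible for the next metadata garbage collection (see the GarbageCollect
	// callback wired in above).
	defer done(context.TODO())

	if err := pull(ctx); err != nil {
		return err
	}

	// Keep manifest and config blobs alive for as long as the cache ref's lease exists.
	for _, nl := range nonLayers {
		if err := lm.AddResource(ctx, leases.Lease{ID: refID}, leases.Resource{
			ID:   nl.String(),
			Type: "content",
		}); err != nil {
			return err
		}
	}
	return nil
}
```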
diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go
index 730917e..fd086ca 100644
--- a/builder/builder-next/controller.go
+++ b/builder/builder-next/controller.go
@@ -111,6 +111,7 @@
 		PruneRefChecker: refChecker,
 		LeaseManager:    lm,
 		ContentStore:    store,
+		GarbageCollect:  mdb.GarbageCollect,
 	})
 	if err != nil {
 		return nil, err
@@ -125,6 +126,8 @@
 		ReferenceStore:  dist.ReferenceStore,
 		RegistryHosts:   opt.RegistryHosts,
 		LayerStore:      dist.LayerStore,
+		LeaseManager:    lm,
+		GarbageCollect:  mdb.GarbageCollect,
 	})
 	if err != nil {
 		return nil, err
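The controller now passes the lease manager and mdb.GarbageCollect into both option structs. A small sketch of the callback shape, assuming only the GarbageCollect field type shown in the diff; gcFunc and collectAsync are illustrative names:

```go
// Sketch only: the GarbageCollect callback wired in above has this shape.
package controllersketch

import (
	"context"
	"log"

	"github.com/containerd/containerd/gc"
)

// gcFunc matches the SourceOpt.GarbageCollect field type from the diff.
type gcFunc func(ctx context.Context) (gc.Stats, error)

// collectAsync (hypothetical helper) shows how the pull path uses the callback:
// fire-and-forget after a temporary lease is released, tolerating a nil callback.
func collectAsync(gcFn gcFunc) {
	if gcFn == nil {
		return
	}
	go func() {
		if _, err := gcFn(context.TODO()); err != nil {
			log.Printf("metadata garbage collection failed: %v", err)
		}
	}()
}
```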
diff --git a/contrib/check-config.sh b/contrib/check-config.sh
index 849dc32..b363d3d 100755
--- a/contrib/check-config.sh
+++ b/contrib/check-config.sh
@@ -198,6 +198,7 @@
 	VETH BRIDGE BRIDGE_NETFILTER
 	IP_NF_FILTER IP_NF_TARGET_MASQUERADE
 	NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS}
+	NETFILTER_XT_MARK
 	IP_NF_NAT NF_NAT
 
 	# required for bind-mounting /dev/mqueue into containers
diff --git a/contrib/dockerd-rootless.sh b/contrib/dockerd-rootless.sh
index 62da531..1a4d1da 100755
--- a/contrib/dockerd-rootless.sh
+++ b/contrib/dockerd-rootless.sh
@@ -18,6 +18,12 @@
 # See the documentation for the further information: https://docs.docker.com/engine/security/rootless/
 
 set -e -x
+case "$1" in
+	"check" | "install" | "uninstall")
+		echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
+		exit 1
+		;;
+esac
 if ! [ -w $XDG_RUNTIME_DIR ]; then
 	echo "XDG_RUNTIME_DIR needs to be set and writable"
 	exit 1
diff --git a/daemon/logger/loginfo.go b/daemon/logger/loginfo.go
index 947abd9..1203442 100644
--- a/daemon/logger/loginfo.go
+++ b/daemon/logger/loginfo.go
@@ -42,7 +42,7 @@
 	}
 
 	labelsRegex, ok := info.Config["labels-regex"]
-	if ok && len(labels) > 0 {
+	if ok && len(labelsRegex) > 0 {
 		re, err := regexp.Compile(labelsRegex)
 		if err != nil {
 			return nil, err
diff --git a/vendor.conf b/vendor.conf
index cdf48ae..68b4f86 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -142,7 +142,7 @@
 github.com/cilium/ebpf                              1c8d4c9ef7759622653a1d319284a44652333b28
 
 # cluster
-github.com/docker/swarmkit                          d6592ddefd8a5319aadff74c558b816b1a0b2590
+github.com/docker/swarmkit                          17d8d4e4d8bdec33d386e6362d3537fa9493ba00
 github.com/gogo/protobuf                            5628607bb4c51c3157aacc3a50f0ab707582b805 # v1.3.1
 github.com/golang/protobuf                          84668698ea25b64748563aa20726db66a6b8d299 # v1.3.5
 github.com/cloudflare/cfssl                         5d63dbd981b5c408effbb58c442d54761ff94fbd # 1.3.2
diff --git a/vendor/github.com/docker/swarmkit/manager/manager.go b/vendor/github.com/docker/swarmkit/manager/manager.go
index 27820ec..6c31ef3 100644
--- a/vendor/github.com/docker/swarmkit/manager/manager.go
+++ b/vendor/github.com/docker/swarmkit/manager/manager.go
@@ -1049,7 +1049,16 @@
 
 	go func(d *dispatcher.Dispatcher) {
 		// Initialize the dispatcher.
-		d.Init(m.raftNode, dispatcher.DefaultConfig(), drivers.New(m.config.PluginGetter), m.config.SecurityConfig)
+		var cluster *api.Cluster
+		s.View(func(tx store.ReadTx) {
+			cluster = store.GetCluster(tx, clusterID)
+		})
+		var defaultConfig = dispatcher.DefaultConfig()
+		heartbeatPeriod, err := gogotypes.DurationFromProto(cluster.Spec.Dispatcher.HeartbeatPeriod)
+		if err == nil {
+			defaultConfig.HeartbeatPeriod = heartbeatPeriod
+		}
+		d.Init(m.raftNode, defaultConfig, drivers.New(m.config.PluginGetter), m.config.SecurityConfig)
 		if err := d.Run(ctx); err != nil {
 			log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
 		}
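With this change the dispatcher honors the cluster spec's heartbeat period instead of always using the compiled-in default. A sketch of the conversion-with-fallback logic, assuming only gogotypes.DurationFromProto from the diff; heartbeatPeriod is a hypothetical helper:

```go
// Sketch only: conversion-with-fallback for the dispatcher heartbeat period.
package main

import (
	"fmt"
	"time"

	gogotypes "github.com/gogo/protobuf/types"
)

// heartbeatPeriod (hypothetical helper) returns the duration from the cluster
// spec when it converts cleanly, otherwise the compiled-in default; a nil proto
// duration fails conversion, so the default is kept.
func heartbeatPeriod(fromSpec *gogotypes.Duration, fallback time.Duration) time.Duration {
	if d, err := gogotypes.DurationFromProto(fromSpec); err == nil {
		return d
	}
	return fallback
}

func main() {
	fmt.Println(heartbeatPeriod(&gogotypes.Duration{Seconds: 10}, 5*time.Second)) // 10s
	fmt.Println(heartbeatPeriod(nil, 5*time.Second))                              // 5s
}
```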
diff --git a/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go b/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
index fda0089..78dd7dc 100644
--- a/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
+++ b/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
@@ -721,15 +721,32 @@
 
 		newT := *t
 		newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
-		if explanation != "" {
-			newT.Status.Err = "no suitable node (" + explanation + ")"
+		sv := service.SpecVersion
+		tv := newT.SpecVersion
+		if sv != nil && tv != nil && sv.Index > tv.Index {
+			log.G(ctx).WithField("task.id", t.ID).Debug(
+				"task belongs to old revision of service",
+			)
+			if t.Status.State == api.TaskStatePending && t.DesiredState >= api.TaskStateShutdown {
+				log.G(ctx).WithField("task.id", t.ID).Debug(
+					"task is desired shutdown, scheduler will go ahead and do so",
+				)
+				newT.Status.State = api.TaskStateShutdown
+				newT.Status.Err = ""
+			}
 		} else {
-			newT.Status.Err = "no suitable node"
+			if explanation != "" {
+				newT.Status.Err = "no suitable node (" + explanation + ")"
+			} else {
+				newT.Status.Err = "no suitable node"
+			}
+
+			// re-enqueue a task that should still be attempted
+			s.enqueue(&newT)
 		}
+
 		s.allTasks[t.ID] = &newT
 		schedulingDecisions[t.ID] = schedulingDecision{old: t, new: &newT}
-
-		s.enqueue(&newT)
 	}
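The scheduler change above distinguishes tasks that belong to an outdated service spec version: a pending task already desired at or past shutdown is shut down by the scheduler rather than re-enqueued with a "no suitable node" error. A simplified restatement as a pure decision helper; the types below are stand-ins, not swarmkit's api package:

```go
// Sketch only: the scheduling decision added above, restated with simplified types.
package main

import "fmt"

type taskState int

const (
	statePending taskState = iota
	stateRunning
	stateShutdown
)

type decision struct {
	markShutdown bool // set Status.State = Shutdown and clear Status.Err
	reenqueue    bool // record "no suitable node" and retry placement later
}

// decide: a pending task whose spec version is older than the service's and whose
// desired state is already at or past shutdown is shut down by the scheduler;
// tasks from the current revision keep getting re-enqueued.
func decide(serviceSpecIndex, taskSpecIndex uint64, state, desired taskState) decision {
	if serviceSpecIndex > taskSpecIndex {
		if state == statePending && desired >= stateShutdown {
			return decision{markShutdown: true}
		}
		return decision{} // stale task: neither shut down here nor re-enqueued
	}
	return decision{reenqueue: true}
}

func main() {
	fmt.Println(decide(3, 2, statePending, stateShutdown)) // {true false}
	fmt.Println(decide(2, 2, statePending, stateRunning))  // {false true}
}
```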
 }