Merge pull request #35844 from thaJeztah/remove-test-events-limit
Remove TestEventsLimit(), and minor cleanups
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a38f54d..519e238 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -303,9 +303,8 @@
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the
-global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
-file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
-repository.
+[/project/GOVERNANCE.md](/project/GOVERNANCE.md)
+file in this repository.
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
@@ -371,6 +370,11 @@
used to ping maintainers to review a pull request, a proposal or an
issue.
+The open source governance for this repository is handled via the charter of the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc).
+For any concerns with the community process regarding technical contributions,
+please contact the TSC. More information on project governance is available in
+our [project/GOVERNANCE.md](/project/GOVERNANCE.md) document.
+
### Guideline violations — 3 strikes method
The point of this section is not to find opportunities to punish people, but we
diff --git a/MAINTAINERS b/MAINTAINERS
index a896687..4c831d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,12 +1,14 @@
# Moby maintainers file
#
-# This file describes who runs the docker/docker project and how.
-# This is a living document - if you see something out of date or missing, speak up!
+# This file describes the maintainer groups within the moby/moby project.
+# More detail on Moby project governance is available in the
+# project/GOVERNANCE.md file found in this repository.
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
+# TODO(estesp): This file should not necessarily depend on docker/opensource
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go
index dabab3b..fd95420 100644
--- a/api/server/router/image/image_routes.go
+++ b/api/server/router/image/image_routes.go
@@ -13,7 +13,6 @@
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
- "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
@@ -46,9 +45,6 @@
if err != nil && err != io.EOF { //Do not fail if body is empty.
return err
}
- if c == nil {
- c = &container.Config{}
- }
commitCfg := &backend.ContainerCommitConfig{
ContainerCommitConfig: types.ContainerCommitConfig{
diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go
index b62d6fc..20f1650 100644
--- a/builder/dockerfile/builder.go
+++ b/builder/dockerfile/builder.go
@@ -396,7 +396,8 @@
}
dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, newBuildArgs(b.options.BuildArgs), newStagesBuildResults())
- dispatchRequest.state.runConfig = config
+ // We make mutations to the configuration, ensure we have a copy
+ dispatchRequest.state.runConfig = copyRunConfig(config)
dispatchRequest.state.imageID = config.Image
for _, cmd := range commands {
err := dispatch(dispatchRequest, cmd)
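The copy here matters because the dispatch handlers mutate `state.runConfig` in place (ENV, CMD, and similar steps), so without it a `BuildFromConfig` caller such as `docker commit --change` would see its passed-in config modified. A minimal, self-contained sketch of the defensive-copy pattern — the `Config` type here is a simplified, hypothetical stand-in for `container.Config`:

```go
package main

import "fmt"

// Config is a simplified stand-in for container.Config (hypothetical).
type Config struct {
	Image string
	Env   []string
}

// copyConfig sketches the defensive copy: build steps may mutate the
// working config, and the caller's struct must not observe that.
func copyConfig(c *Config) *Config {
	cp := *c
	cp.Env = append([]string(nil), c.Env...) // unalias the slice
	return &cp
}

func main() {
	orig := &Config{Image: "busybox", Env: []string{"A=1"}}
	work := copyConfig(orig)
	work.Env = append(work.Env, "B=2") // mutation stays local to the build
	fmt.Println(orig.Env, work.Env)    // [A=1] [A=1 B=2]
}
```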
diff --git a/daemon/commit.go b/daemon/commit.go
index 0053132..1bdbd6b 100644
--- a/daemon/commit.go
+++ b/daemon/commit.go
@@ -149,6 +149,10 @@
defer daemon.containerUnpause(container)
}
+ if c.MergeConfigs && c.Config == nil {
+ c.Config = container.Config
+ }
+
newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes)
if err != nil {
return "", err
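With this fallback, an API commit that supplies no explicit config inherits the container's own config rather than an empty one (the `TestCommitInheritsEnv` integration test added below exercises this). A hedged client-side usage sketch, assuming a reachable daemon and an existing container named "mycontainer":

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // client constructor of this era
	if err != nil {
		log.Fatal(err)
	}

	// No Config is supplied, so with the fallback above the commit
	// inherits the container's own config (env, cmd, labels, ...)
	// instead of an empty one.
	resp, err := cli.ContainerCommit(ctx, "mycontainer", types.ContainerCommitOptions{
		Reference: "example/committed:latest", // hypothetical tag
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("committed image:", resp.ID)
}
```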
diff --git a/daemon/daemon.go b/daemon/daemon.go
index e63e209..dd8c100 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -62,8 +62,8 @@
"github.com/pkg/errors"
)
-// MainNamespace is the name of the namespace used for users containers
-const MainNamespace = "moby"
+// ContainersNamespace is the name of the namespace used for users containers
+const ContainersNamespace = "moby"
var (
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
@@ -247,6 +247,11 @@
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
+ } else if !daemon.configStore.LiveRestoreEnabled {
+ if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
+ logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
+ return
+ }
}
if c.IsRunning() || c.IsPaused() {
@@ -317,24 +322,24 @@
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
- } else {
- // get list of containers we need to restart
+ }
- // Do not autostart containers which
- // has endpoints in a swarm scope
- // network yet since the cluster is
- // not initialized yet. We will start
- // it after the cluster is
- // initialized.
- if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
- mapLock.Lock()
- restartContainers[c] = make(chan struct{})
- mapLock.Unlock()
- } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
- mapLock.Lock()
- removeContainers[c.ID] = c
- mapLock.Unlock()
- }
+ // get list of containers we need to restart
+
+ // Do not autostart containers which
+ // have endpoints in a swarm scope
+ // network yet, since the cluster is
+ // not initialized yet. We will start
+ // them after the cluster is
+ // initialized.
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
+ mapLock.Lock()
+ restartContainers[c] = make(chan struct{})
+ mapLock.Unlock()
+ } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
+ mapLock.Lock()
+ removeContainers[c.ID] = c
+ mapLock.Unlock()
}
c.Lock()
@@ -890,7 +895,7 @@
go d.execCommandGC()
- d.containerd, err = containerdRemote.NewClient(MainNamespace, d)
+ d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
if err != nil {
return nil, err
}
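The rename only changes the constant's name for clarity; the namespace value stays `"moby"`. For context, a sketch of how a containerd client is scoped to that namespace — assuming containerd's Go client API of this era and the daemon's default socket path:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Socket path as used by the daemon of this era (an assumption).
	client, err := containerd.New("/var/run/docker/containerd/docker-containerd.sock",
		containerd.WithDefaultNamespace("moby")) // ContainersNamespace
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Alternatively, scope a single call's context explicitly.
	ctx := namespaces.WithNamespace(context.Background(), "moby")
	containers, err := client.Containers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("containers in the moby namespace:", len(containers))
}
```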
diff --git a/daemon/health.go b/daemon/health.go
index f40c0dd..9acf190 100644
--- a/daemon/health.go
+++ b/daemon/health.go
@@ -80,6 +80,7 @@
execConfig.Tty = false
execConfig.Privileged = false
execConfig.User = cntr.Config.User
+ execConfig.WorkingDir = cntr.Config.WorkingDir
linkedEnv, err := d.setupLinkedContainers(cntr)
if err != nil {
diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go
index 4ea9420..25dd215 100644
--- a/daemon/logger/awslogs/cloudwatchlogs.go
+++ b/daemon/logger/awslogs/cloudwatchlogs.go
@@ -95,6 +95,17 @@
}
}
+// eventBatch holds the events that are batched for submission and the
+// associated data about it.
+//
+// Warning: this type is not threadsafe and must not be used
+// concurrently. This type is expected to be consumed in a single
+// goroutine and never concurrently.
+type eventBatch struct {
+ batch []wrappedEvent
+ bytes int
+}
+
// New creates an awslogs logger using the configuration passed in on the
// context. Supported context configuration variables are awslogs-region,
// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern
@@ -389,32 +400,32 @@
// Logs, the processEvents method is called. If a multiline pattern is not
// configured, log events are submitted to the processEvents method immediately.
func (l *logStream) collectBatch() {
- timer := newTicker(batchPublishFrequency)
- var events []wrappedEvent
+ ticker := newTicker(batchPublishFrequency)
var eventBuffer []byte
var eventBufferTimestamp int64
+ var batch = newEventBatch()
for {
select {
- case t := <-timer.C:
+ case t := <-ticker.C:
// If event buffer is older than batch publish frequency flush the event buffer
if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond)
eventBufferNegative := eventBufferAge < 0
if eventBufferExpired || eventBufferNegative {
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
}
}
- l.publishBatch(events)
- events = events[:0]
+ l.publishBatch(batch)
+ batch.reset()
case msg, more := <-l.messages:
if !more {
// Flush event buffer and release resources
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
- l.publishBatch(events)
- events = events[:0]
+ l.publishBatch(batch)
+ batch.reset()
return
}
if eventBufferTimestamp == 0 {
@@ -425,7 +436,7 @@
if l.multilinePattern.Match(unprocessedLine) || len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent {
// This is a new log event or we will exceed max bytes per event
// so flush the current eventBuffer to events and reset timestamp
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
eventBuffer = eventBuffer[:0]
}
@@ -434,7 +445,7 @@
eventBuffer = append(eventBuffer, processedLine...)
logger.PutMessage(msg)
} else {
- events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
+ l.processEvent(batch, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
logger.PutMessage(msg)
}
}
@@ -450,8 +461,7 @@
// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event
// byte overhead (defined in perEventBytes) which is accounted for in split- and
// batch-calculations.
-func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent {
- bytes := 0
+func (l *logStream) processEvent(batch *eventBatch, unprocessedLine []byte, timestamp int64) {
for len(unprocessedLine) > 0 {
// Split line length so it does not exceed the maximum
lineBytes := len(unprocessedLine)
@@ -459,38 +469,33 @@
lineBytes = maximumBytesPerEvent
}
line := unprocessedLine[:lineBytes]
- unprocessedLine = unprocessedLine[lineBytes:]
- if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) {
- // Publish an existing batch if it's already over the maximum number of events or if adding this
- // event would push it over the maximum number of total bytes.
- l.publishBatch(events)
- events = events[:0]
- bytes = 0
- }
- events = append(events, wrappedEvent{
+
+ event := wrappedEvent{
inputLogEvent: &cloudwatchlogs.InputLogEvent{
Message: aws.String(string(line)),
Timestamp: aws.Int64(timestamp),
},
- insertOrder: len(events),
- })
- bytes += (lineBytes + perEventBytes)
+ insertOrder: batch.count(),
+ }
+
+ added := batch.add(event, lineBytes)
+ if added {
+ unprocessedLine = unprocessedLine[lineBytes:]
+ } else {
+ l.publishBatch(batch)
+ batch.reset()
+ }
}
- return events
}
// publishBatch calls PutLogEvents for a given set of InputLogEvents,
// accounting for sequencing requirements (each request must reference the
// sequence token returned by the previous request).
-func (l *logStream) publishBatch(events []wrappedEvent) {
- if len(events) == 0 {
+func (l *logStream) publishBatch(batch *eventBatch) {
+ if batch.isEmpty() {
return
}
-
- // events in a batch must be sorted by timestamp
- // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
- sort.Sort(byTimestamp(events))
- cwEvents := unwrapEvents(events)
+ cwEvents := unwrapEvents(batch.events())
nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken)
@@ -615,3 +620,70 @@
}
return cwEvents
}
+
+func newEventBatch() *eventBatch {
+ return &eventBatch{
+ batch: make([]wrappedEvent, 0),
+ bytes: 0,
+ }
+}
+
+// events returns a slice of wrappedEvents sorted in order of their
+// timestamps and then by their insertion order (see `byTimestamp`).
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) events() []wrappedEvent {
+ sort.Sort(byTimestamp(b.batch))
+ return b.batch
+}
+
+// add adds an event to the batch of events, accounting for the
+// necessary overhead for an event to be logged. It returns false
+// if the event cannot be added to the batch due to service
+// limits.
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) add(event wrappedEvent, size int) bool {
+ addBytes := size + perEventBytes
+
+ // verify we are still within service limits
+ switch {
+ case len(b.batch)+1 > maximumLogEventsPerPut:
+ return false
+ case b.bytes+addBytes > maximumBytesPerPut:
+ return false
+ }
+
+ b.bytes += addBytes
+ b.batch = append(b.batch, event)
+
+ return true
+}
+
+// count is the number of batched events. Warning: this method
+// is not threadsafe and must not be used concurrently.
+func (b *eventBatch) count() int {
+ return len(b.batch)
+}
+
+// size is the total number of bytes that the batch represents.
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) size() int {
+ return b.bytes
+}
+
+func (b *eventBatch) isEmpty() bool {
+ zeroEvents := b.count() == 0
+ zeroSize := b.size() == 0
+ return zeroEvents && zeroSize
+}
+
+// reset prepares the batch for reuse.
+func (b *eventBatch) reset() {
+ b.bytes = 0
+ b.batch = b.batch[:0]
+}
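The batching contract above — `add` returns false at a service limit, the caller publishes and resets, then retries — can be seen in isolation in this self-contained mock (limits shrunk for illustration; the driver's real constants mirror the CloudWatch Logs PutLogEvents service limits):

```go
package main

import "fmt"

// Shrunken stand-ins for the driver's limits.
const (
	maxEvents   = 3
	maxBytes    = 10
	perEventFee = 2 // per-event byte overhead, like perEventBytes
)

type batch struct {
	events []string
	bytes  int
}

// add mirrors eventBatch.add: reject instead of overflowing.
func (b *batch) add(msg string) bool {
	cost := len(msg) + perEventFee
	if len(b.events)+1 > maxEvents || b.bytes+cost > maxBytes {
		return false
	}
	b.events = append(b.events, msg)
	b.bytes += cost
	return true
}

func (b *batch) reset() { b.events, b.bytes = b.events[:0], 0 }

func main() {
	b := &batch{}
	// Each message is assumed to fit an empty batch; the real driver
	// splits oversized lines (maximumBytesPerEvent) before adding.
	for _, m := range []string{"aa", "bb", "cc", "dd"} {
		for !b.add(m) {
			fmt.Println("publish:", b.events) // stand-in for publishBatch
			b.reset()
		}
	}
	fmt.Println("final:", b.events) // [cc dd]
}
```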
diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go
index 7ebc5de..67ea474 100644
--- a/daemon/logger/awslogs/cloudwatchlogs_test.go
+++ b/daemon/logger/awslogs/cloudwatchlogs_test.go
@@ -49,6 +49,15 @@
}
}
+func testEventBatch(events []wrappedEvent) *eventBatch {
+ batch := newEventBatch()
+ for _, event := range events {
+ eventlen := len([]byte(*event.inputLogEvent.Message))
+ batch.add(event, eventlen)
+ }
+ return batch
+}
+
func TestNewAWSLogsClientUserAgentHandler(t *testing.T) {
info := logger.Info{
Config: map[string]string{
@@ -212,7 +221,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -257,7 +266,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -291,7 +300,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -354,7 +363,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -859,7 +868,8 @@
}
func TestCollectBatchMaxTotalBytes(t *testing.T) {
- mockClient := newMockClientBuffered(1)
+ expectedPuts := 2
+ mockClient := newMockClientBuffered(expectedPuts)
stream := &logStream{
client: mockClient,
logGroupName: groupName,
@@ -867,11 +877,14 @@
sequenceToken: aws.String(sequenceToken),
messages: make(chan *logger.Message),
}
- mockClient.putLogEventsResult <- &putLogEventsResult{
- successResult: &cloudwatchlogs.PutLogEventsOutput{
- NextSequenceToken: aws.String(nextSequenceToken),
- },
+ for i := 0; i < expectedPuts; i++ {
+ mockClient.putLogEventsResult <- &putLogEventsResult{
+ successResult: &cloudwatchlogs.PutLogEventsOutput{
+ NextSequenceToken: aws.String(nextSequenceToken),
+ },
+ }
}
+
var ticks = make(chan time.Time)
newTicker = func(_ time.Duration) *time.Ticker {
return &time.Ticker{
@@ -881,32 +894,57 @@
go stream.collectBatch()
- longline := strings.Repeat("A", maximumBytesPerPut)
+ numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes)
+ // maxline is the maximum line that could be submitted after
+ // accounting for its overhead.
+ maxline := strings.Repeat("A", maximumBytesPerPut-(perEventBytes*numPayloads))
+ // This will be split and batched up to the `maximumBytesPerPut'
+ // (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
+ // should also tolerate an offset within that range.
stream.Log(&logger.Message{
- Line: []byte(longline + "B"),
+ Line: []byte(maxline[:len(maxline)/2]),
+ Timestamp: time.Time{},
+ })
+ stream.Log(&logger.Message{
+ Line: []byte(maxline[len(maxline)/2:]),
+ Timestamp: time.Time{},
+ })
+ stream.Log(&logger.Message{
+ Line: []byte("B"),
Timestamp: time.Time{},
})
- // no ticks
+ // no ticks, guarantee batch by size (and chan close)
stream.Close()
argument := <-mockClient.putLogEventsArgument
if argument == nil {
t.Fatal("Expected non-nil PutLogEventsInput")
}
- bytes := 0
+
+ // Should total to the maximum allowed bytes.
+ eventBytes := 0
for _, event := range argument.LogEvents {
- bytes += len(*event.Message)
+ eventBytes += len(*event.Message)
}
- if bytes > maximumBytesPerPut {
- t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes)
+ eventsOverhead := len(argument.LogEvents) * perEventBytes
+ payloadTotal := eventBytes + eventsOverhead
+ // lowestMaxBatch allows the payload to be offset if the messages
+ // don't align exactly with the maximum event size.
+ lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent
+
+ if payloadTotal > maximumBytesPerPut {
+ t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, payloadTotal)
+ }
+ if payloadTotal < lowestMaxBatch {
+ t.Errorf("Batch to be no less than %d but was %d", lowestMaxBatch, payloadTotal)
}
argument = <-mockClient.putLogEventsArgument
if len(argument.LogEvents) != 1 {
t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents))
}
- message := *argument.LogEvents[0].Message
+ message := *argument.LogEvents[len(argument.LogEvents)-1].Message
if message[len(message)-1:] != "B" {
t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
}
diff --git a/daemon/logger/awslogs/cwlogsiface_mock_test.go b/daemon/logger/awslogs/cwlogsiface_mock_test.go
index 82bb34b..d0a2eba 100644
--- a/daemon/logger/awslogs/cwlogsiface_mock_test.go
+++ b/daemon/logger/awslogs/cwlogsiface_mock_test.go
@@ -1,6 +1,10 @@
package awslogs
-import "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+)
type mockcwlogsclient struct {
createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput
@@ -67,7 +71,30 @@
LogGroupName: input.LogGroupName,
LogStreamName: input.LogStreamName,
}
+
+ // Intended mock output
output := <-m.putLogEventsResult
+
+ // Check the enforced limits in the mock
+ totalBytes := 0
+ for _, evt := range events {
+ if evt.Message == nil {
+ continue
+ }
+ eventBytes := len([]byte(*evt.Message))
+ if eventBytes > maximumBytesPerEvent {
+ // exceeded per event message size limits
+ return nil, fmt.Errorf("maximum bytes per event exceeded: Event too large %d, max allowed: %d", eventBytes, maximumBytesPerEvent)
+ }
+ // total event bytes including overhead
+ totalBytes += eventBytes + perEventBytes
+ }
+
+ if totalBytes > maximumBytesPerPut {
+ // exceeded per put maximum size limit
+ return nil, fmt.Errorf("maximum bytes per put exceeded: Upload too large %d, max allowed: %d", totalBytes, maximumBytesPerPut)
+ }
+
return output.successResult, output.errorResult
}
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
index ef1e77d..89c2865 100644
--- a/integration-cli/docker_api_logs_test.go
+++ b/integration-cli/docker_api_logs_test.go
@@ -172,6 +172,8 @@
// Get timestamp of second log line
allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true})
+ c.Assert(len(allLogs), checker.GreaterOrEqualThan, 3)
+
t, err := time.Parse(time.RFC3339Nano, strings.Split(allLogs[1], " ")[0])
c.Assert(err, checker.IsNil)
until := t.Format(time.RFC3339Nano)
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 89e62c1..e60f4d5 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -4860,7 +4860,7 @@
}
}
-func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageArg(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `FROM busybox
ARG foo=abc
@@ -4884,7 +4884,7 @@
c.Assert(result.Stdout(), checker.Contains, "bar=def")
}
-func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageGlobalArg(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `ARG tag=nosuchtag
FROM busybox:${tag}
@@ -4909,7 +4909,7 @@
c.Assert(result.Stdout(), checker.Contains, "tag=latest")
}
-func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageUnusedArg(c *check.C) {
imgName := "multifromunusedarg"
dockerfile := `FROM busybox
ARG foo
@@ -5727,7 +5727,7 @@
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
-func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCache(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
@@ -5888,7 +5888,7 @@
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n")
}
-func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCopyFromSyntax(c *check.C) {
dockerfile := `
FROM busybox AS first
COPY foo bar
@@ -5946,7 +5946,7 @@
cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"})
}
-func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCopyFromErrors(c *check.C) {
testCases := []struct {
dockerfile string
expectedError string
@@ -5993,7 +5993,7 @@
}
}
-func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageMultipleBuilds(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar`
@@ -6026,7 +6026,7 @@
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
-func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageImplicitFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY --from=busybox /etc/passwd /mypasswd
@@ -6053,7 +6053,7 @@
}
}
-func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) {
+func (s *DockerRegistrySuite) TestBuildMultiStageImplicitPull(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL)
dockerfile := `
@@ -6083,7 +6083,7 @@
cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"})
}
-func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageNameVariants(c *check.C) {
dockerfile := `
FROM busybox as foo
COPY foo /
@@ -6094,7 +6094,7 @@
FROM foo
COPY --from=foo1 foo f1
COPY --from=FOo2 foo f2
- ` // foo2 case also tests that names are canse insensitive
+ ` // foo2 case also tests that names are case insensitive
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
@@ -6108,7 +6108,7 @@
cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"})
}
-func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) {
+func (s *DockerTrustSuite) TestBuildMultiStageTrusted(c *check.C) {
img1 := s.setupTrustedImage(c, "trusted-build1")
img2 := s.setupTrustedImage(c, "trusted-build2")
dockerFile := fmt.Sprintf(`
@@ -6130,7 +6130,7 @@
dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"})
}
-func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageMultipleBuildsWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
@@ -6218,7 +6218,7 @@
}
// #33176
-func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageResetScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
index 58a50ce..057c2d6 100644
--- a/integration-cli/docker_cli_commit_test.go
+++ b/integration-cli/docker_cli_commit_test.go
@@ -121,11 +121,19 @@
"test", "test-commit")
imageID = strings.TrimSpace(imageID)
+ // The ordering here is due to `PATH` being overridden from the container's
+ // ENV. On Windows, the container doesn't have a `PATH` ENV variable, so
+ // the ordering is the same as the CLI's.
+ expectedEnv := "[PATH=/foo DEBUG=true test=1]"
+ if testEnv.DaemonPlatform() == "windows" {
+ expectedEnv = "[DEBUG=true test=1 PATH=/foo]"
+ }
+
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalized on Windows
expected := map[string]string{
"Config.ExposedPorts": "map[8080/tcp:{}]",
- "Config.Env": "[DEBUG=true test=1 PATH=/foo]",
+ "Config.Env": expectedEnv,
"Config.Labels": "map[foo:bar]",
"Config.Cmd": "[/bin/sh]",
"Config.WorkingDir": prefix + slash + "opt",
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index 6865b92..fb61626 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -1451,7 +1451,7 @@
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
+ "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
// restart daemon.
d.Restart(c)
@@ -2011,7 +2011,7 @@
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
+ "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
// Give time to containerd to process the command if we don't
// the exit event might be received after we do the inspect
@@ -2106,7 +2106,7 @@
result := icmd.RunCommand(
ctrBinary,
"--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace,
+ "--namespace", moby_daemon.ContainersNamespace,
"tasks", "resume", cid)
result.Assert(t, icmd.Success)
diff --git a/integration/build/build_test.go b/integration/build/build_test.go
index b447b62..1271dae 100644
--- a/integration/build/build_test.go
+++ b/integration/build/build_test.go
@@ -197,3 +197,73 @@
resp.Body.Close()
require.NoError(t, err)
}
+
+// TestBuildMultiStageOnBuild checks that ONBUILD commands are applied to
+// multiple subsequent stages
+// #35652
+func TestBuildMultiStageOnBuild(t *testing.T) {
+ defer setupTest(t)()
+ // test both metadata and layer based commands as they may be implemented differently
+ dockerfile := `FROM busybox AS stage1
+ONBUILD RUN echo 'foo' >somefile
+ONBUILD ENV bar=baz
+
+FROM stage1
+RUN cat somefile # fails if ONBUILD RUN fails
+
+FROM stage1
+RUN cat somefile`
+
+ ctx := context.Background()
+ source := fakecontext.New(t, "",
+ fakecontext.WithDockerfile(dockerfile))
+ defer source.Close()
+
+ apiclient := testEnv.APIClient()
+ resp, err := apiclient.ImageBuild(ctx,
+ source.AsTarReader(t),
+ types.ImageBuildOptions{
+ Remove: true,
+ ForceRemove: true,
+ })
+
+ out := bytes.NewBuffer(nil)
+ require.NoError(t, err)
+ _, err = io.Copy(out, resp.Body)
+ resp.Body.Close()
+ require.NoError(t, err)
+
+ assert.Contains(t, out.String(), "Successfully built")
+
+ imageIDs, err := getImageIDsFromBuild(out.Bytes())
+ require.NoError(t, err)
+ assert.Equal(t, 3, len(imageIDs))
+
+ image, _, err := apiclient.ImageInspectWithRaw(context.Background(), imageIDs[2])
+ require.NoError(t, err)
+ assert.Contains(t, image.Config.Env, "bar=baz")
+}
+
+type buildLine struct {
+ Stream string
+ Aux struct {
+ ID string
+ }
+}
+
+func getImageIDsFromBuild(output []byte) ([]string, error) {
+ ids := []string{}
+ for _, line := range bytes.Split(output, []byte("\n")) {
+ if len(line) == 0 {
+ continue
+ }
+ entry := buildLine{}
+ if err := json.Unmarshal(line, &entry); err != nil {
+ return nil, err
+ }
+ if entry.Aux.ID != "" {
+ ids = append(ids, entry.Aux.ID)
+ }
+ }
+ return ids, nil
+}
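`getImageIDsFromBuild` parses the build API's JSON progress stream, collecting the `aux.ID` entries the builder emits per built image. A runnable sketch with illustrative stream lines (exact stream text is an assumption; IDs abbreviated):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type buildLine struct {
	Stream string
	Aux    struct{ ID string }
}

func main() {
	// Illustrative build-stream lines of the shape parsed above.
	sample := []string{
		`{"stream":"Step 1/2 : FROM busybox AS stage1\n"}`,
		`{"aux":{"ID":"sha256:8c811b4aec35"}}`,
		`{"stream":"Successfully built 8c811b4aec35\n"}`,
	}
	for _, line := range sample {
		var entry buildLine
		if err := json.Unmarshal([]byte(line), &entry); err != nil {
			log.Fatal(err)
		}
		if entry.Aux.ID != "" {
			fmt.Println("image:", entry.Aux.ID) // what getImageIDsFromBuild collects
		}
	}
}
```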
diff --git a/integration/container/health_test.go b/integration/container/health_test.go
new file mode 100644
index 0000000..8ed86a8
--- /dev/null
+++ b/integration/container/health_test.go
@@ -0,0 +1,61 @@
+package container
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/gotestyourself/gotestyourself/poll"
+ "github.com/stretchr/testify/require"
+)
+
+// TestHealthCheckWorkdir verifies that health-checks inherit the container's
+// working directory.
+func TestHealthCheckWorkdir(t *testing.T) {
+ defer setupTest(t)()
+ ctx := context.Background()
+ client := request.NewAPIClient(t)
+
+ c, err := client.ContainerCreate(ctx,
+ &container.Config{
+ Image: "busybox",
+ Tty: true,
+ WorkingDir: "/foo",
+ Cmd: strslice.StrSlice([]string{"top"}),
+ Healthcheck: &container.HealthConfig{
+ Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"},
+ Interval: 50 * time.Millisecond,
+ Retries: 3,
+ },
+ },
+ &container.HostConfig{},
+ &network.NetworkingConfig{},
+ "healthtest",
+ )
+ require.NoError(t, err)
+ err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{})
+ require.NoError(t, err)
+
+ poll.WaitOn(t, pollForHealthStatus(ctx, client, c.ID, types.Healthy), poll.WithDelay(100*time.Millisecond))
+}
+
+func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus string) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ inspect, err := client.ContainerInspect(ctx, containerID)
+
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case inspect.State.Health.Status == healthStatus:
+ return poll.Success()
+ default:
+ return poll.Continue("waiting for container to become %s", healthStatus)
+ }
+ }
+}
diff --git a/integration/container/restart_test.go b/integration/container/restart_test.go
new file mode 100644
index 0000000..fe80f09
--- /dev/null
+++ b/integration/container/restart_test.go
@@ -0,0 +1,112 @@
+package container
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/integration-cli/daemon"
+)
+
+func TestDaemonRestartKillContainers(t *testing.T) {
+ type testCase struct {
+ desc string
+ config *container.Config
+ hostConfig *container.HostConfig
+
+ xRunning bool
+ xRunningLiveRestore bool
+ }
+
+ for _, c := range []testCase{
+ {
+ desc: "container without restart policy",
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
+ xRunningLiveRestore: true,
+ },
+ {
+ desc: "container with restart=always",
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
+ hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
+ xRunning: true,
+ xRunningLiveRestore: true,
+ },
+ } {
+ for _, liveRestoreEnabled := range []bool{false, true} {
+ for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){
+ "kill-daemon": func(t *testing.T, d *daemon.Daemon) {
+ if err := d.Kill(); err != nil {
+ t.Fatal(err)
+ }
+ },
+ "stop-daemon": func(t *testing.T, d *daemon.Daemon) {
+ d.Stop(t)
+ },
+ } {
+ t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) {
+ c := c
+ liveRestoreEnabled := liveRestoreEnabled
+ stopDaemon := stopDaemon
+
+ t.Parallel()
+
+ d := daemon.New(t, "", "dockerd", daemon.Config{})
+ client, err := d.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var args []string
+ if liveRestoreEnabled {
+ args = []string{"--live-restore"}
+ }
+
+ d.StartWithBusybox(t, args...)
+ defer d.Stop(t)
+ ctx := context.Background()
+
+ resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
+
+ if err := client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+ t.Fatal(err)
+ }
+
+ stopDaemon(t, d)
+ d.Start(t, args...)
+
+ expected := c.xRunning
+ if liveRestoreEnabled {
+ expected = c.xRunningLiveRestore
+ }
+
+ var running bool
+ for i := 0; i < 30; i++ {
+ inspect, err := client.ContainerInspect(ctx, resp.ID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running = inspect.State.Running
+ if running == expected {
+ break
+ }
+ time.Sleep(2 * time.Second)
+
+ }
+
+ if running != expected {
+ t.Fatalf("got unexpected running state, expected %v, got: %v", expected, running)
+ }
+ // TODO(cpuguy83): test pause states... this seems to be rather undefined currently
+ })
+ }
+ }
+ }
+}
diff --git a/integration/image/commit_test.go b/integration/image/commit_test.go
new file mode 100644
index 0000000..13edbe1
--- /dev/null
+++ b/integration/image/commit_test.go
@@ -0,0 +1,47 @@
+package image
+
+import (
+ "context"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCommitInheritsEnv(t *testing.T) {
+ defer setupTest(t)()
+ client := request.NewAPIClient(t)
+ ctx := context.Background()
+
+ createResp1, err := client.ContainerCreate(ctx, &container.Config{Image: "busybox"}, nil, nil, "")
+ require.NoError(t, err)
+
+ commitResp1, err := client.ContainerCommit(ctx, createResp1.ID, types.ContainerCommitOptions{
+ Changes: []string{"ENV PATH=/bin"},
+ Reference: "test-commit-image",
+ })
+ require.NoError(t, err)
+
+ image1, _, err := client.ImageInspectWithRaw(ctx, commitResp1.ID)
+ require.NoError(t, err)
+
+ expectedEnv1 := []string{"PATH=/bin"}
+ assert.Equal(t, expectedEnv1, image1.Config.Env)
+
+ createResp2, err := client.ContainerCreate(ctx, &container.Config{Image: image1.ID}, nil, nil, "")
+ require.NoError(t, err)
+
+ commitResp2, err := client.ContainerCommit(ctx, createResp2.ID, types.ContainerCommitOptions{
+ Changes: []string{"ENV PATH=/usr/bin:$PATH"},
+ Reference: "test-commit-image",
+ })
+ require.NoError(t, err)
+
+ image2, _, err := client.ImageInspectWithRaw(ctx, commitResp2.ID)
+ require.NoError(t, err)
+ expectedEnv2 := []string{"PATH=/usr/bin:/bin"}
+ assert.Equal(t, expectedEnv2, image2.Config.Env)
+}
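The second commit's `ENV PATH=/usr/bin:$PATH` only yields `/usr/bin:/bin` because `$PATH` now resolves against the env inherited from the first committed image. A toy expansion illustrating the substitution:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Env inherited from the first committed image (see expectedEnv1 above).
	inherited := map[string]string{"PATH": "/bin"}

	// Toy version of the Dockerfile ENV substitution: "$PATH" resolves
	// against the inherited env, not against an empty config.
	expanded := os.Expand("/usr/bin:$PATH", func(key string) string {
		return inherited[key]
	})
	fmt.Println("PATH=" + expanded) // PATH=/usr/bin:/bin
}
```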
diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go
index 9839467..5343b85 100644
--- a/plugin/executor/containerd/containerd.go
+++ b/plugin/executor/containerd/containerd.go
@@ -16,7 +16,7 @@
)
// PluginNamespace is the name used for the plugins namespace
-var PluginNamespace = "moby-plugins"
+var PluginNamespace = "plugins.moby"
// ExitHandler represents an object that is called when the exit event is received from containerd
type ExitHandler interface {
diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md
index 6ae7baf..4b52989 100644
--- a/project/GOVERNANCE.md
+++ b/project/GOVERNANCE.md
@@ -1,17 +1,120 @@
-# Docker Governance Advisory Board Meetings
+# Moby project governance
-In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public.
-All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership.
+Moby projects are governed by the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc).
+See the Moby TSC [charter](https://github.com/moby/tsc/blob/master/README.md) for
+further information on the role of the TSC and procedures for escalation
+of technical issues or concerns.
-The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at
-[Google Docs Folder](https://goo.gl/Alfj8r)
+Contact [any Moby TSC member](https://github.com/moby/tsc/blob/master/MEMBERS.md) with your questions/concerns about the governance or a specific technical
+issue that you feel requires escalation.
-These include:
+## Project maintainers
-* First Meeting Notes
-* DGAB Charter
-* Presentation 1: Introductory Presentation, including State of The Project
-* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
-* Presentation 3: Long Term Roadmap/Statement of Direction
-
+The current maintainers of the moby/moby repository are listed in the
+[MAINTAINERS](/MAINTAINERS) file.
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+ 1. They share responsibility in the project's success.
+ 2. They have made a long-term, recurring time investment to improve the project.
+ 3. They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is less visible.
+It's easy to recognize a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+
+### Adding maintainers
+
+Maintainers are first and foremost contributors who have shown their
+commitment to the long term success of a project. Contributors who want to
+become maintainers first demonstrate commitment to the project by contributing
+code, reviewing others' work, and triaging issues on a regular basis for at
+least three months.
+
+Contributions alone don't make you a maintainer. You need to earn the
+trust of the current maintainers and other project contributors by showing
+that your decisions and actions are in the best interest of the project.
+
+Periodically, the existing maintainers curate a list of contributors who have
+shown regular activity on the project over the prior months. From this
+list, maintainer candidates are selected and proposed on the maintainers
+mailing list.
+
+After a candidate is announced on the maintainers mailing list, the
+existing maintainers discuss the candidate over the next 5 business days,
+provide feedback, and vote. At least 66% of the current maintainers must
+vote in the affirmative.
+
+If a candidate is approved, a maintainer contacts the candidate to
+invite them to open a pull request that adds the contributor to
+the MAINTAINERS file. The candidate becomes a maintainer once the pull
+request is merged.
+
+### Removing maintainers
+
+Maintainers can be removed from the project, either at their own request
+or due to [project inactivity](#inactive-maintainer-policy).
+
+#### How to step down
+
+Life priorities, interests, and passions can change. If you're a maintainer but
+feel you must remove yourself from the list, inform other maintainers that you
+intend to step down, and if possible, help find someone to pick up your work.
+At the very least, ensure your work can be continued where you left off.
+
+After you've informed other maintainers, create a pull request to remove
+yourself from the MAINTAINERS file.
+
+#### Inactive maintainer policy
+
+An existing maintainer can be removed if they do not show significant activity
+on the project. Periodically, the maintainers review the list of maintainers
+and their activity over the last three months.
+
+If a maintainer has shown insufficient activity over this period, a project
+representative will contact the maintainer to ask if they want to continue
+being a maintainer. If the maintainer decides to step down as a maintainer,
+they open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to continue in this role, but is unable to perform the
+required duties, they can be removed with a vote by at least 66% of the current
+maintainers. The maintainer under discussion will not be allowed to vote. An
+e-mail is sent to the mailing list, inviting maintainers of the project to
+vote. The voting period is five business days. Issues related to a maintainer's
+performance should be discussed with them among the other maintainers so that
+they are not surprised by a pull request removing them. This discussion should
+be handled objectively with no ad hominem attacks.
+
+## Project decision making
+
+Short answer: **Everything is a pull request**.
+
+The Moby core engine project is an open-source project with an open design
+philosophy. This means that the repository is the source of truth for **every**
+aspect of the project, including its philosophy, design, road map, and APIs.
+*If it's part of the project, it's in the repo. If it's in the repo, it's part
+of the project.*
+
+As a result, each decision can be expressed as a change to the repository. An
+implementation change is expressed as a change to the source code. An API
+change is a change to the API specification. A philosophy change is a change
+to the philosophy manifesto, and so on.
+
+All decisions affecting the moby/moby repository, both big and small, follow
+the same steps:
+
+ * **Step 1**: Open a pull request. Anyone can do this.
+
+ * **Step 2**: Discuss the pull request. Anyone can do this.
+
+ * **Step 3**: Maintainers merge, close or reject the pull request.
+
+Pull requests are reviewed by the current maintainers of the moby/moby
+repository. Weekly meetings are organized to synchronously discuss tricky
+PRs, as well as design and architecture decisions. When technical agreement
+cannot be reached among the maintainers of the project, escalation or
+concerns can be raised by opening an issue to be handled by the
+[Moby Technical Steering Committee](https://github.com/moby/tsc).
diff --git a/vendor.conf b/vendor.conf
index 78330a2..2163706 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -114,7 +114,7 @@
github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
# cluster
-github.com/docker/swarmkit a6519e28ff2a558f5d32b2dab9fcb0882879b398
+github.com/docker/swarmkit 713d79dc8799b33465c58ed120b870c52eb5eb4f
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
index bcef801..d702783 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
@@ -96,10 +96,10 @@
// Serviceless tasks can be cleaned up right away since they are not attached to a service.
tr.cleanup = append(tr.cleanup, t.ID)
}
- // tasks with desired state REMOVE that have progressed beyond SHUTDOWN can be cleaned up
+ // tasks with desired state REMOVE that have progressed beyond COMPLETE can be cleaned up
// right away
for _, t := range removeTasks {
- if t.Status.State >= api.TaskStateShutdown {
+ if t.Status.State >= api.TaskStateCompleted {
tr.cleanup = append(tr.cleanup, t.ID)
}
}
@@ -138,10 +138,10 @@
if t.Status.State >= api.TaskStateOrphaned && t.ServiceID == "" {
tr.cleanup = append(tr.cleanup, t.ID)
}
- // add tasks that have progressed beyond SHUTDOWN and have desired state REMOVE. These
+ // add tasks that have progressed beyond COMPLETE and have desired state REMOVE. These
// tasks are associated with slots that were removed as part of a service scale down
// or service removal.
- if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateShutdown {
+ if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateCompleted {
tr.cleanup = append(tr.cleanup, t.ID)
}
case api.EventUpdateCluster:
@@ -282,6 +282,8 @@
// Stop stops the TaskReaper and waits for the main loop to exit.
func (tr *TaskReaper) Stop() {
+ // TODO(dperny) calling stop on the task reaper twice will cause a panic
+ // because we try to close a channel that has already been closed.
close(tr.stopChan)
<-tr.doneChan
}
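The swarmkit bump matters because task states are ordered integers, with COMPLETE sorting before SHUTDOWN, so the old `>= TaskStateShutdown` check never reaped tasks that exited on their own. A sketch with an assumed, abbreviated ordering mirroring swarmkit's `TaskState` enum (the real values live in swarmkit's api package):

```go
package main

import "fmt"

// Assumed, abbreviated state ordering.
const (
	taskStateRunning = iota
	taskStateCompleted
	taskStateShutdown
	taskStateFailed
	taskStateRejected
	taskStateRemove
	taskStateOrphaned
)

// cleanable mirrors the updated reaper check: anything at or past
// COMPLETE whose desired state is REMOVE may be cleaned up.
func cleanable(desired, current int) bool {
	return desired == taskStateRemove && current >= taskStateCompleted
}

func main() {
	fmt.Println(cleanable(taskStateRemove, taskStateCompleted)) // true (was false with >= SHUTDOWN)
	fmt.Println(cleanable(taskStateRemove, taskStateRunning))   // false: still running
}
```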