Merge pull request #35674 from kolyshkin/zfs-rmdir
zfs: fix ebusy on umount etc
diff --git a/.DEREK.yml b/.DEREK.yml
new file mode 100644
index 0000000..3fd6789
--- /dev/null
+++ b/.DEREK.yml
@@ -0,0 +1,17 @@
+curators:
+ - aboch
+ - alexellis
+ - andrewhsu
+ - anonymuse
+ - chanwit
+ - ehazlett
+ - fntlnz
+ - gianarb
+ - mgoelzer
+ - programmerq
+ - rheinwein
+ - ripcurld0
+ - thajeztah
+
+features:
+ - comments
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b7961e1..519e238 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -303,14 +303,43 @@
### How can I become a maintainer?
The procedures for adding new maintainers are explained in the
-global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
-file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
-repository.
+[/project/GOVERNANCE.md](/project/GOVERNANCE.md)
+file in this repository.
Don't forget: being a maintainer is a time investment. Make sure you
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
+### Manage issues and pull requests using the Derek bot
+
+If you want to help label, assign, close or reopen issues or pull requests
+without commit rights, ask a maintainer to add your GitHub handle to the
+`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
+GitHub's user permissions to help non-committers manage issues and pull requests simply by commenting.
+
+For example:
+
+* Labels
+
+```
+Derek add label: kind/question
+Derek remove label: status/claimed
+```
+
+* Assign work
+
+```
+Derek assign: username
+Derek unassign: me
+```
+
+* Manage issues and PRs
+
+```
+Derek close
+Derek reopen
+```
+
## Moby community guidelines
We want to keep the Moby community awesome, growing and collaborative. We need
@@ -341,6 +370,11 @@
used to ping maintainers to review a pull request, a proposal or an
issue.
+The open source governance for this repository is handled via the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc)
+charter. For any concerns with the community process regarding technical contributions,
+please contact the TSC. More information on project governance is available in
+our [project/GOVERNANCE.md](/project/GOVERNANCE.md) document.
+
### Guideline violations — 3 strikes method
The point of this section is not to find opportunities to punish people, but we
diff --git a/Dockerfile b/Dockerfile
index e2c8770..5f78eda 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -52,6 +52,7 @@
libapparmor-dev \
libcap-dev \
libdevmapper-dev \
+ libnet-dev \
libnl-3-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
@@ -94,11 +95,9 @@
ENV GOPATH /go
# Install CRIU for checkpoint/restore support
-ENV CRIU_VERSION 2.12.1
-# Install dependancy packages specific to criu
-RUN apt-get install libnet-dev -y && \
- mkdir -p /usr/src/criu \
- && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
+ENV CRIU_VERSION 3.6
+RUN mkdir -p /usr/src/criu \
+ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make install-criu
diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64
index 58ca40d..fde0c70 100644
--- a/Dockerfile.aarch64
+++ b/Dockerfile.aarch64
@@ -15,7 +15,7 @@
# the case. Therefore, you don't have to disable it anymore.
#
-FROM arm64v8/debian:stretch
+FROM debian:stretch
# allow replacing httpredir or deb mirror
ARG APT_MIRROR=deb.debian.org
@@ -142,10 +142,10 @@
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
- aarch64/buildpack-deps:jessie@sha256:107f4a96837ed89c493fc205cd28508ed0b6b680b4bf3e514e9f0fa0f6667b77 \
- aarch64/busybox:latest@sha256:5a06b8b2fdf22dd1f4085c6c3efd23ee99af01b2d668d286bc4be6d8baa10efb \
- aarch64/debian:jessie@sha256:e6f90b568631705bd5cb27490977378ba762792b38d47c91c4da7a539f63079a \
- aarch64/hello-world:latest@sha256:bd1722550b97668b23ede297abf824d4855f4d9f600dab7b4db1a963dae7ec9e
+ buildpack-deps:jessie@sha256:dd86dced7c9cd2a724e779730f0a53f93b7ef42228d4344b25ce9a42a1486251 \
+ busybox:latest@sha256:bbc3a03235220b170ba48a157dd097dd1379299370e1ed99ce976df0355d24f0 \
+ debian:jessie@sha256:287a20c5f73087ab406e6b364833e3fb7b3ae63ca0eb3486555dc27ed32c6e60 \
+ hello-world:latest@sha256:be0cd392e45be79ffeffa6b05338b98ebb16c87b255f48e297ec7f98e123905c
# See also ensureFrozenImagesLinux() in "integration-cli/fixtures_linux_daemon_test.go" (which needs to be updated when adding images to this list)
# Install tomlv, vndr, runc, containerd, tini, docker-proxy
diff --git a/MAINTAINERS b/MAINTAINERS
index d81caba..4c831d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,12 +1,14 @@
# Moby maintainers file
#
-# This file describes who runs the docker/docker project and how.
-# This is a living document - if you see something out of date or missing, speak up!
+# This file describes the maintainer groups within the moby/moby project.
+# More detail on Moby project governance is available in the
+# project/GOVERNANCE.md file found in this repository.
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.
#
+# TODO(estesp): This file should not necessarily depend on docker/opensource
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
@@ -368,7 +370,7 @@
[people.mlaventure]
Name = "Kenfe-Mickaël Laventure"
- Email = "mickael.laventure@docker.com"
+ Email = "mickael.laventure@gmail.com"
GitHub = "mlaventure"
[people.moxiegirl]
@@ -465,4 +467,3 @@
Name = "Yong Tang"
Email = "yong.tang.github@outlook.com"
GitHub = "yongtang"
-
diff --git a/Makefile b/Makefile
index 3298815..6f5145a 100644
--- a/Makefile
+++ b/Makefile
@@ -53,7 +53,8 @@
-e http_proxy \
-e https_proxy \
-e no_proxy \
- -e VERSION
+ -e VERSION \
+ -e PLATFORM
# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds
# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test`
diff --git a/api/common.go b/api/common.go
index af34d0b..693d0a4 100644
--- a/api/common.go
+++ b/api/common.go
@@ -3,7 +3,7 @@
// Common constants for daemon and client.
const (
// DefaultVersion of Current REST API
- DefaultVersion string = "1.35"
+ DefaultVersion string = "1.36"
// NoBaseImageSpecifier is the symbol used by the FROM
// command to specify that no base image is to be used.
diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go
index d845fdd..b7848a3 100644
--- a/api/server/router/container/container_routes.go
+++ b/api/server/router/container/container_routes.go
@@ -593,7 +593,11 @@
close(done)
select {
case <-started:
- logrus.Errorf("Error attaching websocket: %s", err)
+ if err != nil {
+ logrus.Errorf("Error attaching websocket: %s", err)
+ } else {
+ logrus.Debug("websocket connection was closed by client")
+ }
return nil
default:
}
diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go
index dabab3b..fd95420 100644
--- a/api/server/router/image/image_routes.go
+++ b/api/server/router/image/image_routes.go
@@ -13,7 +13,6 @@
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
- "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
@@ -46,9 +45,6 @@
if err != nil && err != io.EOF { //Do not fail if body is empty.
return err
}
- if c == nil {
- c = &container.Config{}
- }
commitCfg := &backend.ContainerCommitConfig{
ContainerCommitConfig: types.ContainerCommitConfig{
diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go
index 8f6aecd..535956d 100644
--- a/api/server/router/system/system_routes.go
+++ b/api/server/router/system/system_routes.go
@@ -6,7 +6,6 @@
"net/http"
"time"
- "github.com/docker/docker/api"
"github.com/docker/docker/api/server/httputils"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
@@ -65,7 +64,6 @@
func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
info := s.backend.SystemVersion()
- info.APIVersion = api.DefaultVersion
return httputils.WriteJSON(w, http.StatusOK, info)
}
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 07ee067..73ea1a3 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -19,10 +19,10 @@
consumes:
- "application/json"
- "text/plain"
-basePath: "/v1.35"
+basePath: "/v1.36"
info:
title: "Docker Engine API"
- version: "1.35"
+ version: "1.36"
x-logo:
url: "https://docs.docker.com/images/logo-docker-main.png"
description: |
@@ -49,8 +49,8 @@
the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
is returned.
- If you omit the version-prefix, the current version of the API (v1.35) is used.
- For example, calling `/info` is the same as calling `/v1.35/info`. Using the
+ If you omit the version-prefix, the current version of the API (v1.36) is used.
+ For example, calling `/info` is the same as calling `/v1.36/info`. Using the
API without a version-prefix is deprecated and will be removed in a future release.
Engine releases in the near future should support this version of the API,
@@ -254,6 +254,7 @@
properties:
Propagation:
description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
+ type: "string"
enum:
- "private"
- "rprivate"
@@ -607,17 +608,7 @@
description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
as a custom network's name to which this container should connect to."
PortBindings:
- type: "object"
- description: "A map of exposed container ports and the host port they should map to."
- additionalProperties:
- type: "object"
- properties:
- HostIp:
- type: "string"
- description: "The host IP address"
- HostPort:
- type: "string"
- description: "The host port number, as a string"
+ $ref: "#/definitions/PortMap"
RestartPolicy:
$ref: "#/definitions/RestartPolicy"
AutoRemove:
@@ -833,9 +824,7 @@
type: "string"
Cmd:
description: "Command to run specified as a string or an array of strings."
- type:
- - "array"
- - "string"
+ type: "array"
items:
type: "string"
Healthcheck:
@@ -863,9 +852,7 @@
The entry point for the container as a string or an array of strings.
If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
- type:
- - "array"
- - "string"
+ type: "array"
items:
type: "string"
NetworkDisabled:
@@ -6774,6 +6761,28 @@
schema:
type: "object"
properties:
+ Platform:
+ type: "object"
+ required: [Name]
+ properties:
+ Name:
+ type: "string"
+ Components:
+ type: "array"
+ items:
+ type: "object"
+ x-go-name: ComponentVersion
+ required: [Name, Version]
+ properties:
+ Name:
+ type: "string"
+ Version:
+ type: "string"
+ x-nullable: false
+ Details:
+ type: "object"
+ x-nullable: true
+
Version:
type: "string"
ApiVersion:
@@ -7269,6 +7278,9 @@
User:
type: "string"
description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`."
+ WorkingDir:
+ type: "string"
+ description: "The working directory for the exec process inside the container."
example:
AttachStdin: false
AttachStdout: true
diff --git a/api/types/configs.go b/api/types/configs.go
index 20c19f2..54d3e39 100644
--- a/api/types/configs.go
+++ b/api/types/configs.go
@@ -50,6 +50,7 @@
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Env []string // Environment variables
+ WorkingDir string // Working directory
Cmd []string // Execution commands and args
}
diff --git a/api/types/swarm/runtime/plugin.proto b/api/types/swarm/runtime/plugin.proto
index 06eb7ba..6d63b77 100644
--- a/api/types/swarm/runtime/plugin.proto
+++ b/api/types/swarm/runtime/plugin.proto
@@ -1,5 +1,7 @@
syntax = "proto3";
+option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";
+
// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
message PluginSpec {
diff --git a/api/types/types.go b/api/types/types.go
index f7ac772..7814e6b 100644
--- a/api/types/types.go
+++ b/api/types/types.go
@@ -107,9 +107,21 @@
Experimental bool
}
+// ComponentVersion describes the version information for a specific component.
+type ComponentVersion struct {
+ Name string
+ Version string
+ Details map[string]string `json:",omitempty"`
+}
+
// Version contains response of Engine API:
// GET "/version"
type Version struct {
+ Platform struct{ Name string } `json:",omitempty"`
+ Components []ComponentVersion `json:",omitempty"`
+
+	// The following fields are deprecated; they relate to the Engine component and are kept for backwards compatibility
+
Version string
APIVersion string `json:"ApiVersion"`
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go
index b62d6fc..20f1650 100644
--- a/builder/dockerfile/builder.go
+++ b/builder/dockerfile/builder.go
@@ -396,7 +396,8 @@
}
dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, newBuildArgs(b.options.BuildArgs), newStagesBuildResults())
- dispatchRequest.state.runConfig = config
+ // We make mutations to the configuration, ensure we have a copy
+ dispatchRequest.state.runConfig = copyRunConfig(config)
dispatchRequest.state.imageID = config.Image
for _, cmd := range commands {
err := dispatch(dispatchRequest, cmd)
diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go
index b9d586a..c7da6ee 100644
--- a/cmd/dockerd/config.go
+++ b/cmd/dockerd/config.go
@@ -59,6 +59,8 @@
flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull")
flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push")
flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout")
+ flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server")
+ flags.MarkHidden("network-diagnostic-port")
flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address")
flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features")
@@ -90,6 +92,8 @@
flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication")
if runtime.GOOS != "windows" {
+ // TODO: Remove this flag after 3 release cycles (18.03)
flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries")
+ flags.MarkHidden("disable-legacy-registry")
}
}
diff --git a/cmd/dockerd/config_experimental.go b/cmd/dockerd/config_experimental.go
deleted file mode 100644
index 355a29e..0000000
--- a/cmd/dockerd/config_experimental.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package main
-
-import (
- "github.com/docker/docker/daemon/config"
- "github.com/spf13/pflag"
-)
-
-func attachExperimentalFlags(conf *config.Config, cmd *pflag.FlagSet) {
-}
diff --git a/cmd/dockerd/config_solaris.go b/cmd/dockerd/config_solaris.go
deleted file mode 100644
index ed67064..0000000
--- a/cmd/dockerd/config_solaris.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package main
-
-import (
- "github.com/docker/docker/daemon/config"
- "github.com/spf13/pflag"
-)
-
-// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
-func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
- // First handle install flags which are consistent cross-platform
- installCommonConfigFlags(conf, flags)
-
- // Then install flags common to unix platforms
- installUnixConfigFlags(conf, flags)
-
- attachExperimentalFlags(conf, flags)
-}
diff --git a/cmd/dockerd/config_unix.go b/cmd/dockerd/config_unix.go
index b3bd741..a3b0e36 100644
--- a/cmd/dockerd/config_unix.go
+++ b/cmd/dockerd/config_unix.go
@@ -44,6 +44,4 @@
flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers")
flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers")
flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`)
-
- attachExperimentalFlags(conf, flags)
}
diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go
index 02a0314..d73b63a 100644
--- a/cmd/dockerd/daemon.go
+++ b/cmd/dockerd/daemon.go
@@ -6,6 +6,7 @@
"fmt"
"os"
"path/filepath"
+ "runtime"
"strings"
"time"
@@ -472,8 +473,15 @@
return nil, err
}
- if !conf.V2Only {
- logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`)
+ if runtime.GOOS != "windows" {
+ if flags.Changed("disable-legacy-registry") {
+ // TODO: Remove this error after 3 release cycles (18.03)
+ return nil, errors.New("ERROR: The '--disable-legacy-registry' flag has been removed. Interacting with legacy (v1) registries is no longer supported")
+ }
+ if !conf.V2Only {
+ // TODO: Remove this error after 3 release cycles (18.03)
+ return nil, errors.New("ERROR: The 'disable-legacy-registry' configuration option has been removed. Interacting with legacy (v1) registries is no longer supported")
+ }
}
if flags.Changed("graph") {
diff --git a/cmd/dockerd/daemon_linux.go b/cmd/dockerd/daemon_linux.go
index b58f0f0..b09fed9 100644
--- a/cmd/dockerd/daemon_linux.go
+++ b/cmd/dockerd/daemon_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package main
import systemdDaemon "github.com/coreos/go-systemd/daemon"
diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go
index 41e6b61..a65d8ed 100644
--- a/cmd/dockerd/daemon_unix.go
+++ b/cmd/dockerd/daemon_unix.go
@@ -14,7 +14,6 @@
"github.com/docker/docker/cmd/dockerd/hack"
"github.com/docker/docker/daemon"
"github.com/docker/docker/libcontainerd"
- "github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/libnetwork/portallocator"
"golang.org/x/sys/unix"
)
@@ -38,24 +37,13 @@
}
func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) {
- // On older kernel, letting putting the containerd-shim in its own
- // namespace will effectively prevent operations such as unlink, rename
- // and remove on mountpoints that were present at the time the shim
- // namespace was created. This would led to a famous EBUSY will trying to
- // remove shm mounts.
- var noNewNS bool
- if !kernel.CheckKernelVersion(3, 18, 0) {
- noNewNS = true
- }
-
opts := []libcontainerd.RemoteOption{
libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust),
libcontainerd.WithPlugin("linux", &linux.Config{
- Shim: daemon.DefaultShimBinary,
- Runtime: daemon.DefaultRuntimeBinary,
- RuntimeRoot: filepath.Join(cli.Config.Root, "runc"),
- ShimDebug: cli.Config.Debug,
- ShimNoMountNS: noNewNS,
+ Shim: daemon.DefaultShimBinary,
+ Runtime: daemon.DefaultRuntimeBinary,
+ RuntimeRoot: filepath.Join(cli.Config.Root, "runc"),
+ ShimDebug: cli.Config.Debug,
}),
}
if cli.Config.Debug {
diff --git a/cmd/dockerd/daemon_unix_test.go b/cmd/dockerd/daemon_unix_test.go
index 475ff9e..41c392e 100644
--- a/cmd/dockerd/daemon_unix_test.go
+++ b/cmd/dockerd/daemon_unix_test.go
@@ -97,15 +97,3 @@
assert.True(t, loadedConfig.EnableUserlandProxy)
}
-
-func TestLoadDaemonConfigWithLegacyRegistryOptions(t *testing.T) {
- content := `{"disable-legacy-registry": false}`
- tempFile := fs.NewFile(t, "config", fs.WithContent(content))
- defer tempFile.Remove()
-
- opts := defaultOptions(tempFile.Path())
- loadedConfig, err := loadDaemonCliConfig(opts)
- require.NoError(t, err)
- require.NotNil(t, loadedConfig)
- assert.False(t, loadedConfig.V2Only)
-}
diff --git a/container/container_windows.go b/container/container_windows.go
index 5cb2e45..92b50a6 100644
--- a/container/container_windows.go
+++ b/container/container_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package container
import (
diff --git a/container/state_solaris.go b/container/state_solaris.go
deleted file mode 100644
index 1229650..0000000
--- a/container/state_solaris.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package container
-
-// setFromExitStatus is a platform specific helper function to set the state
-// based on the ExitStatus structure.
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
- s.ExitCodeValue = exitStatus.ExitCode
-}
diff --git a/daemon/archive_tarcopyoptions_windows.go b/daemon/archive_tarcopyoptions_windows.go
index 535efd2..500e128 100644
--- a/daemon/archive_tarcopyoptions_windows.go
+++ b/daemon/archive_tarcopyoptions_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
import (
diff --git a/daemon/cluster/listen_addr_linux.go b/daemon/cluster/listen_addr_linux.go
index 3d4f239..2f342fd 100644
--- a/daemon/cluster/listen_addr_linux.go
+++ b/daemon/cluster/listen_addr_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package cluster
import (
diff --git a/daemon/cluster/listen_addr_solaris.go b/daemon/cluster/listen_addr_solaris.go
deleted file mode 100644
index 57a894b..0000000
--- a/daemon/cluster/listen_addr_solaris.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package cluster
-
-import (
- "bufio"
- "fmt"
- "net"
- "os/exec"
- "strings"
-)
-
-func (c *Cluster) resolveSystemAddr() (net.IP, error) {
- defRouteCmd := "/usr/sbin/ipadm show-addr -p -o addr " +
- "`/usr/sbin/route get default | /usr/bin/grep interface | " +
- "/usr/bin/awk '{print $2}'`"
- out, err := exec.Command("/usr/bin/bash", "-c", defRouteCmd).Output()
- if err != nil {
- return nil, fmt.Errorf("cannot get default route: %v", err)
- }
-
- defInterface := strings.SplitN(string(out), "/", 2)
- defInterfaceIP := net.ParseIP(defInterface[0])
-
- return defInterfaceIP, nil
-}
-
-func listSystemIPs() []net.IP {
- var systemAddrs []net.IP
- cmd := exec.Command("/usr/sbin/ipadm", "show-addr", "-p", "-o", "addr")
- cmdReader, err := cmd.StdoutPipe()
- if err != nil {
- return nil
- }
-
- if err := cmd.Start(); err != nil {
- return nil
- }
-
- scanner := bufio.NewScanner(cmdReader)
- go func() {
- for scanner.Scan() {
- text := scanner.Text()
- nameAddrPair := strings.SplitN(text, "/", 2)
- // Let go of loopback interfaces and docker interfaces
- systemAddrs = append(systemAddrs, net.ParseIP(nameAddrPair[0]))
- }
- }()
-
- if err := scanner.Err(); err != nil {
- fmt.Printf("scan underwent err: %+v\n", err)
- }
-
- if err := cmd.Wait(); err != nil {
- fmt.Printf("run command wait: %+v\n", err)
- }
-
- return systemAddrs
-}
diff --git a/daemon/cluster/swarm.go b/daemon/cluster/swarm.go
index 6122369..ec527ce 100644
--- a/daemon/cluster/swarm.go
+++ b/daemon/cluster/swarm.go
@@ -228,6 +228,13 @@
return err
}
+ // Validate spec name.
+ if spec.Annotations.Name == "" {
+ spec.Annotations.Name = "default"
+ } else if spec.Annotations.Name != "default" {
+ return validationError{errors.New(`swarm spec must be named "default"`)}
+ }
+
// In update, client should provide the complete spec of the swarm, including
// Name and Labels. If a field is specified with 0 or nil, then the default value
// will be used to swarmkit.
diff --git a/daemon/commit.go b/daemon/commit.go
index 0053132..1bdbd6b 100644
--- a/daemon/commit.go
+++ b/daemon/commit.go
@@ -149,6 +149,10 @@
defer daemon.containerUnpause(container)
}
+ if c.MergeConfigs && c.Config == nil {
+ c.Config = container.Config
+ }
+
newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes)
if err != nil {
return "", err
diff --git a/daemon/config/config.go b/daemon/config/config.go
index 1e22a6f..199fae6 100644
--- a/daemon/config/config.go
+++ b/daemon/config/config.go
@@ -85,26 +85,27 @@
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
- AuthzMiddleware *authorization.Middleware `json:"-"`
- AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
- AutoRestart bool `json:"-"`
- Context map[string][]string `json:"-"`
- DisableBridge bool `json:"-"`
- DNS []string `json:"dns,omitempty"`
- DNSOptions []string `json:"dns-opts,omitempty"`
- DNSSearch []string `json:"dns-search,omitempty"`
- ExecOptions []string `json:"exec-opts,omitempty"`
- GraphDriver string `json:"storage-driver,omitempty"`
- GraphOptions []string `json:"storage-opts,omitempty"`
- Labels []string `json:"labels,omitempty"`
- Mtu int `json:"mtu,omitempty"`
- Pidfile string `json:"pidfile,omitempty"`
- RawLogs bool `json:"raw-logs,omitempty"`
- RootDeprecated string `json:"graph,omitempty"`
- Root string `json:"data-root,omitempty"`
- ExecRoot string `json:"exec-root,omitempty"`
- SocketGroup string `json:"group,omitempty"`
- CorsHeaders string `json:"api-cors-header,omitempty"`
+ AuthzMiddleware *authorization.Middleware `json:"-"`
+ AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
+ AutoRestart bool `json:"-"`
+ Context map[string][]string `json:"-"`
+ DisableBridge bool `json:"-"`
+ DNS []string `json:"dns,omitempty"`
+ DNSOptions []string `json:"dns-opts,omitempty"`
+ DNSSearch []string `json:"dns-search,omitempty"`
+ ExecOptions []string `json:"exec-opts,omitempty"`
+ GraphDriver string `json:"storage-driver,omitempty"`
+ GraphOptions []string `json:"storage-opts,omitempty"`
+ Labels []string `json:"labels,omitempty"`
+ Mtu int `json:"mtu,omitempty"`
+ NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
+ Pidfile string `json:"pidfile,omitempty"`
+ RawLogs bool `json:"raw-logs,omitempty"`
+ RootDeprecated string `json:"graph,omitempty"`
+ Root string `json:"data-root,omitempty"`
+ ExecRoot string `json:"exec-root,omitempty"`
+ SocketGroup string `json:"group,omitempty"`
+ CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
diff --git a/daemon/config/config_solaris.go b/daemon/config/config_solaris.go
deleted file mode 100644
index 6b1e061..0000000
--- a/daemon/config/config_solaris.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package config
-
-// Config defines the configuration of a docker daemon.
-// These are the configuration settings that you pass
-// to the docker daemon when you launch it with say: `docker -d -e lxc`
-type Config struct {
- CommonConfig
-
- // These fields are common to all unix platforms.
- CommonUnixConfig
-}
-
-// BridgeConfig stores all the bridge driver specific
-// configuration.
-type BridgeConfig struct {
- commonBridgeConfig
-
- // Fields below here are platform specific.
- commonUnixBridgeConfig
-}
-
-// IsSwarmCompatible defines if swarm mode can be enabled in this config
-func (conf *Config) IsSwarmCompatible() error {
- return nil
-}
-
-// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid.
-func (conf *Config) ValidatePlatformConfig() error {
- return nil
-}
diff --git a/daemon/configs_linux.go b/daemon/configs_linux.go
index af20ad7..d498c95 100644
--- a/daemon/configs_linux.go
+++ b/daemon/configs_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
func configsSupported() bool {
diff --git a/daemon/configs_windows.go b/daemon/configs_windows.go
index 7cb2e9c..d498c95 100644
--- a/daemon/configs_windows.go
+++ b/daemon/configs_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
func configsSupported() bool {
diff --git a/daemon/container_windows.go b/daemon/container_windows.go
index 6fdd1e6..6db130a 100644
--- a/daemon/container_windows.go
+++ b/daemon/container_windows.go
@@ -1,5 +1,3 @@
-//+build windows
-
package daemon
import (
diff --git a/daemon/daemon.go b/daemon/daemon.go
index e63e209..dd8c100 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -62,8 +62,8 @@
"github.com/pkg/errors"
)
-// MainNamespace is the name of the namespace used for users containers
-const MainNamespace = "moby"
+// ContainersNamespace is the name of the namespace used for users containers
+const ContainersNamespace = "moby"
var (
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
@@ -247,6 +247,11 @@
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
+ } else if !daemon.configStore.LiveRestoreEnabled {
+ if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
+ logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
+ return
+ }
}
if c.IsRunning() || c.IsPaused() {
@@ -317,24 +322,24 @@
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
- } else {
- // get list of containers we need to restart
+ }
- // Do not autostart containers which
- // has endpoints in a swarm scope
- // network yet since the cluster is
- // not initialized yet. We will start
- // it after the cluster is
- // initialized.
- if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
- mapLock.Lock()
- restartContainers[c] = make(chan struct{})
- mapLock.Unlock()
- } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
- mapLock.Lock()
- removeContainers[c.ID] = c
- mapLock.Unlock()
- }
+ // get list of containers we need to restart
+
+ // Do not autostart containers which
+ // has endpoints in a swarm scope
+ // network yet since the cluster is
+ // not initialized yet. We will start
+ // it after the cluster is
+ // initialized.
+ if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
+ mapLock.Lock()
+ restartContainers[c] = make(chan struct{})
+ mapLock.Unlock()
+ } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
+ mapLock.Lock()
+ removeContainers[c.ID] = c
+ mapLock.Unlock()
}
c.Lock()
@@ -890,7 +895,7 @@
go d.execCommandGC()
- d.containerd, err = containerdRemote.NewClient(MainNamespace, d)
+ d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d)
if err != nil {
return nil, err
}
diff --git a/daemon/daemon_experimental.go b/daemon/daemon_experimental.go
deleted file mode 100644
index fb0251d..0000000
--- a/daemon/daemon_experimental.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package daemon
-
-import "github.com/docker/docker/api/types/container"
-
-func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) {
- return nil, nil
-}
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index 1f9885d..51ea7c7 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -574,11 +574,6 @@
var warnings []string
sysInfo := sysinfo.New(true)
- warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config)
- if err != nil {
- return warnings, err
- }
-
w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update)
// no matter err is nil or not, w could have data in itself.
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
index 8029bbf..8582d4e 100644
--- a/daemon/daemon_windows.go
+++ b/daemon/daemon_windows.go
@@ -26,7 +26,6 @@
winlibnetwork "github.com/docker/libnetwork/drivers/windows"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/options"
- blkiodev "github.com/opencontainers/runc/libcontainer/configs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
@@ -47,10 +46,6 @@
return filepath.Join(root, "plugins")
}
-func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) {
- return nil, nil
-}
-
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
return parseSecurityOpt(container, hostConfig)
}
diff --git a/daemon/events/events.go b/daemon/events/events.go
index d1529e1..6a4990f 100644
--- a/daemon/events/events.go
+++ b/daemon/events/events.go
@@ -28,7 +28,7 @@
}
}
-// Subscribe adds new listener to events, returns slice of 64 stored
+// Subscribe adds new listener to events, returns slice of 256 stored
// last events, a channel in which you can expect new events (in form
// of interface{}, so you need type assertion), and a function to call
// to stop the stream of events.
@@ -46,7 +46,7 @@
return current, l, cancel
}
-// SubscribeTopic adds new listener to events, returns slice of 64 stored
+// SubscribeTopic adds new listener to events, returns slice of 256 stored
// last events, a channel in which you can expect new events (in form
// of interface{}, so you need type assertion).
func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) {
diff --git a/daemon/events/events_test.go b/daemon/events/events_test.go
index ebb222c..d74f258 100644
--- a/daemon/events/events_test.go
+++ b/daemon/events/events_test.go
@@ -135,21 +135,28 @@
t.Fatalf("Must be %d events, got %d", eventsLimit, len(current))
}
first := current[0]
- if first.Status != "action_16" {
- t.Fatalf("First action is %s, must be action_16", first.Status)
+
+ // TODO remove this once we removed the deprecated `ID`, `Status`, and `From` fields
+ if first.Action != first.Status {
+ // Verify that the (deprecated) Status is set to the expected value
+ t.Fatalf("Action (%s) does not match Status (%s)", first.Action, first.Status)
+ }
+
+ if first.Action != "action_16" {
+ t.Fatalf("First action is %s, must be action_16", first.Action)
}
last := current[len(current)-1]
- if last.Status != "action_271" {
- t.Fatalf("Last action is %s, must be action_271", last.Status)
+ if last.Action != "action_271" {
+ t.Fatalf("Last action is %s, must be action_271", last.Action)
}
firstC := msgs[0]
- if firstC.Status != "action_272" {
- t.Fatalf("First action is %s, must be action_272", firstC.Status)
+ if firstC.Action != "action_272" {
+ t.Fatalf("First action is %s, must be action_272", firstC.Action)
}
lastC := msgs[len(msgs)-1]
- if lastC.Status != "action_281" {
- t.Fatalf("Last action is %s, must be action_281", lastC.Status)
+ if lastC.Action != "action_281" {
+ t.Fatalf("Last action is %s, must be action_281", lastC.Action)
}
}
diff --git a/daemon/exec.go b/daemon/exec.go
index 01670fa..83b7de2 100644
--- a/daemon/exec.go
+++ b/daemon/exec.go
@@ -122,6 +122,7 @@
execConfig.Tty = config.Tty
execConfig.Privileged = config.Privileged
execConfig.User = config.User
+ execConfig.WorkingDir = config.WorkingDir
linkedEnv, err := d.setupLinkedContainers(cntr)
if err != nil {
@@ -131,6 +132,9 @@
if len(execConfig.User) == 0 {
execConfig.User = cntr.Config.User
}
+ if len(execConfig.WorkingDir) == 0 {
+ execConfig.WorkingDir = cntr.Config.WorkingDir
+ }
d.registerExecCommand(cntr, execConfig)
@@ -211,7 +215,7 @@
Args: append([]string{ec.Entrypoint}, ec.Args...),
Env: ec.Env,
Terminal: ec.Tty,
- Cwd: c.Config.WorkingDir,
+ Cwd: ec.WorkingDir,
}
if p.Cwd == "" {
p.Cwd = "/"
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
index 193d32f..370b403 100644
--- a/daemon/exec/exec.go
+++ b/daemon/exec/exec.go
@@ -31,6 +31,7 @@
Tty bool
Privileged bool
User string
+ WorkingDir string
Env []string
Pid int
}
diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go
deleted file mode 100644
index 7c1fc20..0000000
--- a/daemon/exec_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/container"
- "github.com/docker/docker/daemon/exec"
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func (daemon *Daemon) execSetPlatformOpt(_ *container.Container, _ *exec.Config, _ *specs.Process) error {
- return nil
-}
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index 11e763d..248b8bf 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -89,7 +89,16 @@
return nil, graphdriver.ErrNotSupported
}
- fsMagic, err := graphdriver.GetFSMagic(root)
+ // Perform feature detection on /var/lib/docker/aufs if it's an existing directory.
+ // This covers situations where /var/lib/docker/aufs is a mount, and on a different
+ // filesystem than /var/lib/docker.
+ // If the path does not exist, fall back to using /var/lib/docker for feature detection.
+ testdir := root
+ if _, err := os.Stat(testdir); os.IsNotExist(err) {
+ testdir = filepath.Dir(testdir)
+ }
+
+ fsMagic, err := graphdriver.GetFSMagic(testdir)
if err != nil {
return nil, err
}
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index 0dabf71..57313c9 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -51,7 +51,16 @@
// An error is returned if BTRFS is not supported.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
- fsMagic, err := graphdriver.GetFSMagic(home)
+ // Perform feature detection on /var/lib/docker/btrfs if it's an existing directory.
+ // This covers situations where /var/lib/docker/btrfs is a mount, and on a different
+ // filesystem than /var/lib/docker.
+ // If the path does not exist, fall back to using /var/lib/docker for feature detection.
+ testdir := home
+ if _, err := os.Stat(testdir); os.IsNotExist(err) {
+ testdir = filepath.Dir(testdir)
+ }
+
+ fsMagic, err := graphdriver.GetFSMagic(testdir)
if err != nil {
return nil, err
}
diff --git a/daemon/graphdriver/copy/copy.go b/daemon/graphdriver/copy/copy.go
index 8ec458d..7a98bec 100644
--- a/daemon/graphdriver/copy/copy.go
+++ b/daemon/graphdriver/copy/copy.go
@@ -11,6 +11,7 @@
*/
import "C"
import (
+ "container/list"
"fmt"
"io"
"os"
@@ -65,7 +66,7 @@
// as the ioctl may not have been available (therefore EINVAL)
if err == unix.EXDEV || err == unix.ENOSYS {
*copyWithFileRange = false
- } else if err != nil {
+ } else {
return err
}
}
@@ -106,11 +107,28 @@
return nil
}
+type fileID struct {
+ dev uint64
+ ino uint64
+}
+
+type dirMtimeInfo struct {
+ dstPath *string
+ stat *syscall.Stat_t
+}
+
// DirCopy copies or hardlinks the contents of one directory to another,
// properly handling xattrs, and soft links
-func DirCopy(srcDir, dstDir string, copyMode Mode) error {
+//
+// Copying xattrs can be opted out of by passing false for copyXattrs.
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
copyWithFileRange := true
copyWithFileClone := true
+
+ // This is a map of source file inodes to dst file paths
+ copiedFiles := make(map[fileID]string)
+
+ dirsToSetMtimes := list.New()
err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
@@ -136,15 +154,21 @@
switch f.Mode() & os.ModeType {
case 0: // Regular file
+ id := fileID{dev: stat.Dev, ino: stat.Ino}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
return err2
}
+ } else if hardLinkDstPath, ok := copiedFiles[id]; ok {
+ if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
+ return err2
+ }
} else {
if err2 := copyRegular(srcPath, dstPath, f, ©WithFileRange, ©WithFileClone); err2 != nil {
return err2
}
+ copiedFiles[id] = dstPath
}
case os.ModeDir:
@@ -192,16 +216,10 @@
return err
}
- if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
- return err
- }
-
- // We need to copy this attribute if it appears in an overlay upper layer, as
- // this function is used to copy those. It is set by overlay if a directory
- // is removed and then re-created and should not inherit anything from the
- // same dir in the lower dir.
- if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
- return err
+ if copyXattrs {
+ if err := doCopyXattrs(srcPath, dstPath); err != nil {
+ return err
+ }
}
isSymlink := f.Mode()&os.ModeSymlink != 0
@@ -216,7 +234,9 @@
// system.Chtimes doesn't support a NOFOLLOW flag atm
// nolint: unconvert
- if !isSymlink {
+ if f.IsDir() {
+ dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
+ } else if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
@@ -230,5 +250,31 @@
}
return nil
})
- return err
+ if err != nil {
+ return err
+ }
+ for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
+ mtimeInfo := e.Value.(*dirMtimeInfo)
+ ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
+ if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doCopyXattrs(srcPath, dstPath string) error {
+ if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+ return err
+ }
+
+ // We need to copy this attribute if it appears in an overlay upper layer, as
+ // this function is used to copy those. It is set by overlay if a directory
+ // is removed and then re-created and should not inherit anything from the
+ // same dir in the lower dir.
+ if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+ return err
+ }
+ return nil
}
diff --git a/daemon/graphdriver/copy/copy_test.go b/daemon/graphdriver/copy/copy_test.go
index 6976503..d216991 100644
--- a/daemon/graphdriver/copy/copy_test.go
+++ b/daemon/graphdriver/copy/copy_test.go
@@ -3,15 +3,20 @@
package copy
import (
+ "fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
+ "syscall"
"testing"
+ "time"
"github.com/docker/docker/pkg/parsers/kernel"
+ "github.com/docker/docker/pkg/system"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "golang.org/x/sys/unix"
)
func TestIsCopyFileRangeSyscallAvailable(t *testing.T) {
@@ -45,6 +50,84 @@
doCopyTest(t, ©WithFileRange, ©WithFileClone)
}
+func TestCopyDir(t *testing.T) {
+ srcDir, err := ioutil.TempDir("", "srcDir")
+ require.NoError(t, err)
+ populateSrcDir(t, srcDir, 3)
+
+ dstDir, err := ioutil.TempDir("", "testdst")
+ require.NoError(t, err)
+ defer os.RemoveAll(dstDir)
+
+ assert.NoError(t, DirCopy(srcDir, dstDir, Content, false))
+ require.NoError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(srcDir, srcPath)
+ require.NoError(t, err)
+ if relPath == "." {
+ return nil
+ }
+
+ dstPath := filepath.Join(dstDir, relPath)
+ require.NoError(t, err)
+
+ // If we add non-regular dirs and files to the test
+ // then we need to add more checks here.
+ dstFileInfo, err := os.Lstat(dstPath)
+ require.NoError(t, err)
+
+ srcFileSys := f.Sys().(*syscall.Stat_t)
+ dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t)
+
+ t.Log(relPath)
+ if srcFileSys.Dev == dstFileSys.Dev {
+ assert.NotEqual(t, srcFileSys.Ino, dstFileSys.Ino)
+ }
+ // Todo: check size, and ctim is not equal
+ /// on filesystems that have granular ctimes
+ assert.Equal(t, srcFileSys.Mode, dstFileSys.Mode)
+ assert.Equal(t, srcFileSys.Uid, dstFileSys.Uid)
+ assert.Equal(t, srcFileSys.Gid, dstFileSys.Gid)
+ assert.Equal(t, srcFileSys.Mtim, dstFileSys.Mtim)
+
+ return nil
+ }))
+}
+
+func randomMode(baseMode int) os.FileMode {
+ for i := 0; i < 7; i++ {
+ baseMode = baseMode | (1&rand.Intn(2))<<uint(i)
+ }
+ return os.FileMode(baseMode)
+}
+
+func populateSrcDir(t *testing.T, srcDir string, remainingDepth int) {
+ if remainingDepth == 0 {
+ return
+ }
+ aTime := time.Unix(rand.Int63(), 0)
+ mTime := time.Unix(rand.Int63(), 0)
+
+ for i := 0; i < 10; i++ {
+ dirName := filepath.Join(srcDir, fmt.Sprintf("srcdir-%d", i))
+ // Owner all bits set
+ require.NoError(t, os.Mkdir(dirName, randomMode(0700)))
+ populateSrcDir(t, dirName, remainingDepth-1)
+ require.NoError(t, system.Chtimes(dirName, aTime, mTime))
+ }
+
+ for i := 0; i < 10; i++ {
+ fileName := filepath.Join(srcDir, fmt.Sprintf("srcfile-%d", i))
+ // Owner read bit set
+ require.NoError(t, ioutil.WriteFile(fileName, []byte{}, randomMode(0400)))
+ require.NoError(t, system.Chtimes(fileName, aTime, mTime))
+ }
+}
+
func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) {
dir, err := ioutil.TempDir("", "docker-copy-check")
require.NoError(t, err)
@@ -65,3 +148,32 @@
require.NoError(t, err)
assert.Equal(t, buf, readBuf)
}
+
+func TestCopyHardlink(t *testing.T) {
+ var srcFile1FileInfo, srcFile2FileInfo, dstFile1FileInfo, dstFile2FileInfo unix.Stat_t
+
+ srcDir, err := ioutil.TempDir("", "srcDir")
+ require.NoError(t, err)
+ defer os.RemoveAll(srcDir)
+
+ dstDir, err := ioutil.TempDir("", "dstDir")
+ require.NoError(t, err)
+ defer os.RemoveAll(dstDir)
+
+ srcFile1 := filepath.Join(srcDir, "file1")
+ srcFile2 := filepath.Join(srcDir, "file2")
+ dstFile1 := filepath.Join(dstDir, "file1")
+ dstFile2 := filepath.Join(dstDir, "file2")
+ require.NoError(t, ioutil.WriteFile(srcFile1, []byte{}, 0777))
+ require.NoError(t, os.Link(srcFile1, srcFile2))
+
+ assert.NoError(t, DirCopy(srcDir, dstDir, Content, false))
+
+ require.NoError(t, unix.Stat(srcFile1, &srcFile1FileInfo))
+ require.NoError(t, unix.Stat(srcFile2, &srcFile2FileInfo))
+ require.Equal(t, srcFile1FileInfo.Ino, srcFile2FileInfo.Ino)
+
+ require.NoError(t, unix.Stat(dstFile1, &dstFile1FileInfo))
+ require.NoError(t, unix.Stat(dstFile2, &dstFile2FileInfo))
+ assert.Equal(t, dstFile1FileInfo.Ino, dstFile2FileInfo.Ino)
+}
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index db41f05..6659878 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -1201,7 +1201,7 @@
options = joinMountOptions(options, devices.mountOptions)
if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
- return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256)))
+ return fmt.Errorf("Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options, err, string(dmesg.Dmesg(256)))
}
defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
@@ -2392,7 +2392,7 @@
options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
- return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
+ return fmt.Errorf("devmapper: Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), path, fstype, options, err, string(dmesg.Dmesg(256)))
}
if fstype == "xfs" && devices.xfsNospaceRetries != "" {
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index d08c6dc..ceb49b9 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -1,7 +1,6 @@
package graphdriver
import (
- "errors"
"fmt"
"io"
"os"
@@ -28,13 +27,6 @@
var (
// All registered drivers
drivers map[string]InitFunc
-
- // ErrNotSupported returned when driver is not supported.
- ErrNotSupported = errors.New("driver not supported")
- // ErrPrerequisites returned when driver does not meet prerequisites.
- ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
- // ErrIncompatibleFS returned when file system is not supported.
- ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
)
//CreateOpts contains optional arguments for Create() and CreateReadWrite()
@@ -248,7 +240,7 @@
for _, name := range list {
driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
if err != nil {
- if isDriverNotSupported(err) {
+ if IsDriverNotSupported(err) {
continue
}
return nil, err
@@ -260,7 +252,7 @@
for name, initFunc := range drivers {
driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
if err != nil {
- if isDriverNotSupported(err) {
+ if IsDriverNotSupported(err) {
continue
}
return nil, err
@@ -270,12 +262,6 @@
return nil, fmt.Errorf("No supported storage backend found")
}
-// isDriverNotSupported returns true if the error initializing
-// the graph driver is a non-supported error.
-func isDriverNotSupported(err error) bool {
- return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS
-}
-
// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
func scanPriorDrivers(root string) map[string]bool {
driversMap := make(map[string]bool)
@@ -291,6 +277,18 @@
return driversMap
}
+// IsInitialized checks if the driver's home-directory exists and is non-empty.
+func IsInitialized(driverHome string) bool {
+ _, err := os.Stat(driverHome)
+ if os.IsNotExist(err) {
+ return false
+ }
+ if err != nil {
+ logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err)
+ }
+ return !isEmptyDir(driverHome)
+}
+
// isEmptyDir checks if a directory is empty. It is used to check if prior
// storage-driver directories exist. If an error occurs, it also assumes the
// directory is not empty (which preserves the behavior _before_ this check
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
index aa3cfc9..d2d7c9f 100644
--- a/daemon/graphdriver/driver_linux.go
+++ b/daemon/graphdriver/driver_linux.go
@@ -1,10 +1,6 @@
-// +build linux
-
package graphdriver
import (
- "path/filepath"
-
"github.com/docker/docker/pkg/mount"
"golang.org/x/sys/unix"
)
@@ -82,7 +78,7 @@
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf unix.Statfs_t
- if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+ if err := unix.Statfs(rootpath, &buf); err != nil {
return 0, err
}
return FsMagic(buf.Type), nil
diff --git a/daemon/graphdriver/errors.go b/daemon/graphdriver/errors.go
new file mode 100644
index 0000000..dd52ee4
--- /dev/null
+++ b/daemon/graphdriver/errors.go
@@ -0,0 +1,36 @@
+package graphdriver
+
+const (
+ // ErrNotSupported returned when driver is not supported.
+ ErrNotSupported NotSupportedError = "driver not supported"
+ // ErrPrerequisites returned when driver does not meet prerequisites.
+ ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)"
+ // ErrIncompatibleFS returned when file system is not supported.
+ ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver"
+)
+
+// ErrUnSupported signals that the graph-driver is not supported on the current configuration
+type ErrUnSupported interface {
+ NotSupported()
+}
+
+// NotSupportedError signals that the graph-driver is not supported on the current configuration
+type NotSupportedError string
+
+func (e NotSupportedError) Error() string {
+ return string(e)
+}
+
+// NotSupported signals that a graph-driver is not supported.
+func (e NotSupportedError) NotSupported() {}
+
+// IsDriverNotSupported returns true if the error initializing
+// the graph driver is a non-supported error.
+func IsDriverNotSupported(err error) bool {
+ switch err.(type) {
+ case ErrUnSupported:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/daemon/graphdriver/graphtest/graphtest_unix.go b/daemon/graphdriver/graphtest/graphtest_unix.go
index c25d482..da9443e 100644
--- a/daemon/graphdriver/graphtest/graphtest_unix.go
+++ b/daemon/graphdriver/graphtest/graphtest_unix.go
@@ -42,7 +42,7 @@
d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root})
if err != nil {
t.Logf("graphdriver: %v\n", err)
- if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
+ if graphdriver.IsDriverNotSupported(err) {
t.Skipf("Driver %s not supported", name)
}
t.Fatal(err)
diff --git a/daemon/graphdriver/lcow/lcow.go b/daemon/graphdriver/lcow/lcow.go
index 5ec8b8b..058c69f 100644
--- a/daemon/graphdriver/lcow/lcow.go
+++ b/daemon/graphdriver/lcow/lcow.go
@@ -824,7 +824,7 @@
return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
}
- // TODO @jhowardmsft - the retries are temporary to overcome platform reliablity issues.
+ // TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues.
// Obviously this will be removed as platform bugs are fixed.
retries := 0
for {
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index 83a1677..bcb21d8 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -10,6 +10,7 @@
"os"
"os/exec"
"path"
+ "path/filepath"
"strconv"
"github.com/docker/docker/daemon/graphdriver"
@@ -119,7 +120,16 @@
return nil, graphdriver.ErrNotSupported
}
- fsMagic, err := graphdriver.GetFSMagic(home)
+ // Perform feature detection on /var/lib/docker/overlay if it's an existing directory.
+ // This covers situations where /var/lib/docker/overlay is a mount, and on a different
+ // filesystem than /var/lib/docker.
+ // If the path does not exist, fall back to using /var/lib/docker for feature detection.
+ testdir := home
+ if _, err := os.Stat(testdir); os.IsNotExist(err) {
+ testdir = filepath.Dir(testdir)
+ }
+
+ fsMagic, err := graphdriver.GetFSMagic(testdir)
if err != nil {
return nil, err
}
@@ -128,11 +138,23 @@
}
switch fsMagic {
- case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs:
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs:
logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
}
+ supportsDType, err := fsutils.SupportsDType(testdir)
+ if err != nil {
+ return nil, err
+ }
+ if !supportsDType {
+ if !graphdriver.IsInitialized(home) {
+ return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
+ }
+ // allow running without d_type only for existing setups (#27443)
+ logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
+ }
+
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
@@ -146,15 +168,6 @@
return nil, err
}
- supportsDType, err := fsutils.SupportsDType(home)
- if err != nil {
- return nil, err
- }
- if !supportsDType {
- // not a fatal error until v17.12 (#27443)
- logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
- }
-
d := &Driver{
home: home,
uidMaps: uidMaps,
@@ -327,7 +340,7 @@
return err
}
- return copy.DirCopy(parentUpperDir, upperDir, copy.Content)
+ return copy.DirCopy(parentUpperDir, upperDir, copy.Content, true)
}
func (d *Driver) dir(id string) string {
@@ -466,7 +479,7 @@
}
}()
- if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink); err != nil {
+ if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink, true); err != nil {
return 0, err
}
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
index e660d80..4b596ae 100644
--- a/daemon/graphdriver/overlay2/overlay.go
+++ b/daemon/graphdriver/overlay2/overlay.go
@@ -31,6 +31,7 @@
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/system"
"github.com/docker/go-units"
+ rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@@ -136,7 +137,16 @@
return nil, err
}
- fsMagic, err := graphdriver.GetFSMagic(home)
+ // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory.
+ // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different
+ // filesystem than /var/lib/docker.
+ // If the path does not exist, fall back to using /var/lib/docker for feature detection.
+ testdir := home
+ if _, err := os.Stat(testdir); os.IsNotExist(err) {
+ testdir = filepath.Dir(testdir)
+ }
+
+ fsMagic, err := graphdriver.GetFSMagic(testdir)
if err != nil {
return nil, err
}
@@ -144,9 +154,8 @@
backingFs = fsName
}
- // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
switch fsMagic {
- case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs:
+ case graphdriver.FsMagicAufs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs:
logrus.Errorf("'overlay2' is not supported over %s", backingFs)
return nil, graphdriver.ErrIncompatibleFS
case graphdriver.FsMagicBtrfs:
@@ -165,12 +174,23 @@
if opts.overrideKernelCheck {
logrus.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
} else {
- if err := supportsMultipleLowerDir(filepath.Dir(home)); err != nil {
+ if err := supportsMultipleLowerDir(testdir); err != nil {
logrus.Debugf("Multiple lower dirs not supported: %v", err)
return nil, graphdriver.ErrNotSupported
}
}
}
+ supportsDType, err := fsutils.SupportsDType(testdir)
+ if err != nil {
+ return nil, err
+ }
+ if !supportsDType {
+ if !graphdriver.IsInitialized(home) {
+ return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs)
+ }
+ // allow running without d_type only for existing setups (#27443)
+ logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
+ }
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
@@ -185,15 +205,6 @@
return nil, err
}
- supportsDType, err := fsutils.SupportsDType(home)
- if err != nil {
- return nil, err
- }
- if !supportsDType {
- // not a fatal error until v17.12 (#27443)
- logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
- }
-
d := &Driver{
home: home,
uidMaps: uidMaps,
@@ -694,6 +705,7 @@
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
+ InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
return 0, err
}
diff --git a/daemon/graphdriver/overlayutils/overlayutils.go b/daemon/graphdriver/overlayutils/overlayutils.go
index 7491c34..9f71c60 100644
--- a/daemon/graphdriver/overlayutils/overlayutils.go
+++ b/daemon/graphdriver/overlayutils/overlayutils.go
@@ -3,8 +3,9 @@
package overlayutils
import (
- "errors"
"fmt"
+
+ "github.com/docker/docker/daemon/graphdriver"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
@@ -13,6 +14,7 @@
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
- msg += " Running without d_type support will no longer be supported in Docker 17.12."
- return errors.New(msg)
+ msg += " Backing filesystems without d_type support are not supported."
+
+ return graphdriver.NotSupportedError(msg)
}
diff --git a/daemon/graphdriver/quota/projectquota.go b/daemon/graphdriver/quota/projectquota.go
index e25965b..8efe5cd 100644
--- a/daemon/graphdriver/quota/projectquota.go
+++ b/daemon/graphdriver/quota/projectquota.go
@@ -350,11 +350,17 @@
backingFsBlockDev := path.Join(home, "backingFsBlockDev")
// Re-create just in case someone copied the home directory over to a new device
unix.Unlink(backingFsBlockDev)
- if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil {
+ err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev))
+ switch err {
+ case nil:
+ return backingFsBlockDev, nil
+
+ case unix.ENOSYS:
+ return "", ErrQuotaNotSupported
+
+ default:
return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err)
}
-
- return backingFsBlockDev, nil
}
func hasQuotaSupport(backingFsBlockDev string) (bool, error) {
diff --git a/daemon/graphdriver/vfs/copy_linux.go b/daemon/graphdriver/vfs/copy_linux.go
new file mode 100644
index 0000000..1a63a11
--- /dev/null
+++ b/daemon/graphdriver/vfs/copy_linux.go
@@ -0,0 +1,7 @@
+package vfs
+
+import "github.com/docker/docker/daemon/graphdriver/copy"
+
+func dirCopy(srcDir, dstDir string) error {
+ return copy.DirCopy(srcDir, dstDir, copy.Content, false)
+}
diff --git a/daemon/graphdriver/vfs/copy_unsupported.go b/daemon/graphdriver/vfs/copy_unsupported.go
new file mode 100644
index 0000000..fcc4b69
--- /dev/null
+++ b/daemon/graphdriver/vfs/copy_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package vfs
+
+import "github.com/docker/docker/pkg/chrootarchive"
+
+func dirCopy(srcDir, dstDir string) error {
+ return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 610476f..5f9cad4 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -7,7 +7,6 @@
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/daemon/graphdriver/quota"
- "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
@@ -16,8 +15,8 @@
)
var (
- // CopyWithTar defines the copy method to use.
- CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+ // CopyDir defines the copy method to use.
+ CopyDir = dirCopy
)
func init() {
@@ -36,9 +35,7 @@
return nil, err
}
- if err := setupDriverQuota(d); err != nil {
- return nil, err
- }
+ setupDriverQuota(d)
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}
@@ -133,7 +130,7 @@
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
- return CopyWithTar(parentDir.Path(), dir)
+ return CopyDir(parentDir.Path(), dir)
}
func (d *Driver) dir(id string) string {
diff --git a/daemon/graphdriver/vfs/quota_linux.go b/daemon/graphdriver/vfs/quota_linux.go
index 032c15b..f871bef 100644
--- a/daemon/graphdriver/vfs/quota_linux.go
+++ b/daemon/graphdriver/vfs/quota_linux.go
@@ -1,21 +1,20 @@
-// +build linux
-
package vfs
-import "github.com/docker/docker/daemon/graphdriver/quota"
+import (
+ "github.com/docker/docker/daemon/graphdriver/quota"
+ "github.com/sirupsen/logrus"
+)
type driverQuota struct {
quotaCtl *quota.Control
}
-func setupDriverQuota(driver *Driver) error {
+func setupDriverQuota(driver *Driver) {
if quotaCtl, err := quota.NewControl(driver.home); err == nil {
driver.quotaCtl = quotaCtl
} else if err != quota.ErrQuotaNotSupported {
- return err
+ logrus.Warnf("Unable to setup quota: %v\n", err)
}
-
- return nil
}
func (d *Driver) setupQuota(dir string, size uint64) error {
diff --git a/daemon/health.go b/daemon/health.go
index f40c0dd..9acf190 100644
--- a/daemon/health.go
+++ b/daemon/health.go
@@ -80,6 +80,7 @@
execConfig.Tty = false
execConfig.Privileged = false
execConfig.User = cntr.Config.User
+ execConfig.WorkingDir = cntr.Config.WorkingDir
linkedEnv, err := d.setupLinkedContainers(cntr)
if err != nil {
diff --git a/daemon/image.go b/daemon/image.go
index 6e90429..486f2a2 100644
--- a/daemon/image.go
+++ b/daemon/image.go
@@ -6,7 +6,6 @@
"github.com/docker/distribution/reference"
"github.com/docker/docker/image"
- "github.com/docker/docker/pkg/stringid"
)
// errImageDoesNotExist is error returned when no image can be found for a reference.
@@ -59,21 +58,6 @@
return id, imageOS, nil
}
- // deprecated: repo:shortid https://github.com/docker/docker/pull/799
- if tagged, ok := namedRef.(reference.Tagged); ok {
- if tag := tagged.Tag(); stringid.IsShortID(stringid.TruncateID(tag)) {
- for platform := range daemon.stores {
- if id, err := daemon.stores[platform].imageStore.Search(tag); err == nil {
- for _, storeRef := range daemon.referenceStore.References(id.Digest()) {
- if storeRef.Name() == namedRef.Name() {
- return id, platform, nil
- }
- }
- }
- }
- }
- }
-
// Search based on ID
for os := range daemon.stores {
if id, err := daemon.stores[os].imageStore.Search(refOrID); err == nil {
diff --git a/daemon/info.go b/daemon/info.go
index b14e7ba..bbb027e 100644
--- a/daemon/info.go
+++ b/daemon/info.go
@@ -154,24 +154,46 @@
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
- v := types.Version{
- Version: dockerversion.Version,
- GitCommit: dockerversion.GitCommit,
- MinAPIVersion: api.MinVersion,
- GoVersion: runtime.Version(),
- Os: runtime.GOOS,
- Arch: runtime.GOARCH,
- BuildTime: dockerversion.BuildTime,
- Experimental: daemon.configStore.Experimental,
- }
-
kernelVersion := "<unknown>"
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
- v.KernelVersion = kernelVersion
+
+ v := types.Version{
+ Components: []types.ComponentVersion{
+ {
+ Name: "Engine",
+ Version: dockerversion.Version,
+ Details: map[string]string{
+ "GitCommit": dockerversion.GitCommit,
+ "ApiVersion": api.DefaultVersion,
+ "MinAPIVersion": api.MinVersion,
+ "GoVersion": runtime.Version(),
+ "Os": runtime.GOOS,
+ "Arch": runtime.GOARCH,
+ "BuildTime": dockerversion.BuildTime,
+ "KernelVersion": kernelVersion,
+ "Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
+ },
+ },
+ },
+
+ // Populate deprecated fields for older clients
+ Version: dockerversion.Version,
+ GitCommit: dockerversion.GitCommit,
+ APIVersion: api.DefaultVersion,
+ MinAPIVersion: api.MinVersion,
+ GoVersion: runtime.Version(),
+ Os: runtime.GOOS,
+ Arch: runtime.GOARCH,
+ BuildTime: dockerversion.BuildTime,
+ KernelVersion: kernelVersion,
+ Experimental: daemon.configStore.Experimental,
+ }
+
+ v.Platform.Name = dockerversion.PlatformName
return v
}
diff --git a/daemon/initlayer/setup_windows.go b/daemon/initlayer/setup_windows.go
index b47563e..ff78a4d 100644
--- a/daemon/initlayer/setup_windows.go
+++ b/daemon/initlayer/setup_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package initlayer
import (
diff --git a/daemon/inspect_unix.go b/daemon/inspect_linux.go
similarity index 98%
rename from daemon/inspect_unix.go
rename to daemon/inspect_linux.go
index f073695..8d334dc 100644
--- a/daemon/inspect_unix.go
+++ b/daemon/inspect_linux.go
@@ -1,5 +1,3 @@
-// +build !windows
-
package daemon
import (
diff --git a/daemon/inspect_solaris.go b/daemon/inspect_solaris.go
deleted file mode 100644
index 0b275c1..0000000
--- a/daemon/inspect_solaris.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/backend"
- "github.com/docker/docker/api/types/versions/v1p19"
- "github.com/docker/docker/container"
- "github.com/docker/docker/daemon/exec"
-)
-
-// This sets platform-specific fields
-func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase {
- return contJSONBase
-}
-
-// containerInspectPre120 get containers for pre 1.20 APIs.
-func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) {
- return &v1p19.ContainerJSON{}, nil
-}
-
-func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig {
- return &backend.ExecProcessConfig{
- Tty: e.Tty,
- Entrypoint: e.Entrypoint,
- Arguments: e.Args,
- }
-}
diff --git a/daemon/kill.go b/daemon/kill.go
index 1292f86..5cde0d7 100644
--- a/daemon/kill.go
+++ b/daemon/kill.go
@@ -4,10 +4,10 @@
"context"
"fmt"
"runtime"
- "strings"
"syscall"
"time"
+ "github.com/docker/docker/api/errdefs"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/pkg/signal"
@@ -97,15 +97,11 @@
}
if err := daemon.kill(container, sig); err != nil {
- err = errors.Wrapf(err, "Cannot kill container %s", container.ID)
- // if container or process not exists, ignore the error
- // TODO: we shouldn't have to parse error strings from containerd
- if strings.Contains(err.Error(), "container not found") ||
- strings.Contains(err.Error(), "no such process") {
- logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error())
+ if errdefs.IsNotFound(err) {
unpause = false
+ logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'")
} else {
- return err
+ return errors.Wrapf(err, "Cannot kill container %s", container.ID)
}
}
@@ -171,7 +167,7 @@
// killPossibleDeadProcess is a wrapper around killSig() suppressing "no such process" error.
func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error {
err := daemon.killWithSignal(container, sig)
- if err == syscall.ESRCH {
+ if errdefs.IsNotFound(err) {
e := errNoSuchProcess{container.GetPID(), sig}
logrus.Debug(e)
return e
diff --git a/daemon/listeners/listeners_unix.go b/daemon/listeners/listeners_linux.go
similarity index 98%
rename from daemon/listeners/listeners_unix.go
rename to daemon/listeners/listeners_linux.go
index 3a7c0f8..7e0aaa2 100644
--- a/daemon/listeners/listeners_unix.go
+++ b/daemon/listeners/listeners_linux.go
@@ -1,5 +1,3 @@
-// +build !windows
-
package listeners
import (
diff --git a/daemon/listeners/listeners_solaris.go b/daemon/listeners/listeners_solaris.go
deleted file mode 100644
index ee1bd0f..0000000
--- a/daemon/listeners/listeners_solaris.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package listeners
-
-import (
- "crypto/tls"
- "fmt"
- "net"
- "os"
-
- "github.com/docker/go-connections/sockets"
- "github.com/sirupsen/logrus"
-)
-
-// Init creates new listeners for the server.
-func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) {
- switch proto {
- case "tcp":
- l, err := sockets.NewTCPSocket(addr, tlsConfig)
- if err != nil {
- return nil, err
- }
- ls = append(ls, l)
- case "unix":
- gid, err := lookupGID(socketGroup)
- if err != nil {
- if socketGroup != "" {
- if socketGroup != defaultSocketGroup {
- return nil, err
- }
- logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err)
- }
- gid = os.Getgid()
- }
- l, err := sockets.NewUnixSocket(addr, gid)
- if err != nil {
- return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err)
- }
- ls = append(ls, l)
- default:
- return nil, fmt.Errorf("Invalid protocol format: %q", proto)
- }
-
- return
-}
diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go
index 4ea9420..25dd215 100644
--- a/daemon/logger/awslogs/cloudwatchlogs.go
+++ b/daemon/logger/awslogs/cloudwatchlogs.go
@@ -95,6 +95,17 @@
}
}
+// eventBatch holds the events that are batched for submission and the
+// associated data about it.
+//
+// Warning: this type is not threadsafe and must not be used
+// concurrently. This type is expected to be consumed in a single
+// goroutine and never concurrently.
+type eventBatch struct {
+ batch []wrappedEvent
+ bytes int
+}
+
// New creates an awslogs logger using the configuration passed in on the
// context. Supported context configuration variables are awslogs-region,
// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern
@@ -389,32 +400,32 @@
// Logs, the processEvents method is called. If a multiline pattern is not
// configured, log events are submitted to the processEvents method immediately.
func (l *logStream) collectBatch() {
- timer := newTicker(batchPublishFrequency)
- var events []wrappedEvent
+ ticker := newTicker(batchPublishFrequency)
var eventBuffer []byte
var eventBufferTimestamp int64
+ var batch = newEventBatch()
for {
select {
- case t := <-timer.C:
+ case t := <-ticker.C:
// If event buffer is older than batch publish frequency flush the event buffer
if eventBufferTimestamp > 0 && len(eventBuffer) > 0 {
eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp
eventBufferExpired := eventBufferAge > int64(batchPublishFrequency)/int64(time.Millisecond)
eventBufferNegative := eventBufferAge < 0
if eventBufferExpired || eventBufferNegative {
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
}
}
- l.publishBatch(events)
- events = events[:0]
+ l.publishBatch(batch)
+ batch.reset()
case msg, more := <-l.messages:
if !more {
// Flush event buffer and release resources
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBuffer = eventBuffer[:0]
- l.publishBatch(events)
- events = events[:0]
+ l.publishBatch(batch)
+ batch.reset()
return
}
if eventBufferTimestamp == 0 {
@@ -425,7 +436,7 @@
if l.multilinePattern.Match(unprocessedLine) || len(eventBuffer)+len(unprocessedLine) > maximumBytesPerEvent {
// This is a new log event or we will exceed max bytes per event
// so flush the current eventBuffer to events and reset timestamp
- events = l.processEvent(events, eventBuffer, eventBufferTimestamp)
+ l.processEvent(batch, eventBuffer, eventBufferTimestamp)
eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond)
eventBuffer = eventBuffer[:0]
}
@@ -434,7 +445,7 @@
eventBuffer = append(eventBuffer, processedLine...)
logger.PutMessage(msg)
} else {
- events = l.processEvent(events, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
+ l.processEvent(batch, unprocessedLine, msg.Timestamp.UnixNano()/int64(time.Millisecond))
logger.PutMessage(msg)
}
}
@@ -450,8 +461,7 @@
// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event
// byte overhead (defined in perEventBytes) which is accounted for in split- and
// batch-calculations.
-func (l *logStream) processEvent(events []wrappedEvent, unprocessedLine []byte, timestamp int64) []wrappedEvent {
- bytes := 0
+func (l *logStream) processEvent(batch *eventBatch, unprocessedLine []byte, timestamp int64) {
for len(unprocessedLine) > 0 {
// Split line length so it does not exceed the maximum
lineBytes := len(unprocessedLine)
@@ -459,38 +469,33 @@
lineBytes = maximumBytesPerEvent
}
line := unprocessedLine[:lineBytes]
- unprocessedLine = unprocessedLine[lineBytes:]
- if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) {
- // Publish an existing batch if it's already over the maximum number of events or if adding this
- // event would push it over the maximum number of total bytes.
- l.publishBatch(events)
- events = events[:0]
- bytes = 0
- }
- events = append(events, wrappedEvent{
+
+ event := wrappedEvent{
inputLogEvent: &cloudwatchlogs.InputLogEvent{
Message: aws.String(string(line)),
Timestamp: aws.Int64(timestamp),
},
- insertOrder: len(events),
- })
- bytes += (lineBytes + perEventBytes)
+ insertOrder: batch.count(),
+ }
+
+ added := batch.add(event, lineBytes)
+ if added {
+ unprocessedLine = unprocessedLine[lineBytes:]
+ } else {
+ l.publishBatch(batch)
+ batch.reset()
+ }
}
- return events
}
// publishBatch calls PutLogEvents for a given set of InputLogEvents,
// accounting for sequencing requirements (each request must reference the
// sequence token returned by the previous request).
-func (l *logStream) publishBatch(events []wrappedEvent) {
- if len(events) == 0 {
+func (l *logStream) publishBatch(batch *eventBatch) {
+ if batch.isEmpty() {
return
}
-
- // events in a batch must be sorted by timestamp
- // see http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
- sort.Sort(byTimestamp(events))
- cwEvents := unwrapEvents(events)
+ cwEvents := unwrapEvents(batch.events())
nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken)
@@ -615,3 +620,70 @@
}
return cwEvents
}
+
+func newEventBatch() *eventBatch {
+ return &eventBatch{
+ batch: make([]wrappedEvent, 0),
+ bytes: 0,
+ }
+}
+
+// events returns a slice of wrappedEvents sorted in order of their
+// timestamps and then by their insertion order (see `byTimestamp`).
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) events() []wrappedEvent {
+ sort.Sort(byTimestamp(b.batch))
+ return b.batch
+}
+
+// add adds an event to the batch of events, accounting for the
+// necessary overhead for an event to be logged. It returns false
+// if the event cannot be added to the batch due to service
+// limits.
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) add(event wrappedEvent, size int) bool {
+ addBytes := size + perEventBytes
+
+ // verify we are still within service limits
+ switch {
+ case len(b.batch)+1 > maximumLogEventsPerPut:
+ return false
+ case b.bytes+addBytes > maximumBytesPerPut:
+ return false
+ }
+
+ b.bytes += addBytes
+ b.batch = append(b.batch, event)
+
+ return true
+}
+
+// count is the number of batched events. Warning: this method
+// is not threadsafe and must not be used concurrently.
+func (b *eventBatch) count() int {
+ return len(b.batch)
+}
+
+// size is the total number of bytes that the batch represents.
+//
+// Warning: this method is not threadsafe and must not be used
+// concurrently.
+func (b *eventBatch) size() int {
+ return b.bytes
+}
+
+func (b *eventBatch) isEmpty() bool {
+ zeroEvents := b.count() == 0
+ zeroSize := b.size() == 0
+ return zeroEvents && zeroSize
+}
+
+// reset prepares the batch for reuse.
+func (b *eventBatch) reset() {
+ b.bytes = 0
+ b.batch = b.batch[:0]
+}
diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go
index 7ebc5de..67ea474 100644
--- a/daemon/logger/awslogs/cloudwatchlogs_test.go
+++ b/daemon/logger/awslogs/cloudwatchlogs_test.go
@@ -49,6 +49,15 @@
}
}
+func testEventBatch(events []wrappedEvent) *eventBatch {
+ batch := newEventBatch()
+ for _, event := range events {
+ eventlen := len([]byte(*event.inputLogEvent.Message))
+ batch.add(event, eventlen)
+ }
+ return batch
+}
+
func TestNewAWSLogsClientUserAgentHandler(t *testing.T) {
info := logger.Info{
Config: map[string]string{
@@ -212,7 +221,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -257,7 +266,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -291,7 +300,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -354,7 +363,7 @@
},
}
- stream.publishBatch(events)
+ stream.publishBatch(testEventBatch(events))
if stream.sequenceToken == nil {
t.Fatal("Expected non-nil sequenceToken")
}
@@ -859,7 +868,8 @@
}
func TestCollectBatchMaxTotalBytes(t *testing.T) {
- mockClient := newMockClientBuffered(1)
+ expectedPuts := 2
+ mockClient := newMockClientBuffered(expectedPuts)
stream := &logStream{
client: mockClient,
logGroupName: groupName,
@@ -867,11 +877,14 @@
sequenceToken: aws.String(sequenceToken),
messages: make(chan *logger.Message),
}
- mockClient.putLogEventsResult <- &putLogEventsResult{
- successResult: &cloudwatchlogs.PutLogEventsOutput{
- NextSequenceToken: aws.String(nextSequenceToken),
- },
+ for i := 0; i < expectedPuts; i++ {
+ mockClient.putLogEventsResult <- &putLogEventsResult{
+ successResult: &cloudwatchlogs.PutLogEventsOutput{
+ NextSequenceToken: aws.String(nextSequenceToken),
+ },
+ }
}
+
var ticks = make(chan time.Time)
newTicker = func(_ time.Duration) *time.Ticker {
return &time.Ticker{
@@ -881,32 +894,57 @@
go stream.collectBatch()
- longline := strings.Repeat("A", maximumBytesPerPut)
+ numPayloads := maximumBytesPerPut / (maximumBytesPerEvent + perEventBytes)
+ // maxline is the maximum line that could be submitted after
+ // accounting for its overhead.
+ maxline := strings.Repeat("A", maximumBytesPerPut-(perEventBytes*numPayloads))
+ // This will be split and batched up to the `maximumBytesPerPut'
+ // (+/- `maximumBytesPerEvent'). This /should/ be aligned, but
+ // should also tolerate an offset within that range.
stream.Log(&logger.Message{
- Line: []byte(longline + "B"),
+ Line: []byte(maxline[:len(maxline)/2]),
+ Timestamp: time.Time{},
+ })
+ stream.Log(&logger.Message{
+ Line: []byte(maxline[len(maxline)/2:]),
+ Timestamp: time.Time{},
+ })
+ stream.Log(&logger.Message{
+ Line: []byte("B"),
Timestamp: time.Time{},
})
- // no ticks
+ // no ticks, guarantee batch by size (and chan close)
stream.Close()
argument := <-mockClient.putLogEventsArgument
if argument == nil {
t.Fatal("Expected non-nil PutLogEventsInput")
}
- bytes := 0
+
+ // Should total to the maximum allowed bytes.
+ eventBytes := 0
for _, event := range argument.LogEvents {
- bytes += len(*event.Message)
+ eventBytes += len(*event.Message)
}
- if bytes > maximumBytesPerPut {
- t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes)
+ eventsOverhead := len(argument.LogEvents) * perEventBytes
+ payloadTotal := eventBytes + eventsOverhead
+ // lowestMaxBatch allows the payload to be offset if the messages
+ // don't lend themselves to align with the maximum event size.
+ lowestMaxBatch := maximumBytesPerPut - maximumBytesPerEvent
+
+ if payloadTotal > maximumBytesPerPut {
+ t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, payloadTotal)
+ }
+ if payloadTotal < lowestMaxBatch {
+ t.Errorf("Batch to be no less than %d but was %d", lowestMaxBatch, payloadTotal)
}
argument = <-mockClient.putLogEventsArgument
if len(argument.LogEvents) != 1 {
t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents))
}
- message := *argument.LogEvents[0].Message
+ message := *argument.LogEvents[len(argument.LogEvents)-1].Message
if message[len(message)-1:] != "B" {
t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:])
}
diff --git a/daemon/logger/awslogs/cwlogsiface_mock_test.go b/daemon/logger/awslogs/cwlogsiface_mock_test.go
index 82bb34b..d0a2eba 100644
--- a/daemon/logger/awslogs/cwlogsiface_mock_test.go
+++ b/daemon/logger/awslogs/cwlogsiface_mock_test.go
@@ -1,6 +1,10 @@
package awslogs
-import "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+)
type mockcwlogsclient struct {
createLogGroupArgument chan *cloudwatchlogs.CreateLogGroupInput
@@ -67,7 +71,30 @@
LogGroupName: input.LogGroupName,
LogStreamName: input.LogStreamName,
}
+
+ // Intended mock output
output := <-m.putLogEventsResult
+
+ // Check enforced limits in the mock
+ totalBytes := 0
+ for _, evt := range events {
+ if evt.Message == nil {
+ continue
+ }
+ eventBytes := len([]byte(*evt.Message))
+ if eventBytes > maximumBytesPerEvent {
+ // exceeded per event message size limits
+ return nil, fmt.Errorf("maximum bytes per event exceeded: Event too large %d, max allowed: %d", eventBytes, maximumBytesPerEvent)
+ }
+ // total event bytes including overhead
+ totalBytes += eventBytes + perEventBytes
+ }
+
+ if totalBytes > maximumBytesPerPut {
+ // exceeded per put maximum size limit
+ return nil, fmt.Errorf("maximum bytes per put exceeded: Upload too large %d, max allowed: %d", totalBytes, maximumBytesPerPut)
+ }
+
return output.successResult, output.errorResult
}
diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go
index 6a0653e..cde36d4 100644
--- a/daemon/logger/fluentd/fluentd.go
+++ b/daemon/logger/fluentd/fluentd.go
@@ -48,11 +48,12 @@
defaultRetryWait = 1000
defaultMaxRetries = math.MaxInt32
- addressKey = "fluentd-address"
- bufferLimitKey = "fluentd-buffer-limit"
- retryWaitKey = "fluentd-retry-wait"
- maxRetriesKey = "fluentd-max-retries"
- asyncConnectKey = "fluentd-async-connect"
+ addressKey = "fluentd-address"
+ bufferLimitKey = "fluentd-buffer-limit"
+ retryWaitKey = "fluentd-retry-wait"
+ maxRetriesKey = "fluentd-max-retries"
+ asyncConnectKey = "fluentd-async-connect"
+ subSecondPrecisionKey = "fluentd-sub-second-precision"
)
func init() {
@@ -117,15 +118,23 @@
}
}
+ subSecondPrecision := false
+ if info.Config[subSecondPrecisionKey] != "" {
+ if subSecondPrecision, err = strconv.ParseBool(info.Config[subSecondPrecisionKey]); err != nil {
+ return nil, err
+ }
+ }
+
fluentConfig := fluent.Config{
- FluentPort: loc.port,
- FluentHost: loc.host,
- FluentNetwork: loc.protocol,
- FluentSocketPath: loc.path,
- BufferLimit: bufferLimit,
- RetryWait: retryWait,
- MaxRetry: maxRetries,
- AsyncConnect: asyncConnect,
+ FluentPort: loc.port,
+ FluentHost: loc.host,
+ FluentNetwork: loc.protocol,
+ FluentSocketPath: loc.path,
+ BufferLimit: bufferLimit,
+ RetryWait: retryWait,
+ MaxRetry: maxRetries,
+ AsyncConnect: asyncConnect,
+ SubSecondPrecision: subSecondPrecision,
}
logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig).
@@ -183,6 +192,7 @@
case retryWaitKey:
case maxRetriesKey:
case asyncConnectKey:
+ case subSecondPrecisionKey:
// Accepted
default:
return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key)
diff --git a/daemon/logger/gcplogs/gcplogging_linux.go b/daemon/logger/gcplogs/gcplogging_linux.go
index 8917bdd..41a0936 100644
--- a/daemon/logger/gcplogs/gcplogging_linux.go
+++ b/daemon/logger/gcplogs/gcplogging_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package gcplogs
import (
diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go
deleted file mode 100644
index 0995758..0000000
--- a/daemon/monitor_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/container"
- "github.com/docker/docker/libcontainerd"
-)
-
-// postRunProcessing perfoms any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(_ *container.Container, _ libcontainerd.EventInfo) error {
- return nil
-}
diff --git a/daemon/network.go b/daemon/network.go
index 0420caa..573901e 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -3,6 +3,7 @@
import (
"fmt"
"net"
+ "runtime"
"sort"
"strings"
"sync"
@@ -183,21 +184,14 @@
// Otherwise continue down the call to create or recreate sandbox.
}
- n, err := daemon.GetNetworkByID(create.ID)
+ _, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
}
-
- if err = daemon.createLoadBalancerSandbox("ingress", create.ID, ip, n, libnetwork.OptionIngress()); err != nil {
- logrus.Errorf("Failed creating load balancer sandbox for ingress network: %v", err)
- }
}
func (daemon *Daemon) releaseIngress(id string) {
controller := daemon.netController
- if err := controller.SandboxDestroy("ingress-sbox"); err != nil {
- logrus.Errorf("Failed to delete ingress sandbox: %v", err)
- }
if id == "" {
return
@@ -209,13 +203,6 @@
return
}
- for _, ep := range n.Endpoints() {
- if err := ep.Delete(true); err != nil {
- logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
- return
- }
- }
-
if err := n.Delete(); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return
@@ -270,34 +257,6 @@
return resp, err
}
-func (daemon *Daemon) createLoadBalancerSandbox(prefix, id string, ip net.IP, n libnetwork.Network, options ...libnetwork.SandboxOption) error {
- c := daemon.netController
- sandboxName := prefix + "-sbox"
- sb, err := c.NewSandbox(sandboxName, options...)
- if err != nil {
- if _, ok := err.(networktypes.ForbiddenError); !ok {
- return errors.Wrapf(err, "Failed creating %s sandbox", sandboxName)
- }
- return nil
- }
-
- endpointName := prefix + "-endpoint"
- ep, err := n.CreateEndpoint(endpointName, libnetwork.CreateOptionIpam(ip, nil, nil, nil), libnetwork.CreateOptionLoadBalancer())
- if err != nil {
- return errors.Wrapf(err, "Failed creating %s in sandbox %s", endpointName, sandboxName)
- }
-
- if err := ep.Join(sb, nil); err != nil {
- return errors.Wrapf(err, "Failed joining %s to sandbox %s", endpointName, sandboxName)
- }
-
- if err := sb.EnableService(); err != nil {
- return errors.Wrapf(err, "Failed enabling service in %s sandbox", sandboxName)
- }
-
- return nil
-}
-
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
@@ -360,6 +319,15 @@
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
}
+ if agent && driver == "overlay" && (create.Ingress || runtime.GOOS == "windows") {
+ nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
+ if !exists {
+ return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
+ }
+
+ nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
+ }
+
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
@@ -375,18 +343,6 @@
}
daemon.LogNetworkEvent(n, "create")
- if agent && !n.Info().Ingress() && n.Type() == "overlay" {
- nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
- if !exists {
- return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
- }
-
- if err := daemon.createLoadBalancerSandbox(create.Name, id, nodeIP, n); err != nil {
- return nil, err
- }
-
- }
-
return &types.NetworkCreateResponse{
ID: n.ID(),
Warning: warning,
@@ -517,43 +473,16 @@
return daemon.deleteNetwork(networkID, false)
}
-func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) {
- controller := daemon.netController
-
- //The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint")
- endpoints := n.Endpoints()
- if len(endpoints) == 1 {
- sandboxName := n.Name() + "-sbox"
-
- info := endpoints[0].Info()
- if info != nil {
- sb := info.Sandbox()
- if sb != nil {
- if err := sb.DisableService(); err != nil {
- logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
- //Ignore error and attempt to delete the load balancer endpoint
- }
- }
- }
-
- if err := endpoints[0].Delete(true); err != nil {
- logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err)
- //Ignore error and attempt to delete the sandbox.
- }
-
- if err := controller.SandboxDestroy(sandboxName); err != nil {
- logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
- //Ignore error and attempt to delete the network.
- }
- }
-}
-
func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
nw, err := daemon.FindNetwork(networkID)
if err != nil {
return err
}
+ if nw.Info().Ingress() {
+ return nil
+ }
+
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return notAllowedError{err}
@@ -569,10 +498,6 @@
return notAllowedError{err}
}
- if !nw.Info().Ingress() && nw.Type() == "overlay" {
- daemon.deleteLoadBalancerSandbox(nw)
- }
-
if err := nw.Delete(); err != nil {
return err
}
diff --git a/daemon/reload.go b/daemon/reload.go
index 0d16bc8..a20eb68 100644
--- a/daemon/reload.go
+++ b/daemon/reload.go
@@ -61,6 +61,9 @@
if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
return err
}
+ if err := daemon.reloadNetworkDiagnosticPort(conf, attributes); err != nil {
+ return err
+ }
return nil
}
@@ -308,3 +311,18 @@
attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)
return nil
}
+
+// reloadNetworkDiagnosticPort updates the network controller starting the diagnose mode if the config is valid
+func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error {
+ if conf == nil || daemon.netController == nil {
+ return nil
+ }
+ // Enable the network diagnose if the flag is set with a valid port within the range
+ if conf.IsValueSet("network-diagnostic-port") && conf.NetworkDiagnosticPort > 0 && conf.NetworkDiagnosticPort < 65536 {
+ logrus.Warnf("Calling the diagnostic start with %d", conf.NetworkDiagnosticPort)
+ daemon.netController.StartDiagnose(conf.NetworkDiagnosticPort)
+ } else {
+ daemon.netController.StopDiagnose()
+ }
+ return nil
+}
diff --git a/daemon/reload_test.go b/daemon/reload_test.go
index 96b1a24..03b249b 100644
--- a/daemon/reload_test.go
+++ b/daemon/reload_test.go
@@ -10,6 +10,7 @@
"github.com/docker/docker/pkg/discovery"
_ "github.com/docker/docker/pkg/discovery/memory"
"github.com/docker/docker/registry"
+ "github.com/docker/libnetwork"
"github.com/stretchr/testify/assert"
)
@@ -479,3 +480,71 @@
t.Fatal(e)
}
}
+
+func TestDaemonReloadNetworkDiagnosticPort(t *testing.T) {
+ daemon := &Daemon{}
+ daemon.configStore = &config.Config{}
+
+ valuesSet := make(map[string]interface{})
+ valuesSet["network-diagnostic-port"] = 2000
+ enableConfig := &config.Config{
+ CommonConfig: config.CommonConfig{
+ NetworkDiagnosticPort: 2000,
+ ValuesSet: valuesSet,
+ },
+ }
+ disableConfig := &config.Config{
+ CommonConfig: config.CommonConfig{},
+ }
+
+ netOptions, err := daemon.networkOptions(enableConfig, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ controller, err := libnetwork.New(netOptions...)
+ if err != nil {
+ t.Fatal(err)
+ }
+ daemon.netController = controller
+
+ // Enable/Disable the server for some iterations
+ for i := 0; i < 10; i++ {
+ enableConfig.CommonConfig.NetworkDiagnosticPort++
+ if err := daemon.Reload(enableConfig); err != nil {
+ t.Fatal(err)
+ }
+ // Check that the diagnose is enabled
+ if !daemon.netController.IsDiagnoseEnabled() {
+ t.Fatalf("diagnosed should be enable")
+ }
+
+ // Reload
+ if err := daemon.Reload(disableConfig); err != nil {
+ t.Fatal(err)
+ }
+ // Check that the diagnose is disabled
+ if daemon.netController.IsDiagnoseEnabled() {
+ t.Fatalf("diagnosed should be disable")
+ }
+ }
+
+ enableConfig.CommonConfig.NetworkDiagnosticPort++
+ // 2 times the enable should not create problems
+ if err := daemon.Reload(enableConfig); err != nil {
+ t.Fatal(err)
+ }
+ // Check that the diagnose is enabled
+ if !daemon.netController.IsDiagnoseEnabled() {
+ t.Fatalf("diagnosed should be enable")
+ }
+
+ // Check that another reload does not cause issues
+ if err := daemon.Reload(enableConfig); err != nil {
+ t.Fatal(err)
+ }
+ // Check that the diagnose is enabled
+ if !daemon.netController.IsDiagnoseEnabled() {
+ t.Fatalf("diagnosed should be enable")
+ }
+
+}
diff --git a/daemon/secrets_linux.go b/daemon/secrets_linux.go
index fca4e12..6ae0117 100644
--- a/daemon/secrets_linux.go
+++ b/daemon/secrets_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
func secretsSupported() bool {
diff --git a/daemon/secrets_windows.go b/daemon/secrets_windows.go
index 9054354..6ae0117 100644
--- a/daemon/secrets_windows.go
+++ b/daemon/secrets_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
func secretsSupported() bool {
diff --git a/daemon/selinux_linux.go b/daemon/selinux_linux.go
index fb2578b..46da7f1 100644
--- a/daemon/selinux_linux.go
+++ b/daemon/selinux_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
import "github.com/opencontainers/selinux/go-selinux"
diff --git a/daemon/stats/collector_windows.go b/daemon/stats/collector_windows.go
index 5fb27ce..03109fd 100644
--- a/daemon/stats/collector_windows.go
+++ b/daemon/stats/collector_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package stats
// platformNewStatsCollector performs platform specific initialisation of the
diff --git a/daemon/update_linux.go b/daemon/update_linux.go
index 41d3b53..966d74e 100644
--- a/daemon/update_linux.go
+++ b/daemon/update_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package daemon
import (
diff --git a/daemon/update_solaris.go b/daemon/update_solaris.go
deleted file mode 100644
index f3b545c..0000000
--- a/daemon/update_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package daemon
-
-import (
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/libcontainerd"
-)
-
-func toContainerdResources(resources container.Resources) libcontainerd.Resources {
- var r libcontainerd.Resources
- return r
-}
diff --git a/daemon/update_windows.go b/daemon/update_windows.go
index 4f85f41..e60f63d 100644
--- a/daemon/update_windows.go
+++ b/daemon/update_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
import (
diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go
index 62c9e23..bfb5133 100644
--- a/daemon/volumes_windows.go
+++ b/daemon/volumes_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package daemon
import (
diff --git a/distribution/errors.go b/distribution/errors.go
index dd6ff0a..355e9da 100644
--- a/distribution/errors.go
+++ b/distribution/errors.go
@@ -126,21 +126,25 @@
// continueOnError returns true if we should fallback to the next endpoint
// as a result of this error.
-func continueOnError(err error) bool {
+func continueOnError(err error, mirrorEndpoint bool) bool {
switch v := err.(type) {
case errcode.Errors:
if len(v) == 0 {
return true
}
- return continueOnError(v[0])
+ return continueOnError(v[0], mirrorEndpoint)
case ErrNoSupport:
- return continueOnError(v.Err)
+ return continueOnError(v.Err, mirrorEndpoint)
case errcode.Error:
- return shouldV2Fallback(v)
+ return mirrorEndpoint || shouldV2Fallback(v)
case *client.UnexpectedHTTPResponseError:
return true
case ImageConfigPullError:
- return false
+ // ImageConfigPullError only happens with v2 images, v1 fallback is
+ // unnecessary.
+ // Failures from a mirror endpoint should result in fallback to the
+ // canonical repo.
+ return mirrorEndpoint
case error:
return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error()))
}
diff --git a/distribution/errors_test.go b/distribution/errors_test.go
new file mode 100644
index 0000000..aa9ef4f
--- /dev/null
+++ b/distribution/errors_test.go
@@ -0,0 +1,85 @@
+package distribution
+
+import (
+ "errors"
+ "strings"
+ "syscall"
+ "testing"
+
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/client"
+)
+
+var alwaysContinue = []error{
+ &client.UnexpectedHTTPResponseError{},
+
+ // Some errcode.Errors that don't disprove the existence of a V1 image
+ errcode.Error{Code: errcode.ErrorCodeUnauthorized},
+ errcode.Error{Code: v2.ErrorCodeManifestUnknown},
+ errcode.Error{Code: v2.ErrorCodeNameUnknown},
+
+ errors.New("some totally unexpected error"),
+}
+
+var continueFromMirrorEndpoint = []error{
+ ImageConfigPullError{},
+
+ // Some other errcode.Error that doesn't indicate we should search for a V1 image.
+ errcode.Error{Code: errcode.ErrorCodeTooManyRequests},
+}
+
+var neverContinue = []error{
+ errors.New(strings.ToLower(syscall.ESRCH.Error())), // No such process
+}
+
+func TestContinueOnError_NonMirrorEndpoint(t *testing.T) {
+ for _, err := range alwaysContinue {
+ if !continueOnError(err, false) {
+ t.Errorf("Should continue from non-mirror endpoint: %T: '%s'", err, err.Error())
+ }
+ }
+
+ for _, err := range continueFromMirrorEndpoint {
+ if continueOnError(err, false) {
+ t.Errorf("Should only continue from mirror endpoint: %T: '%s'", err, err.Error())
+ }
+ }
+}
+
+func TestContinueOnError_MirrorEndpoint(t *testing.T) {
+ errs := []error{}
+ errs = append(errs, alwaysContinue...)
+ errs = append(errs, continueFromMirrorEndpoint...)
+ for _, err := range errs {
+ if !continueOnError(err, true) {
+ t.Errorf("Should continue from mirror endpoint: %T: '%s'", err, err.Error())
+ }
+ }
+}
+
+func TestContinueOnError_NeverContinue(t *testing.T) {
+ for _, isMirrorEndpoint := range []bool{true, false} {
+ for _, err := range neverContinue {
+ if continueOnError(err, isMirrorEndpoint) {
+ t.Errorf("Should never continue: %T: '%s'", err, err.Error())
+ }
+ }
+ }
+}
+
+func TestContinueOnError_UnnestsErrors(t *testing.T) {
+ // ContinueOnError should evaluate nested errcode.Errors.
+
+	// Assumes that v2.ErrorCodeNameUnknown is a continuable error code.
+ err := errcode.Errors{errcode.Error{Code: v2.ErrorCodeNameUnknown}}
+ if !continueOnError(err, false) {
+ t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors")
+ }
+
+ // Assumes that errcode.ErrorCodeTooManyRequests is not a V1-fallback indication
+ err = errcode.Errors{errcode.Error{Code: errcode.ErrorCodeTooManyRequests}}
+ if continueOnError(err, false) {
+ t.Fatal("ContinueOnError should unnest, base return value on errcode.Errors")
+ }
+}
diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go
index c8d784c..35ff529 100644
--- a/distribution/pull_v2.go
+++ b/distribution/pull_v2.go
@@ -74,7 +74,7 @@
if _, ok := err.(fallbackError); ok {
return err
}
- if continueOnError(err) {
+ if continueOnError(err, p.endpoint.Mirror) {
return fallbackError{
err: err,
confirmedV2: p.confirmedV2,
diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go
index b4573e1..08ff437 100644
--- a/distribution/pull_v2_windows.go
+++ b/distribution/pull_v2_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package distribution
import (
diff --git a/distribution/push_v2.go b/distribution/push_v2.go
index 7ffce5b..2aecc18 100644
--- a/distribution/push_v2.go
+++ b/distribution/push_v2.go
@@ -67,7 +67,7 @@
}
if err = p.pushV2Repository(ctx); err != nil {
- if continueOnError(err) {
+ if continueOnError(err, p.endpoint.Mirror) {
return fallbackError{
err: err,
confirmedV2: p.pushState.confirmedV2,
diff --git a/dockerversion/version_lib.go b/dockerversion/version_lib.go
index 33f77d3..72f4893 100644
--- a/dockerversion/version_lib.go
+++ b/dockerversion/version_lib.go
@@ -13,4 +13,5 @@
ContainerdCommitID string = "library-import"
RuncCommitID string = "library-import"
InitCommitID string = "library-import"
+ PlatformName string = ""
)
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
index 5056c0d..0fdf464 100644
--- a/docs/api/version-history.md
+++ b/docs/api/version-history.md
@@ -13,6 +13,11 @@
will be rejected.
-->
+## v1.36 API changes
+
+[Docker Engine API v1.36](https://docs.docker.com/engine/api/v1.36/) documentation
+
+
## v1.35 API changes
[Docker Engine API v1.35](https://docs.docker.com/engine/api/v1.35/) documentation
@@ -23,6 +28,18 @@
configuration is only used for Windows containers.
* `GET /containers/(name)/logs` now supports an additional query parameter: `until`,
which returns log lines that occurred before the specified timestamp.
+* `POST /containers/{id}/exec` now accepts a `WorkingDir` property to set the
+ work-dir for the exec process, independent of the container's work-dir.
+* `GET /version` now returns a `Platform.Name` field, which can be used by products
+  using Moby as a foundation to return information about the platform.
+* `GET /version` now returns a `Components` field, which can be used to return
+ information about the components used. Information about the engine itself is
+ now included as a "Component" version, and contains all information from the
+ top-level `Version`, `GitCommit`, `APIVersion`, `MinAPIVersion`, `GoVersion`,
+ `Os`, `Arch`, `BuildTime`, `KernelVersion`, and `Experimental` fields. Going
+ forward, the information from the `Components` section is preferred over their
+ top-level counterparts.
+
## v1.34 API changes
diff --git a/docs/contributing/set-up-dev-env.md b/docs/contributing/set-up-dev-env.md
index acd6888..b4cacf5 100644
--- a/docs/contributing/set-up-dev-env.md
+++ b/docs/contributing/set-up-dev-env.md
@@ -10,8 +10,7 @@
You use the `moby/moby` repository and its `Dockerfile` to create a Docker image,
run a Docker container, and develop code in the container.
-If you followed the procedures that <a href="/opensource/project/set-up-git/" target="_blank">
-set up Git for contributing</a>, you should have a fork of the `moby/moby`
+If you followed the procedures that [set up Git for contributing](./set-up-git.md), you should have a fork of the `moby/moby`
repository. You also created a branch called `dry-run-test`. In this section,
you continue working with your fork on this branch.
@@ -106,8 +105,7 @@
```
If you are following along with this guide, you created a `dry-run-test`
- branch when you <a href="/opensource/project/set-up-git/" target="_blank">
- set up Git for contributing</a>.
+ branch when you [set up Git for contributing](./set-up-git.md).
3. Ensure you are on your `dry-run-test` branch.
@@ -131,15 +129,16 @@
```none
Successfully built 3d872560918e
+ Successfully tagged docker-dev:dry-run-test
docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/moby/moby/bundles" -t "docker-dev:dry-run-test" bash
- root@f31fa223770f:/go/src/github.com/moby/moby#
+ root@f31fa223770f:/go/src/github.com/docker/docker#
```
At this point, your prompt reflects the container's BASH shell.
-5. List the contents of the current directory (`/go/src/github.com/moby/moby`).
+5. List the contents of the current directory (`/go/src/github.com/docker/docker`).
- You should see the image's source from the `/go/src/github.com/moby/moby`
+ You should see the image's source from the `/go/src/github.com/docker/docker`
directory.
![List example](images/list_example.png)
@@ -147,7 +146,7 @@
6. Make a `dockerd` binary.
```none
- root@a8b2885ab900:/go/src/github.com/moby/moby# hack/make.sh binary
+ root@a8b2885ab900:/go/src/github.com/docker/docker# hack/make.sh binary
Removing bundles/
---> Making bundle: binary (in bundles/binary)
@@ -161,7 +160,7 @@
`/usr/local/bin/` directory.
```none
- root@a8b2885ab900:/go/src/github.com/moby/moby# make install
+ root@a8b2885ab900:/go/src/github.com/docker/docker# make install
```
8. Start the Engine daemon running in the background.
@@ -187,13 +186,65 @@
hack/make.sh binary install-binary run
```
-9. Inside your container, check your Docker version.
+9. Inside your container, check your Docker versions:
```none
- root@5f8630b873fe:/go/src/github.com/moby/moby# docker --version
- Docker version 1.12.0-dev, build 6e728fb
+ # docker version
+ Client:
+ Version: 17.06.0-ce
+ API version: 1.30
+ Go version: go1.8.3
+ Git commit: 02c1d87
+ Built: Fri Jun 23 21:15:15 2017
+ OS/Arch: linux/amd64
+
+ Server:
+ Version: dev
+ API version: 1.35 (minimum version 1.12)
+ Go version: go1.9.2
+ Git commit: 4aa6362da
+ Built: Sat Dec 2 05:22:42 2017
+ OS/Arch: linux/amd64
+ Experimental: false
```
+ Notice the split versions between client and server, which might be
+ unexpected. In more recent times the Docker CLI component (which provides the
+ `docker` command) has split out from the Moby project and is now maintained in:
+
+ * [docker/cli](https://github.com/docker/cli) - The Docker CLI source-code;
+ * [docker/docker-ce](https://github.com/docker/docker-ce) - The Docker CE
+ edition project, which assembles engine, CLI and other components.
+
+ The Moby project now defaults to a [fixed
+ version](https://github.com/docker/docker-ce/commits/v17.06.0-ce) of the
+ `docker` CLI for integration tests.
+
+ You may have noticed the following message when starting the container with the `shell` command:
+
+ ```none
+ Makefile:123: The docker client CLI has moved to github.com/docker/cli. For a dev-test cycle involving the CLI, run:
+ DOCKER_CLI_PATH=/host/path/to/cli/binary make shell
+ then change the cli and compile into a binary at the same location.
+ ```
+
+ By setting `DOCKER_CLI_PATH` you can supply a newer `docker` CLI to the
+ server development container for testing and for `integration-cli`
+ test-execution:
+
+ ```none
+ make DOCKER_CLI_PATH=/home/ubuntu/git/docker-ce/components/packaging/static/build/linux/docker/docker BIND_DIR=. shell
+ ...
+ # which docker
+ /usr/local/cli/docker
+ # docker --version
+ Docker version 17.09.0-dev, build
+ ```
+
+ This Docker CLI should be built from the [docker-ce
+ project](https://github.com/docker/docker-ce) and needs to be a Linux
+ binary.
+
Inside the container you are running a development version. This is the version
on the current branch. It reflects the value of the `VERSION` file at the
root of your `docker-fork` repository.
@@ -201,13 +252,13 @@
10. Run the `hello-world` image.
```none
- root@5f8630b873fe:/go/src/github.com/moby/moby# docker run hello-world
+ root@5f8630b873fe:/go/src/github.com/docker/docker# docker run hello-world
```
11. List the image you just downloaded.
```none
- root@5f8630b873fe:/go/src/github.com/moby/moby# docker images
+ root@5f8630b873fe:/go/src/github.com/docker/docker# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello-world latest c54a2cc56cbb 3 months ago 1.85 kB
```
@@ -296,7 +347,7 @@
10. To view your change, run the `dockerd --help` command in the docker development container shell.
```bash
- root@b0cb4f22715d:/go/src/github.com/moby/moby# dockerd --help
+ root@b0cb4f22715d:/go/src/github.com/docker/docker# dockerd --help
Usage: dockerd COMMAND
diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits
index 1b38076..3b52082 100644
--- a/hack/dockerfile/binaries-commits
+++ b/hack/dockerfile/binaries-commits
@@ -4,7 +4,7 @@
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=6bff39c643886dfa3d546e83a90a527b64ddeacf
+CONTAINERD_COMMIT=89623f28b87a6004d4b785663257362d1658a729 # v1.0.0
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
diff --git a/hack/make.ps1 b/hack/make.ps1
index 3380a5b..42a2b31 100644
--- a/hack/make.ps1
+++ b/hack/make.ps1
@@ -365,7 +365,7 @@
# Run autogen if building binaries or running unit tests.
if ($Client -or $Daemon -or $TestUnit) {
Write-Host "INFO: Invoking autogen..."
- Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion }
+ Try { .\hack\make\.go-autogen.ps1 -CommitString $gitCommit -DockerVersion $dockerVersion -Platform "$env:PLATFORM" }
Catch [Exception] { Throw $_ }
}
diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen
index b68e3a7..850c3ec 100644
--- a/hack/make/.go-autogen
+++ b/hack/make/.go-autogen
@@ -18,6 +18,7 @@
BuildTime string = "$BUILDTIME"
IAmStatic string = "${IAMSTATIC:-true}"
ContainerdCommitID string = "${CONTAINERD_COMMIT}"
+ PlatformName string = "${PLATFORM}"
)
// AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen
diff --git a/hack/make/.go-autogen.ps1 b/hack/make/.go-autogen.ps1
index 768badb..cc14e9e 100644
--- a/hack/make/.go-autogen.ps1
+++ b/hack/make/.go-autogen.ps1
@@ -14,7 +14,8 @@
param(
[Parameter(Mandatory=$true)][string]$CommitString,
- [Parameter(Mandatory=$true)][string]$DockerVersion
+ [Parameter(Mandatory=$true)][string]$DockerVersion,
+ [Parameter(Mandatory=$false)][string]$Platform
)
$ErrorActionPreference = "Stop"
@@ -43,6 +44,7 @@
GitCommit string = "'+$CommitString+'"
Version string = "'+$DockerVersion+'"
BuildTime string = "'+$buildDateTime+'"
+ PlatformName string = "'+$Platform+'"
)
// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1
diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go
index 60ca4b9..e7d77f0 100644
--- a/integration-cli/docker_api_info_test.go
+++ b/integration-cli/docker_api_info_test.go
@@ -1,13 +1,10 @@
package main
import (
- "encoding/json"
"net/http"
"fmt"
- "github.com/docker/docker/api/types"
-
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/request"
@@ -48,25 +45,6 @@
}
}
-// TestInfoAPIRuncCommit tests that dockerd is able to obtain RunC version
-// information, and that the version matches the expected version
-func (s *DockerSuite) TestInfoAPIRuncCommit(c *check.C) {
- testRequires(c, DaemonIsLinux) // Windows does not have RunC version information
-
- res, body, err := request.Get("/v1.30/info")
- c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
- c.Assert(err, checker.IsNil)
-
- b, err := request.ReadBody(body)
- c.Assert(err, checker.IsNil)
-
- var i types.Info
-
- c.Assert(json.Unmarshal(b, &i), checker.IsNil)
- c.Assert(i.RuncCommit.ID, checker.Not(checker.Equals), "N/A")
- c.Assert(i.RuncCommit.ID, checker.Equals, i.RuncCommit.Expected)
-}
-
func (s *DockerSuite) TestInfoAPIVersioned(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
index 0672e32..89c2865 100644
--- a/integration-cli/docker_api_logs_test.go
+++ b/integration-cli/docker_api_logs_test.go
@@ -151,7 +151,7 @@
func (s *DockerSuite) TestLogsAPIUntil(c *check.C) {
name := "logsuntil"
- dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 0.5; done")
+ dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 1; done")
client, err := request.NewClient()
if err != nil {
@@ -172,6 +172,8 @@
// Get timestamp of second log line
allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true})
+ c.Assert(len(allLogs), checker.GreaterOrEqualThan, 3)
+
t, err := time.Parse(time.RFC3339Nano, strings.Split(allLogs[1], " ")[0])
c.Assert(err, checker.IsNil)
until := t.Format(time.RFC3339Nano)
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 89e62c1..e60f4d5 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -4860,7 +4860,7 @@
}
}
-func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageArg(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `FROM busybox
ARG foo=abc
@@ -4884,7 +4884,7 @@
c.Assert(result.Stdout(), checker.Contains, "bar=def")
}
-func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageGlobalArg(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `ARG tag=nosuchtag
FROM busybox:${tag}
@@ -4909,7 +4909,7 @@
c.Assert(result.Stdout(), checker.Contains, "tag=latest")
}
-func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageUnusedArg(c *check.C) {
imgName := "multifromunusedarg"
dockerfile := `FROM busybox
ARG foo
@@ -5727,7 +5727,7 @@
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
-func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCache(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
@@ -5888,7 +5888,7 @@
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n")
}
-func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCopyFromSyntax(c *check.C) {
dockerfile := `
FROM busybox AS first
COPY foo bar
@@ -5946,7 +5946,7 @@
cli.DockerCmd(c, "run", "build4", "cat", "baz").Assert(c, icmd.Expected{Out: "pqr"})
}
-func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageCopyFromErrors(c *check.C) {
testCases := []struct {
dockerfile string
expectedError string
@@ -5993,7 +5993,7 @@
}
}
-func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageMultipleBuilds(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar`
@@ -6026,7 +6026,7 @@
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
-func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageImplicitFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY --from=busybox /etc/passwd /mypasswd
@@ -6053,7 +6053,7 @@
}
}
-func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) {
+func (s *DockerRegistrySuite) TestBuildMultiStageImplicitPull(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL)
dockerfile := `
@@ -6083,7 +6083,7 @@
cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"})
}
-func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageNameVariants(c *check.C) {
dockerfile := `
FROM busybox as foo
COPY foo /
@@ -6094,7 +6094,7 @@
FROM foo
COPY --from=foo1 foo f1
COPY --from=FOo2 foo f2
- ` // foo2 case also tests that names are canse insensitive
+ ` // foo2 case also tests that names are case insensitive
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
@@ -6108,7 +6108,7 @@
cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"})
}
-func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) {
+func (s *DockerTrustSuite) TestBuildMultiStageTrusted(c *check.C) {
img1 := s.setupTrustedImage(c, "trusted-build1")
img2 := s.setupTrustedImage(c, "trusted-build2")
dockerFile := fmt.Sprintf(`
@@ -6130,7 +6130,7 @@
dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"})
}
-func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) {
+func (s *DockerSuite) TestBuildMultiStageMultipleBuildsWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
@@ -6218,7 +6218,7 @@
}
// #33176
-func (s *DockerSuite) TestBuildCopyFromResetScratch(c *check.C) {
+func (s *DockerSuite) TestBuildMulitStageResetScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go
index 58a50ce..057c2d6 100644
--- a/integration-cli/docker_cli_commit_test.go
+++ b/integration-cli/docker_cli_commit_test.go
@@ -121,11 +121,19 @@
"test", "test-commit")
imageID = strings.TrimSpace(imageID)
+ // The ordering here is due to `PATH` being overridden from the container's
+ // ENV. On windows, the container doesn't have a `PATH` ENV variable so
+ // the ordering is the same as the cli.
+ expectedEnv := "[PATH=/foo DEBUG=true test=1]"
+ if testEnv.DaemonPlatform() == "windows" {
+ expectedEnv = "[DEBUG=true test=1 PATH=/foo]"
+ }
+
prefix, slash := getPrefixAndSlashFromDaemonPlatform()
prefix = strings.ToUpper(prefix) // Force C: as that's how WORKDIR is normalized on Windows
expected := map[string]string{
"Config.ExposedPorts": "map[8080/tcp:{}]",
- "Config.Env": "[DEBUG=true test=1 PATH=/foo]",
+ "Config.Env": expectedEnv,
"Config.Labels": "map[foo:bar]",
"Config.Cmd": "[/bin/sh]",
"Config.WorkingDir": prefix + slash + "opt",
diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go
index f5fe0da..8e12314 100644
--- a/integration-cli/docker_cli_create_test.go
+++ b/integration-cli/docker_cli_create_test.go
@@ -268,7 +268,6 @@
dockerCmd(c, "create", imageID)
dockerCmd(c, "create", truncatedImageID)
- dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID))
// Ensure this fails
out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID))
@@ -280,7 +279,10 @@
c.Fatalf(`Expected %q in output; got: %s`, expected, out)
}
- out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID))
+ if i := strings.IndexRune(imageID, ':'); i >= 0 {
+ imageID = imageID[i+1:]
+ }
+ out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", imageID))
if exit == 0 {
c.Fatalf("expected non-zero exit code; received %d", exit)
}
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index 6865b92..fb61626 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -1451,7 +1451,7 @@
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
+ "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
// restart daemon.
d.Restart(c)
@@ -2011,7 +2011,7 @@
// kill the container
icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
+ "--namespace", moby_daemon.ContainersNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
// Give time to containerd to process the command if we don't
// the exit event might be received after we do the inspect
@@ -2106,7 +2106,7 @@
result := icmd.RunCommand(
ctrBinary,
"--address", "/var/run/docker/containerd/docker-containerd.sock",
- "--namespace", moby_daemon.MainNamespace,
+ "--namespace", moby_daemon.ContainersNamespace,
"tasks", "resume", cid)
result.Assert(t, icmd.Success)
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
index dff54a4..b75dcc1 100644
--- a/integration-cli/docker_cli_events_test.go
+++ b/integration-cli/docker_cli_events_test.go
@@ -81,50 +81,6 @@
}
}
-func (s *DockerSuite) TestEventsLimit(c *check.C) {
- // Windows: Limit to 4 goroutines creating containers in order to prevent
- // timeouts creating so many containers simultaneously. This is a due to
- // a bug in the Windows platform. It will be fixed in a Windows Update.
- numContainers := 17
- eventPerContainer := 7 // create, attach, network connect, start, die, network disconnect, destroy
- numConcurrentContainers := numContainers
- if testEnv.DaemonPlatform() == "windows" {
- numConcurrentContainers = 4
- }
- sem := make(chan bool, numConcurrentContainers)
- errChan := make(chan error, numContainers)
-
- startTime := daemonUnixTime(c)
-
- args := []string{"run", "--rm", "busybox", "true"}
- for i := 0; i < numContainers; i++ {
- sem <- true
- go func(i int) {
- defer func() { <-sem }()
- out, err := exec.Command(dockerBinary, args...).CombinedOutput()
- if err != nil {
- err = fmt.Errorf("%v: %s", err, string(out))
- }
- errChan <- err
- }(i)
- }
-
- // Wait for all goroutines to finish
- for i := 0; i < cap(sem); i++ {
- sem <- true
- }
- close(errChan)
-
- for err := range errChan {
- c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " ")))
- }
-
- out, _ := dockerCmd(c, "events", "--since="+startTime, "--until", daemonUnixTime(c))
- events := strings.Split(out, "\n")
- nEvents := len(events) - 1
- c.Assert(nEvents, checker.Equals, numContainers*eventPerContainer, check.Commentf("events should be limited to 256, but received %d", nEvents))
-}
-
func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {
dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true")
diff --git a/integration-cli/docker_cli_logout_test.go b/integration-cli/docker_cli_logout_test.go
index 5076ceb..e0752f4 100644
--- a/integration-cli/docker_cli_logout_test.go
+++ b/integration-cli/docker_cli_logout_test.go
@@ -13,9 +13,7 @@
)
func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) {
-
- // @TODO TestLogoutWithExternalAuth expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
- s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
+ s.d.StartWithBusybox(c)
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
@@ -62,7 +60,7 @@
// check I cannot pull anymore
out, err := s.d.Cmd("--config", tmp, "pull", repoName)
c.Assert(err, check.NotNil, check.Commentf(out))
- c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found")
+ c.Assert(out, checker.Contains, "no basic auth credentials")
}
// #23100
diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go
index 613cdb3..0e88b1e 100644
--- a/integration-cli/docker_cli_pull_test.go
+++ b/integration-cli/docker_cli_pull_test.go
@@ -259,18 +259,6 @@
c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected"))
}
-func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) {
- // @TODO TestPullNoCredentialsNotFound expects docker to fall back to a v1 registry, so has to be updated for v17.12, when v1 registries are no longer supported
- s.d.StartWithBusybox(c, "--disable-legacy-registry=false")
-
- // we don't care about the actual image, we just want to see image not found
- // because that means v2 call returned 401 and we fell back to v1 which usually
- // gives a 404 (in this case the test registry doesn't handle v1 at all)
- out, err := s.d.Cmd("pull", privateRegistryURL+"/busybox")
- c.Assert(err, check.NotNil, check.Commentf(out))
- c.Assert(out, checker.Contains, "Error: image busybox:latest not found")
-}
-
// Regression test for https://github.com/docker/docker/issues/26429
func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) {
testRequires(c, DaemonIsWindows, Network)
diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go
index 9488fff..283f576 100644
--- a/integration-cli/docker_cli_swarm_test.go
+++ b/integration-cli/docker_cli_swarm_test.go
@@ -849,7 +849,7 @@
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
// We need to get the container id.
- out, err = d.Cmd("ps", "-a", "-q", "--no-trunc")
+ out, err = d.Cmd("ps", "-q", "--no-trunc")
c.Assert(err, checker.IsNil)
id := strings.TrimSpace(out)
@@ -872,7 +872,7 @@
waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
// We need to get the container id.
- out, err = d.Cmd("ps", "-a", "-q", "--no-trunc")
+ out, err = d.Cmd("ps", "-q", "--no-trunc")
c.Assert(err, checker.IsNil)
id = strings.TrimSpace(out)
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
index ee94a9b..278e348 100644
--- a/integration-cli/docker_cli_tag_test.go
+++ b/integration-cli/docker_cli_tag_test.go
@@ -1,13 +1,10 @@
package main
import (
- "fmt"
"strings"
"github.com/docker/docker/integration-cli/checker"
- "github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/internal/testutil"
- "github.com/docker/docker/pkg/stringid"
"github.com/go-check/check"
)
@@ -140,29 +137,3 @@
c.Fatal("tagging with image named \"sha256\" should have failed")
}
}
-
-// ensure tags cannot create ambiguity with image ids
-func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) {
- buildImageSuccessfully(c, "notbusybox:latest", build.WithDockerfile(`FROM busybox
- MAINTAINER dockerio`))
- imageID := getIDByName(c, "notbusybox:latest")
- truncatedImageID := stringid.TruncateID(imageID)
- truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID)
-
- id := inspectField(c, truncatedTag, "Id")
-
- // Ensure inspect by image id returns image for image id
- c.Assert(id, checker.Equals, imageID)
- c.Logf("Built image: %s", imageID)
-
- // test setting tag fails
- _, _, err := dockerCmdWithError("tag", "busybox:latest", truncatedTag)
- if err != nil {
- c.Fatalf("Error tagging with an image id: %s", err)
- }
-
- id = inspectField(c, truncatedTag, "Id")
-
- // Ensure id is imageID and not busybox:latest
- c.Assert(id, checker.Not(checker.Equals), imageID)
-}
diff --git a/integration-cli/docker_cli_v2_only_test.go b/integration-cli/docker_cli_v2_only_test.go
index b82cdbd..3757341 100644
--- a/integration-cli/docker_cli_v2_only_test.go
+++ b/integration-cli/docker_cli_v2_only_test.go
@@ -22,7 +22,7 @@
return f.Name(), nil
}
-// TestV2Only ensures that a daemon by default does not
+// TestV2Only ensures that a daemon does not
// attempt to contact any v1 registry endpoints.
func (s *DockerRegistrySuite) TestV2Only(c *check.C) {
reg, err := registry.NewMock(c)
@@ -56,65 +56,3 @@
s.d.Cmd("push", repoName)
s.d.Cmd("pull", repoName)
}
-
-// TestV1 starts a daemon with legacy registries enabled
-// and ensure v1 endpoints are hit for the following operations:
-// login, push, pull, build & run
-func (s *DockerRegistrySuite) TestV1(c *check.C) {
- reg, err := registry.NewMock(c)
- defer reg.Close()
- c.Assert(err, check.IsNil)
-
- v2Pings := 0
- reg.RegisterHandler("/v2/", func(w http.ResponseWriter, r *http.Request) {
- v2Pings++
- // V2 ping 404 causes fallback to v1
- w.WriteHeader(404)
- })
-
- v1Pings := 0
- reg.RegisterHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) {
- v1Pings++
- })
-
- v1Logins := 0
- reg.RegisterHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) {
- v1Logins++
- })
-
- v1Repo := 0
- reg.RegisterHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) {
- v1Repo++
- })
-
- reg.RegisterHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) {
- v1Repo++
- })
-
- s.d.Start(c, "--insecure-registry", reg.URL(), "--disable-legacy-registry=false")
-
- tmp, err := ioutil.TempDir("", "integration-cli-")
- c.Assert(err, check.IsNil)
- defer os.RemoveAll(tmp)
-
- dockerfileName, err := makefile(tmp, fmt.Sprintf("FROM %s/busybox", reg.URL()))
- c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile"))
-
- s.d.Cmd("build", "--file", dockerfileName, tmp)
- c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build"))
-
- repoName := fmt.Sprintf("%s/busybox", reg.URL())
- s.d.Cmd("run", repoName)
- c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run"))
-
- s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.URL())
- c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt"))
-
- s.d.Cmd("tag", "busybox", repoName)
- s.d.Cmd("push", repoName)
-
- c.Assert(v1Repo, check.Equals, 2)
-
- s.d.Cmd("pull", repoName)
- c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 repository access after pull"))
-}
diff --git a/integration/build/build_test.go b/integration/build/build_test.go
index b447b62..1271dae 100644
--- a/integration/build/build_test.go
+++ b/integration/build/build_test.go
@@ -197,3 +197,73 @@
resp.Body.Close()
require.NoError(t, err)
}
+
+// TestBuildMultiStageOnBuild checks that ONBUILD commands are applied to
+// multiple subsequent stages
+// #35652
+func TestBuildMultiStageOnBuild(t *testing.T) {
+ defer setupTest(t)()
+ // test both metadata and layer-based commands as they may be implemented differently
+ dockerfile := `FROM busybox AS stage1
+ONBUILD RUN echo 'foo' >somefile
+ONBUILD ENV bar=baz
+
+FROM stage1
+RUN cat somefile # fails if ONBUILD RUN fails
+
+FROM stage1
+RUN cat somefile`
+
+ ctx := context.Background()
+ source := fakecontext.New(t, "",
+ fakecontext.WithDockerfile(dockerfile))
+ defer source.Close()
+
+ apiclient := testEnv.APIClient()
+ resp, err := apiclient.ImageBuild(ctx,
+ source.AsTarReader(t),
+ types.ImageBuildOptions{
+ Remove: true,
+ ForceRemove: true,
+ })
+
+ out := bytes.NewBuffer(nil)
+ require.NoError(t, err)
+ _, err = io.Copy(out, resp.Body)
+ resp.Body.Close()
+ require.NoError(t, err)
+
+ assert.Contains(t, out.String(), "Successfully built")
+
+ imageIDs, err := getImageIDsFromBuild(out.Bytes())
+ require.NoError(t, err)
+ assert.Equal(t, 3, len(imageIDs))
+
+ image, _, err := apiclient.ImageInspectWithRaw(context.Background(), imageIDs[2])
+ require.NoError(t, err)
+ assert.Contains(t, image.Config.Env, "bar=baz")
+}
+
+type buildLine struct {
+ Stream string
+ Aux struct {
+ ID string
+ }
+}
+
+func getImageIDsFromBuild(output []byte) ([]string, error) {
+ ids := []string{}
+ for _, line := range bytes.Split(output, []byte("\n")) {
+ if len(line) == 0 {
+ continue
+ }
+ entry := buildLine{}
+ if err := json.Unmarshal(line, &entry); err != nil {
+ return nil, err
+ }
+ if entry.Aux.ID != "" {
+ ids = append(ids, entry.Aux.ID)
+ }
+ }
+ return ids, nil
+}
diff --git a/integration/container/exec_test.go b/integration/container/exec_test.go
new file mode 100644
index 0000000..22d7ec0
--- /dev/null
+++ b/integration/container/exec_test.go
@@ -0,0 +1,60 @@
+package container
+
+import (
+ "context"
+ "io/ioutil"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/stretchr/testify/require"
+)
+
+func TestExec(t *testing.T) {
+ defer setupTest(t)()
+ ctx := context.Background()
+ client := request.NewAPIClient(t)
+
+ container, err := client.ContainerCreate(ctx,
+ &container.Config{
+ Image: "busybox",
+ Tty: true,
+ WorkingDir: "/root",
+ Cmd: strslice.StrSlice([]string{"top"}),
+ },
+ &container.HostConfig{},
+ &network.NetworkingConfig{},
+ "foo",
+ )
+ require.NoError(t, err)
+ err = client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})
+ require.NoError(t, err)
+
+ id, err := client.ContainerExecCreate(ctx, container.ID,
+ types.ExecConfig{
+ WorkingDir: "/tmp",
+ Env: strslice.StrSlice([]string{"FOO=BAR"}),
+ AttachStdout: true,
+ Cmd: strslice.StrSlice([]string{"sh", "-c", "env"}),
+ },
+ )
+ require.NoError(t, err)
+
+ resp, err := client.ContainerExecAttach(ctx, id.ID,
+ types.ExecStartCheck{
+ Detach: false,
+ Tty: false,
+ },
+ )
+ require.NoError(t, err)
+ defer resp.Close()
+ r, err := ioutil.ReadAll(resp.Reader)
+ require.NoError(t, err)
+ out := string(r)
+ require.NoError(t, err)
+ require.Contains(t, out, "PWD=/tmp", "exec command not running in expected /tmp working directory")
+ require.Contains(t, out, "FOO=BAR", "exec command not running with expected environment variable FOO")
+}
diff --git a/integration/container/health_test.go b/integration/container/health_test.go
new file mode 100644
index 0000000..8ed86a8
--- /dev/null
+++ b/integration/container/health_test.go
@@ -0,0 +1,61 @@
+package container
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/api/types/strslice"
+ "github.com/docker/docker/client"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/gotestyourself/gotestyourself/poll"
+ "github.com/stretchr/testify/require"
+)
+
+// TestHealthCheckWorkdir verifies that health-checks inherit the containers'
+// working-dir.
+func TestHealthCheckWorkdir(t *testing.T) {
+ defer setupTest(t)()
+ ctx := context.Background()
+ client := request.NewAPIClient(t)
+
+ c, err := client.ContainerCreate(ctx,
+ &container.Config{
+ Image: "busybox",
+ Tty: true,
+ WorkingDir: "/foo",
+ Cmd: strslice.StrSlice([]string{"top"}),
+ Healthcheck: &container.HealthConfig{
+ Test: []string{"CMD-SHELL", "if [ \"$PWD\" = \"/foo\" ]; then exit 0; else exit 1; fi;"},
+ Interval: 50 * time.Millisecond,
+ Retries: 3,
+ },
+ },
+ &container.HostConfig{},
+ &network.NetworkingConfig{},
+ "healthtest",
+ )
+ require.NoError(t, err)
+ err = client.ContainerStart(ctx, c.ID, types.ContainerStartOptions{})
+ require.NoError(t, err)
+
+ poll.WaitOn(t, pollForHealthStatus(ctx, client, c.ID, types.Healthy), poll.WithDelay(100*time.Millisecond))
+}
+
+func pollForHealthStatus(ctx context.Context, client client.APIClient, containerID string, healthStatus string) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ inspect, err := client.ContainerInspect(ctx, containerID)
+
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case inspect.State.Health.Status == healthStatus:
+ return poll.Success()
+ default:
+ return poll.Continue("waiting for container to become %s", healthStatus)
+ }
+ }
+}
diff --git a/integration/container/restart_test.go b/integration/container/restart_test.go
new file mode 100644
index 0000000..fe80f09
--- /dev/null
+++ b/integration/container/restart_test.go
@@ -0,0 +1,112 @@
+package container
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/integration-cli/daemon"
+)
+
+func TestDaemonRestartKillContainers(t *testing.T) {
+ type testCase struct {
+ desc string
+ config *container.Config
+ hostConfig *container.HostConfig
+
+ xRunning bool
+ xRunningLiveRestore bool
+ }
+
+ for _, c := range []testCase{
+ {
+ desc: "container without restart policy",
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
+ xRunningLiveRestore: true,
+ },
+ {
+ desc: "container with restart=always",
+ config: &container.Config{Image: "busybox", Cmd: []string{"top"}},
+ hostConfig: &container.HostConfig{RestartPolicy: container.RestartPolicy{Name: "always"}},
+ xRunning: true,
+ xRunningLiveRestore: true,
+ },
+ } {
+ for _, liveRestoreEnabled := range []bool{false, true} {
+ for fnName, stopDaemon := range map[string]func(*testing.T, *daemon.Daemon){
+ "kill-daemon": func(t *testing.T, d *daemon.Daemon) {
+ if err := d.Kill(); err != nil {
+ t.Fatal(err)
+ }
+ },
+ "stop-daemon": func(t *testing.T, d *daemon.Daemon) {
+ d.Stop(t)
+ },
+ } {
+ t.Run(fmt.Sprintf("live-restore=%v/%s/%s", liveRestoreEnabled, c.desc, fnName), func(t *testing.T) {
+ c := c
+ liveRestoreEnabled := liveRestoreEnabled
+ stopDaemon := stopDaemon
+
+ t.Parallel()
+
+ d := daemon.New(t, "", "dockerd", daemon.Config{})
+ client, err := d.NewClient()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var args []string
+ if liveRestoreEnabled {
+ args = []string{"--live-restore"}
+ }
+
+ d.StartWithBusybox(t, args...)
+ defer d.Stop(t)
+ ctx := context.Background()
+
+ resp, err := client.ContainerCreate(ctx, c.config, c.hostConfig, nil, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer client.ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{Force: true})
+
+ if err := client.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{}); err != nil {
+ t.Fatal(err)
+ }
+
+ stopDaemon(t, d)
+ d.Start(t, args...)
+
+ expected := c.xRunning
+ if liveRestoreEnabled {
+ expected = c.xRunningLiveRestore
+ }
+
+ var running bool
+ for i := 0; i < 30; i++ {
+ inspect, err := client.ContainerInspect(ctx, resp.ID)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ running = inspect.State.Running
+ if running == expected {
+ break
+ }
+ time.Sleep(2 * time.Second)
+
+ }
+
+ if running != expected {
+ t.Fatalf("got unexpected running state, expected %v, got: %v", expected, running)
+ }
+ // TODO(cpuguy83): test pause states... this seems to be rather undefined currently
+ })
+ }
+ }
+ }
+}
diff --git a/integration/image/commit_test.go b/integration/image/commit_test.go
new file mode 100644
index 0000000..13edbe1
--- /dev/null
+++ b/integration/image/commit_test.go
@@ -0,0 +1,47 @@
+package image
+
+import (
+ "context"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/integration/util/request"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCommitInheritsEnv(t *testing.T) {
+ defer setupTest(t)()
+ client := request.NewAPIClient(t)
+ ctx := context.Background()
+
+ createResp1, err := client.ContainerCreate(ctx, &container.Config{Image: "busybox"}, nil, nil, "")
+ require.NoError(t, err)
+
+ commitResp1, err := client.ContainerCommit(ctx, createResp1.ID, types.ContainerCommitOptions{
+ Changes: []string{"ENV PATH=/bin"},
+ Reference: "test-commit-image",
+ })
+ require.NoError(t, err)
+
+ image1, _, err := client.ImageInspectWithRaw(ctx, commitResp1.ID)
+ require.NoError(t, err)
+
+ expectedEnv1 := []string{"PATH=/bin"}
+ assert.Equal(t, expectedEnv1, image1.Config.Env)
+
+ createResp2, err := client.ContainerCreate(ctx, &container.Config{Image: image1.ID}, nil, nil, "")
+ require.NoError(t, err)
+
+ commitResp2, err := client.ContainerCommit(ctx, createResp2.ID, types.ContainerCommitOptions{
+ Changes: []string{"ENV PATH=/usr/bin:$PATH"},
+ Reference: "test-commit-image",
+ })
+ require.NoError(t, err)
+
+ image2, _, err := client.ImageInspectWithRaw(ctx, commitResp2.ID)
+ require.NoError(t, err)
+ expectedEnv2 := []string{"PATH=/usr/bin:/bin"}
+ assert.Equal(t, expectedEnv2, image2.Config.Env)
+}
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
index e94185a..6cfb27e 100644
--- a/integration/service/create_test.go
+++ b/integration/service/create_test.go
@@ -11,12 +11,11 @@
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/request"
"github.com/gotestyourself/gotestyourself/poll"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
-func TestCreateWithLBSandbox(t *testing.T) {
+func TestCreateServiceMultipleTimes(t *testing.T) {
defer setupTest(t)()
d := newSwarm(t)
defer d.Stop(t)
@@ -33,9 +32,8 @@
require.NoError(t, err)
overlayID := netResp.ID
- var instances uint64 = 1
+ var instances uint64 = 4
serviceSpec := swarmServiceSpec("TestService", instances)
-
serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: overlayName})
serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
@@ -56,14 +54,26 @@
_, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
require.NoError(t, err)
- network, err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{})
- require.NoError(t, err)
- assert.Contains(t, network.Containers, overlayName+"-sbox")
-
err = client.ServiceRemove(context.Background(), serviceID)
require.NoError(t, err)
poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings)
+ poll.WaitOn(t, noTasks(client), pollSettings)
+
+ serviceResp, err = client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
+ QueryRegistry: false,
+ })
+ require.NoError(t, err)
+
+ serviceID2 := serviceResp.ID
+ poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), pollSettings)
+
+ err = client.ServiceRemove(context.Background(), serviceID2)
+ require.NoError(t, err)
+
+ poll.WaitOn(t, serviceIsRemoved(client, serviceID2), pollSettings)
+ poll.WaitOn(t, noTasks(client), pollSettings)
+
err = client.NetworkRemove(context.Background(), overlayID)
require.NoError(t, err)
@@ -112,6 +122,23 @@
}
}
+func noTasks(client client.ServiceAPIClient) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ filter := filters.NewArgs()
+ tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
+ Filters: filter,
+ })
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case len(tasks) == 0:
+ return poll.Success()
+ default:
+ return poll.Continue("task count at %d waiting for 0", len(tasks))
+ }
+ }
+}
+
func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
filter := filters.NewArgs()
diff --git a/integration/system/info_linux_test.go b/integration/system/info_linux_test.go
new file mode 100644
index 0000000..bea14ea
--- /dev/null
+++ b/integration/system/info_linux_test.go
@@ -0,0 +1,34 @@
+// +build !windows
+
+package system
+
+import (
+ "testing"
+
+ "github.com/docker/docker/integration/util/request"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/net/context"
+)
+
+func TestInfo_BinaryCommits(t *testing.T) {
+ client := request.NewAPIClient(t)
+
+ info, err := client.Info(context.Background())
+ require.NoError(t, err)
+
+ assert.NotNil(t, info.ContainerdCommit)
+ assert.NotEqual(t, "N/A", info.ContainerdCommit.ID)
+ assert.Equal(t, testEnv.DaemonInfo.ContainerdCommit.Expected, info.ContainerdCommit.Expected)
+ assert.Equal(t, info.ContainerdCommit.Expected, info.ContainerdCommit.ID)
+
+ assert.NotNil(t, info.InitCommit)
+ assert.NotEqual(t, "N/A", info.InitCommit.ID)
+ assert.Equal(t, testEnv.DaemonInfo.InitCommit.Expected, info.InitCommit.Expected)
+ assert.Equal(t, info.InitCommit.Expected, info.InitCommit.ID)
+
+ assert.NotNil(t, info.RuncCommit)
+ assert.NotEqual(t, "N/A", info.RuncCommit.ID)
+ assert.Equal(t, testEnv.DaemonInfo.RuncCommit.Expected, info.RuncCommit.Expected)
+ assert.Equal(t, info.RuncCommit.Expected, info.RuncCommit.ID)
+}
diff --git a/layer/layer_test.go b/layer/layer_test.go
index 6936fae..f632d44 100644
--- a/layer/layer_test.go
+++ b/layer/layer_test.go
@@ -23,7 +23,7 @@
func init() {
graphdriver.ApplyUncompressedLayer = archive.UnpackLayer
defaultArchiver := archive.NewDefaultArchiver()
- vfs.CopyWithTar = defaultArchiver.CopyWithTar
+ vfs.CopyDir = defaultArchiver.CopyWithTar
}
func newVFSGraphDriver(td string) (graphdriver.Driver, error) {
diff --git a/libcontainerd/client_daemon.go b/libcontainerd/client_daemon.go
index 5c26bc2..a9f7c11 100644
--- a/libcontainerd/client_daemon.go
+++ b/libcontainerd/client_daemon.go
@@ -27,6 +27,7 @@
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/linux/runctypes"
"github.com/containerd/typeurl"
@@ -42,7 +43,7 @@
const InitProcessName = "init"
type container struct {
- sync.Mutex
+ mu sync.Mutex
bundleDir string
ctr containerd.Container
@@ -51,6 +52,54 @@
oomKilled bool
}
+func (c *container) setTask(t containerd.Task) {
+ c.mu.Lock()
+ c.task = t
+ c.mu.Unlock()
+}
+
+func (c *container) getTask() containerd.Task {
+ c.mu.Lock()
+ t := c.task
+ c.mu.Unlock()
+ return t
+}
+
+func (c *container) addProcess(id string, p containerd.Process) {
+ c.mu.Lock()
+ if c.execs == nil {
+ c.execs = make(map[string]containerd.Process)
+ }
+ c.execs[id] = p
+ c.mu.Unlock()
+}
+
+func (c *container) deleteProcess(id string) {
+ c.mu.Lock()
+ delete(c.execs, id)
+ c.mu.Unlock()
+}
+
+func (c *container) getProcess(id string) containerd.Process {
+ c.mu.Lock()
+ p := c.execs[id]
+ c.mu.Unlock()
+ return p
+}
+
+func (c *container) setOOMKilled(killed bool) {
+ c.mu.Lock()
+ c.oomKilled = killed
+ c.mu.Unlock()
+}
+
+func (c *container) getOOMKilled() bool {
+ c.mu.Lock()
+ killed := c.oomKilled
+ c.mu.Unlock()
+ return killed
+}
+
type client struct {
sync.RWMutex // protects containers map
@@ -160,10 +209,10 @@
// Start create and start a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) {
ctr := c.getContainer(id)
- switch {
- case ctr == nil:
+ if ctr == nil {
return -1, errors.WithStack(newNotFoundError("no such container"))
- case ctr.task != nil:
+ }
+ if t := ctr.getTask(); t != nil {
return -1, errors.WithStack(newConflictError("container already started"))
}
@@ -227,9 +276,7 @@
return -1, err
}
- c.Lock()
- c.containers[id].task = t
- c.Unlock()
+ ctr.setTask(t)
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
@@ -239,9 +286,7 @@
c.logger.WithError(err).WithField("container", id).
Error("failed to delete task after fail start")
}
- c.Lock()
- c.containers[id].task = nil
- c.Unlock()
+ ctr.setTask(nil)
return -1, err
}
@@ -250,12 +295,15 @@
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
ctr := c.getContainer(containerID)
- switch {
- case ctr == nil:
+ if ctr == nil {
return -1, errors.WithStack(newNotFoundError("no such container"))
- case ctr.task == nil:
+ }
+ t := ctr.getTask()
+ if t == nil {
return -1, errors.WithStack(newInvalidParameterError("container is not running"))
- case ctr.execs != nil && ctr.execs[processID] != nil:
+ }
+
+ if p := ctr.getProcess(processID); p != nil {
return -1, errors.WithStack(newConflictError("id already in use"))
}
@@ -278,7 +326,7 @@
}
}()
- p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
+ p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
return rio, err
})
@@ -291,21 +339,14 @@
return -1, err
}
- ctr.Lock()
- if ctr.execs == nil {
- ctr.execs = make(map[string]containerd.Process)
- }
- ctr.execs[processID] = p
- ctr.Unlock()
+ ctr.addProcess(processID, p)
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
if err = p.Start(ctx); err != nil {
p.Delete(context.Background())
- ctr.Lock()
- delete(ctr.execs, processID)
- ctr.Unlock()
+ ctr.deleteProcess(processID)
return -1, err
}
@@ -317,7 +358,7 @@
if err != nil {
return err
}
- return p.Kill(ctx, syscall.Signal(signal))
+ return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
@@ -431,12 +472,9 @@
return 255, time.Now(), nil
}
- c.Lock()
- if ctr, ok := c.containers[containerID]; ok {
- ctr.task = nil
+ if ctr := c.getContainer(containerID); ctr != nil {
+ ctr.setTask(nil)
}
- c.Unlock()
-
return status.ExitCode(), status.ExitTime(), nil
}
@@ -470,7 +508,12 @@
return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
}
- s, err := ctr.task.Status(ctx)
+ t := ctr.getTask()
+ if t == nil {
+ return StatusUnknown, errors.WithStack(newNotFoundError("no such task"))
+ }
+
+ s, err := t.Status(ctx)
if err != nil {
return StatusUnknown, err
}
@@ -546,26 +589,22 @@
func (c *client) getProcess(containerID, processID string) (containerd.Process, error) {
ctr := c.getContainer(containerID)
- switch {
- case ctr == nil:
+ if ctr == nil {
return nil, errors.WithStack(newNotFoundError("no such container"))
- case ctr.task == nil:
- return nil, errors.WithStack(newNotFoundError("container is not running"))
- case processID == InitProcessName:
- return ctr.task, nil
- default:
- ctr.Lock()
- defer ctr.Unlock()
- if ctr.execs == nil {
- return nil, errors.WithStack(newNotFoundError("no execs"))
- }
}
- p := ctr.execs[processID]
+ t := ctr.getTask()
+ if t == nil {
+ return nil, errors.WithStack(newNotFoundError("container is not running"))
+ }
+ if processID == InitProcessName {
+ return t, nil
+ }
+
+ p := ctr.getProcess(processID)
if p == nil {
return nil, errors.WithStack(newNotFoundError("no such exec"))
}
-
return p, nil
}
@@ -623,12 +662,7 @@
}
if et == EventExit && ei.ProcessID != ei.ContainerID {
- var p containerd.Process
- ctr.Lock()
- if ctr.execs != nil {
- p = ctr.execs[ei.ProcessID]
- }
- ctr.Unlock()
+ p := ctr.getProcess(ei.ProcessID)
if p == nil {
c.logger.WithError(errors.New("no such process")).
WithFields(logrus.Fields{
@@ -644,9 +678,8 @@
"process": ei.ProcessID,
}).Warn("failed to delete process")
}
- c.Lock()
- delete(ctr.execs, ei.ProcessID)
- c.Unlock()
+ ctr.deleteProcess(ei.ProcessID)
+
ctr := c.getContainer(ei.ContainerID)
if ctr == nil {
c.logger.WithFields(logrus.Fields{
@@ -681,7 +714,10 @@
}()
eventStream, err = c.remote.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
- Filters: []string{"namespace==" + c.namespace + ",topic~=/tasks/.+"},
+ Filters: []string{
+ "namespace==" + c.namespace,
+ "topic~=/tasks/",
+ },
}, grpc.FailFast(false))
if err != nil {
return
@@ -780,10 +816,10 @@
}
if oomKilled {
- ctr.oomKilled = true
+ ctr.setOOMKilled(true)
oomKilled = false
}
- ei.OOMKilled = ctr.oomKilled
+ ei.OOMKilled = ctr.getOOMKilled()
c.processEvent(ctr, et, ei)
}
@@ -813,12 +849,19 @@
}
func wrapError(err error) error {
- if err != nil {
- msg := err.Error()
- for _, s := range []string{"container does not exist", "not found", "no such container"} {
- if strings.Contains(msg, s) {
- return wrapNotFoundError(err)
- }
+ if err == nil {
+ return nil
+ }
+
+ switch {
+ case errdefs.IsNotFound(err):
+ return wrapNotFoundError(err)
+ }
+
+ msg := err.Error()
+ for _, s := range []string{"container does not exist", "not found", "no such container"} {
+ if strings.Contains(msg, s) {
+ return wrapNotFoundError(err)
}
}
return err
diff --git a/libcontainerd/remote_daemon.go b/libcontainerd/remote_daemon.go
index e6fd05f..609bcfb 100644
--- a/libcontainerd/remote_daemon.go
+++ b/libcontainerd/remote_daemon.go
@@ -278,7 +278,7 @@
select {
case <-r.shutdownContext.Done():
- r.logger.Info("stopping healtcheck following graceful shutdown")
+ r.logger.Info("stopping healthcheck following graceful shutdown")
client.Close()
return
default:
diff --git a/opts/hosts_windows.go b/opts/hosts_windows.go
index 7c239e0..684f0e1 100644
--- a/opts/hosts_windows.go
+++ b/opts/hosts_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package opts
// DefaultHost constant defines the default host string used by docker on Windows
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go
index a22410c..66243a6 100644
--- a/pkg/archive/archive_windows.go
+++ b/pkg/archive/archive_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package archive
import (
diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go
index f2325ab..75c010e 100644
--- a/pkg/chrootarchive/archive_unix.go
+++ b/pkg/chrootarchive/archive_unix.go
@@ -66,10 +66,12 @@
cmd.Stderr = output
if err := cmd.Start(); err != nil {
+ w.Close()
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
}
//write the options to the pipe for the untar exec to read
if err := json.NewEncoder(w).Encode(options); err != nil {
+ w.Close()
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
}
w.Close()
diff --git a/pkg/directory/directory_windows.go b/pkg/directory/directory_windows.go
index 6fb0917..efe05ce 100644
--- a/pkg/directory/directory_windows.go
+++ b/pkg/directory/directory_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package directory
import (
diff --git a/pkg/dmesg/dmesg_linux.go b/pkg/dmesg/dmesg_linux.go
index 7df7f3d..2fb494e 100644
--- a/pkg/dmesg/dmesg_linux.go
+++ b/pkg/dmesg/dmesg_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package dmesg
import (
diff --git a/pkg/fsutils/fsutils_linux.go b/pkg/fsutils/fsutils_linux.go
index e6094b5..7596259 100644
--- a/pkg/fsutils/fsutils_linux.go
+++ b/pkg/fsutils/fsutils_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package fsutils
import (
diff --git a/pkg/homedir/homedir_linux.go b/pkg/homedir/homedir_linux.go
index 012fe52..a7cd2e1 100644
--- a/pkg/homedir/homedir_linux.go
+++ b/pkg/homedir/homedir_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package homedir
import (
diff --git a/pkg/idtools/idtools_windows.go b/pkg/idtools/idtools_windows.go
index 94ca33a..ec49177 100644
--- a/pkg/idtools/idtools_windows.go
+++ b/pkg/idtools/idtools_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package idtools
import (
diff --git a/pkg/ioutils/temp_windows.go b/pkg/ioutils/temp_windows.go
index c258e5f..fb14c95 100644
--- a/pkg/ioutils/temp_windows.go
+++ b/pkg/ioutils/temp_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package ioutils
import (
diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go
index be69fee..dde889e 100644
--- a/pkg/mount/mountinfo_linux.go
+++ b/pkg/mount/mountinfo_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package mount
import (
diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go
index 8ceec84..f3c13e5 100644
--- a/pkg/mount/sharedsubtree_linux.go
+++ b/pkg/mount/sharedsubtree_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package mount
// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go
index e598672..93620ee 100644
--- a/pkg/parsers/kernel/kernel_windows.go
+++ b/pkg/parsers/kernel/kernel_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package kernel
import (
diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go
index 05319ea..d3f1061 100644
--- a/pkg/reexec/command_linux.go
+++ b/pkg/reexec/command_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package reexec
import (
diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go
index ca871c4..c320876 100644
--- a/pkg/reexec/command_windows.go
+++ b/pkg/reexec/command_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package reexec
import (
diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go
index 440f270..c84a63e 100644
--- a/pkg/signal/signal_windows.go
+++ b/pkg/signal/signal_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package signal
import (
diff --git a/pkg/sysinfo/numcpu_linux.go b/pkg/sysinfo/numcpu_linux.go
index f1d2d9d..5739b33 100644
--- a/pkg/sysinfo/numcpu_linux.go
+++ b/pkg/sysinfo/numcpu_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package sysinfo
import (
diff --git a/pkg/sysinfo/numcpu_windows.go b/pkg/sysinfo/numcpu_windows.go
index 1d89dd5..3516182 100644
--- a/pkg/sysinfo/numcpu_windows.go
+++ b/pkg/sysinfo/numcpu_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package sysinfo
import (
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
index 4e6255b..8889318 100644
--- a/pkg/sysinfo/sysinfo_windows.go
+++ b/pkg/sysinfo/sysinfo_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package sysinfo
// New returns an empty SysInfo for windows for now.
diff --git a/pkg/system/chtimes_windows.go b/pkg/system/chtimes_windows.go
index 45428c1..a1f4fd5 100644
--- a/pkg/system/chtimes_windows.go
+++ b/pkg/system/chtimes_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
import (
diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go
index a61b53d..b1e46d9 100644
--- a/pkg/system/filesys_windows.go
+++ b/pkg/system/filesys_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
import (
diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go
index 2e863c0..ba2692a 100644
--- a/pkg/system/mknod_windows.go
+++ b/pkg/system/mknod_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
// Mknod is not implemented on Windows.
diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go
index 13f1de1..71fc0f1 100644
--- a/pkg/system/umask_windows.go
+++ b/pkg/system/umask_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package system
// Umask is not supported on the windows platform.
diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go
index 5abf5e7..908131eb 100644
--- a/pkg/tarsum/fileinfosums.go
+++ b/pkg/tarsum/fileinfosums.go
@@ -1,6 +1,10 @@
package tarsum
-import "sort"
+import (
+ "runtime"
+ "sort"
+ "strings"
+)
// FileInfoSumInterface provides an interface for accessing file checksum
// information within a tar file. This info is accessed through interface
@@ -35,8 +39,11 @@
// GetFile returns the first FileInfoSumInterface with a matching name.
func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+ // We do case insensitive matching on Windows as c:\APP and c:\app are
+ // the same. See issue #33107.
for i := range fis {
- if fis[i].Name() == name {
+ if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
+ (runtime.GOOS != "windows" && fis[i].Name() == name) {
return fis[i]
}
}
diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go
index b6819b3..284ac63 100644
--- a/pkg/term/term_windows.go
+++ b/pkg/term/term_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package term
import (
diff --git a/plugin/backend_linux.go b/plugin/backend_linux.go
index 28a6c18..1f2830a 100644
--- a/plugin/backend_linux.go
+++ b/plugin/backend_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package plugin
import (
diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go
index 9839467..5343b85 100644
--- a/plugin/executor/containerd/containerd.go
+++ b/plugin/executor/containerd/containerd.go
@@ -16,7 +16,7 @@
)
// PluginNamespace is the name used for the plugins namespace
-var PluginNamespace = "moby-plugins"
+var PluginNamespace = "plugins.moby"
// ExitHandler represents an object that is called when the exit event is received from containerd
type ExitHandler interface {
diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go
index eff21e1..59066c1 100644
--- a/plugin/manager_linux.go
+++ b/plugin/manager_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package plugin
import (
diff --git a/plugin/manager_linux_test.go b/plugin/manager_linux_test.go
new file mode 100644
index 0000000..3259ca8
--- /dev/null
+++ b/plugin/manager_linux_test.go
@@ -0,0 +1,79 @@
+package plugin
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/system"
+ "github.com/docker/docker/plugin/v2"
+)
+
+func TestManagerWithPluginMounts(t *testing.T) {
+ root, err := ioutil.TempDir("", "test-store-with-plugin-mounts")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer system.EnsureRemoveAll(root)
+
+ s := NewStore()
+ managerRoot := filepath.Join(root, "manager")
+ p1 := newTestPlugin(t, "test1", "testcap", managerRoot)
+
+ p2 := newTestPlugin(t, "test2", "testcap", managerRoot)
+ p2.PluginObj.Enabled = true
+
+ m, err := NewManager(
+ ManagerConfig{
+ Store: s,
+ Root: managerRoot,
+ ExecRoot: filepath.Join(root, "exec"),
+ CreateExecutor: func(*Manager) (Executor, error) { return nil, nil },
+ LogPluginEvent: func(_, _, _ string) {},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := s.Add(p1); err != nil {
+ t.Fatal(err)
+ }
+ if err := s.Add(p2); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a mount to simulate a plugin that has created its own mounts
+ p2Mount := filepath.Join(p2.Rootfs, "testmount")
+ if err := os.MkdirAll(p2Mount, 0755); err != nil {
+ t.Fatal(err)
+ }
+ if err := mount.Mount("tmpfs", p2Mount, "tmpfs", ""); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := m.Remove(p1.Name(), &types.PluginRmConfig{ForceRemove: true}); err != nil {
+ t.Fatal(err)
+ }
+ if mounted, err := mount.Mounted(p2Mount); !mounted || err != nil {
+ t.Fatalf("expected %s to be mounted, err: %v", p2Mount, err)
+ }
+}
+
+func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
+ rootfs := filepath.Join(root, name)
+ if err := os.MkdirAll(rootfs, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ p := v2.Plugin{PluginObj: types.Plugin{Name: name}}
+ p.Rootfs = rootfs
+ iType := types.PluginInterfaceType{Capability: cap, Prefix: "docker", Version: "1.0"}
+ i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
+ p.PluginObj.Config.Interface = i
+ p.PluginObj.ID = name
+
+ return &p
+}
diff --git a/plugin/manager_solaris.go b/plugin/manager_solaris.go
deleted file mode 100644
index ac03d6e..0000000
--- a/plugin/manager_solaris.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package plugin
-
-import (
- "fmt"
-
- "github.com/docker/docker/plugin/v2"
- specs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
- return fmt.Errorf("Not implemented")
-}
-
-func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) {
- return nil, fmt.Errorf("Not implemented")
-}
-
-func (pm *Manager) disable(p *v2.Plugin, c *controller) error {
- return fmt.Errorf("Not implemented")
-}
-
-func (pm *Manager) restore(p *v2.Plugin) error {
- return fmt.Errorf("Not implemented")
-}
-
-// Shutdown plugins
-func (pm *Manager) Shutdown() {
-}
-
-func setupRoot(root string) error { return nil }
diff --git a/plugin/manager_windows.go b/plugin/manager_windows.go
index 56a7ee3..ac03d6e 100644
--- a/plugin/manager_windows.go
+++ b/plugin/manager_windows.go
@@ -1,5 +1,3 @@
-// +build windows
-
package plugin
import (
diff --git a/plugin/v2/plugin_linux.go b/plugin/v2/plugin_linux.go
index be82363..9590df4 100644
--- a/plugin/v2/plugin_linux.go
+++ b/plugin/v2/plugin_linux.go
@@ -1,5 +1,3 @@
-// +build linux
-
package v2
import (
diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md
index 6ae7baf..4b52989 100644
--- a/project/GOVERNANCE.md
+++ b/project/GOVERNANCE.md
@@ -1,17 +1,120 @@
-# Docker Governance Advisory Board Meetings
+# Moby project governance
-In the spirit of openness, Docker created a Governance Advisory Board, and committed to make all materials and notes from the meetings of this group public.
-All output from the meetings should be considered proposals only, and are subject to the review and approval of the community and the project leadership.
+Moby projects are governed by the [Moby Technical Steering Committee (TSC)](https://github.com/moby/tsc).
+See the Moby TSC [charter](https://github.com/moby/tsc/blob/master/README.md) for
+further information on the role of the TSC and procedures for escalation
+of technical issues or concerns.
-The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at
-[Google Docs Folder](https://goo.gl/Alfj8r)
+Contact [any Moby TSC member](https://github.com/moby/tsc/blob/master/MEMBERS.md) with your questions/concerns about the governance or a specific technical
+issue that you feel requires escalation.
-These include:
+## Project maintainers
-* First Meeting Notes
-* DGAB Charter
-* Presentation 1: Introductory Presentation, including State of The Project
-* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal
-* Presentation 3: Long Term Roadmap/Statement of Direction
-
+The current maintainers of the moby/moby repository are listed in the
+[MAINTAINERS](/MAINTAINERS) file.
+There are different types of maintainers, with different responsibilities, but
+all maintainers have 3 things in common:
+
+ 1. They share responsibility in the project's success.
+ 2. They have made a long-term, recurring time investment to improve the project.
+ 3. They spend that time doing whatever needs to be done, not necessarily what is the most interesting or fun.
+
+Maintainers are often under-appreciated, because their work is less visible.
+It's easy to recognize a really cool and technically advanced feature. It's harder
+to appreciate the absence of bugs, the slow but steady improvement in stability,
+or the reliability of a release process. But those things distinguish a good
+project from a great one.
+
+### Adding maintainers
+
+Maintainers are first and foremost contributors who have shown their
+commitment to the long term success of a project. Contributors who want to
+become maintainers first demonstrate commitment to the project by contributing
+code, reviewing others' work, and triaging issues on a regular basis for at
+least three months.
+
+The contributions alone don't make you a maintainer. You need to earn the
+trust of the current maintainers and other project contributors, that your
+decisions and actions are in the best interest of the project.
+
+Periodically, the existing maintainers curate a list of contributors who have
+shown regular activity on the project over the prior months. From this
+list, maintainer candidates are selected and proposed on the maintainers
+mailing list.
+
+After a candidate is announced on the maintainers mailing list, the
+existing maintainers discuss the candidate over the next 5 business days,
+provide feedback, and vote. At least 66% of the current maintainers must
+vote in the affirmative.
+
+If a candidate is approved, a maintainer contacts the candidate to
+invite them to open a pull request that adds the contributor to
+the MAINTAINERS file. The candidate becomes a maintainer once the pull
+request is merged.
+
+### Removing maintainers
+
+Maintainers can be removed from the project, either at their own request
+or due to [project inactivity](#inactive-maintainer-policy).
+
+#### How to step down
+
+Life priorities, interests, and passions can change. If you're a maintainer but
+feel you must remove yourself from the list, inform other maintainers that you
+intend to step down, and if possible, help find someone to pick up your work.
+At the very least, ensure your work can be continued where you left off.
+
+After you've informed other maintainers, create a pull request to remove
+yourself from the MAINTAINERS file.
+
+#### Inactive maintainer policy
+
+An existing maintainer can be removed if they do not show significant activity
+on the project. Periodically, the maintainers review the list of maintainers
+and their activity over the last three months.
+
+If a maintainer has shown insufficient activity over this period, a project
+representative will contact the maintainer to ask if they want to continue
+being a maintainer. If the maintainer decides to step down as a maintainer,
+they open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to continue in this role, but is unable to perform the
+required duties, they can be removed with a vote by at least 66% of the current
+maintainers. The maintainer under discussion will not be allowed to vote. An
+e-mail is sent to the mailing list, inviting maintainers of the project to
+vote. The voting period is five business days. Issues related to a maintainer's
+performance should be discussed with them among the other maintainers so that
+they are not surprised by a pull request removing them. This discussion should
+be handled objectively with no ad hominem attacks.
+
+## Project decision making
+
+Short answer: **Everything is a pull request**.
+
+The Moby core engine project is an open-source project with an open design
+philosophy. This means that the repository is the source of truth for **every**
+aspect of the project, including its philosophy, design, road map, and APIs.
+*If it's part of the project, it's in the repo. If it's in the repo, it's part
+of the project.*
+
+As a result, each decision can be expressed as a change to the repository. An
+implementation change is expressed as a change to the source code. An API
+change is a change to the API specification. A philosophy change is a change
+to the philosophy manifesto, and so on.
+
+All decisions affecting the moby/moby repository, both big and small, follow
+the same steps:
+
+ * **Step 1**: Open a pull request. Anyone can do this.
+
+ * **Step 2**: Discuss the pull request. Anyone can do this.
+
+ * **Step 3**: Maintainers merge, close or reject the pull request.
+
+Pull requests are reviewed by the current maintainers of the moby/moby
+repository. Weekly meetings are organized to synchronously
+discuss tricky PRs, as well as design and architecture decisions. When
+technical agreement cannot be reached among the maintainers of the project,
+escalation or concerns can be raised by opening an issue to be handled
+by the [Moby Technical Steering Committee](https://github.com/moby/tsc).
diff --git a/vendor.conf b/vendor.conf
index 84945ec..cf6bea5 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -30,7 +30,7 @@
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
#get libnetwork packages
-github.com/docker/libnetwork f7d21337cf1eb628ad54eecac0881fa23ec266df
+github.com/docker/libnetwork 26531e56a76d7334e594098d7cfab88285d9065c
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -42,7 +42,7 @@
github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
-github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
@@ -77,12 +77,12 @@
github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
# gelf logging driver deps
-github.com/Graylog2/go-gelf v2
+github.com/Graylog2/go-gelf 4143646226541087117ff2f83334ea48b3201841
-github.com/fluent/fluent-logger-golang v1.2.1
+github.com/fluent/fluent-logger-golang v1.3.0
# fluent-logger-golang deps
github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
-github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad
# fsnotify
github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1
@@ -103,7 +103,7 @@
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
-github.com/containerd/containerd 6bff39c643886dfa3d546e83a90a527b64ddeacf
+github.com/containerd/containerd 89623f28b87a6004d4b785663257362d1658a729 # v1.0.0
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
@@ -111,10 +111,10 @@
github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/dmcgowan/go-tar go1.10
-github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
+github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
# cluster
-github.com/docker/swarmkit de950a7ed842c7b7e47e9451cde9bf8f96031894
+github.com/docker/swarmkit 713d79dc8799b33465c58ed120b870c52eb5eb4f
github.com/gogo/protobuf v0.4
github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
@@ -148,7 +148,7 @@
# archive/tar
# mkdir -p ./vendor/archive
# git clone git://github.com/tonistiigi/go-1.git ./go
-# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore
+# git --git-dir ./go/.git --work-tree ./go checkout revert-prefix-ignore-1.9
# cp -a go/src/archive/tar ./vendor/archive/tar
# rm -rf ./go
# vndr
diff --git a/vendor/archive/tar/common.go b/vendor/archive/tar/common.go
index d2ae66d..d49c5c3 100644
--- a/vendor/archive/tar/common.go
+++ b/vendor/archive/tar/common.go
@@ -158,11 +158,15 @@
// sysStat, if non-nil, populates h from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, h *Header) error
-// Mode constants from the tar spec.
const (
- c_ISUID = 04000 // Set uid
- c_ISGID = 02000 // Set gid
- c_ISVTX = 01000 // Save text (sticky bit)
+ // Mode constants from the USTAR spec:
+ // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06
+ c_ISUID = 04000 // Set uid
+ c_ISGID = 02000 // Set gid
+ c_ISVTX = 01000 // Save text (sticky bit)
+
+ // Common Unix mode constants; these are not defined in any common tar standard.
+ // Header.FileInfo understands these, but FileInfoHeader will never produce these.
c_ISDIR = 040000 // Directory
c_ISFIFO = 010000 // FIFO
c_ISREG = 0100000 // Regular file
@@ -208,30 +212,24 @@
}
switch {
case fm.IsRegular():
- h.Mode |= c_ISREG
h.Typeflag = TypeReg
h.Size = fi.Size()
case fi.IsDir():
h.Typeflag = TypeDir
- h.Mode |= c_ISDIR
h.Name += "/"
case fm&os.ModeSymlink != 0:
h.Typeflag = TypeSymlink
- h.Mode |= c_ISLNK
h.Linkname = link
case fm&os.ModeDevice != 0:
if fm&os.ModeCharDevice != 0 {
- h.Mode |= c_ISCHR
h.Typeflag = TypeChar
} else {
- h.Mode |= c_ISBLK
h.Typeflag = TypeBlock
}
case fm&os.ModeNamedPipe != 0:
h.Typeflag = TypeFifo
- h.Mode |= c_ISFIFO
case fm&os.ModeSocket != 0:
- h.Mode |= c_ISSOCK
+ return nil, fmt.Errorf("archive/tar: sockets not supported")
default:
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
}
diff --git a/vendor/archive/tar/writer.go b/vendor/archive/tar/writer.go
index 596fb8b..c51c243 100644
--- a/vendor/archive/tar/writer.go
+++ b/vendor/archive/tar/writer.go
@@ -121,9 +121,15 @@
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
if needsPaxHeader {
paxHeaders[paxKeyword] = s
- return
}
- f.formatString(b, s)
+
+ // Write string in a best-effort manner to satisfy readers that expect
+ // the field to be non-empty.
+ s = toASCII(s)
+ if len(s) > len(b) {
+ s = s[:len(b)]
+ }
+ f.formatString(b, s) // Should never error
}
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
// Try octal first.
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go b/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
index 8f22c9a..74255ec 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
@@ -5,6 +5,7 @@
"encoding/json"
"fmt"
"net"
+ "time"
)
type TCPReader struct {
@@ -13,16 +14,21 @@
messages chan []byte
}
-func newTCPReader(addr string) (*TCPReader, chan string, error) {
+type connChannels struct {
+ drop chan string
+ confirm chan string
+}
+
+func newTCPReader(addr string) (*TCPReader, chan string, chan string, error) {
var err error
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
- return nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
+ return nil, nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
}
listener, err := net.ListenTCP("tcp", tcpAddr)
if err != nil {
- return nil, nil, fmt.Errorf("ListenTCP: %s", err)
+ return nil, nil, nil, fmt.Errorf("ListenTCP: %s", err)
}
r := &TCPReader{
@@ -30,26 +36,61 @@
messages: make(chan []byte, 100), // Make a buffered channel with at most 100 messages
}
- signal := make(chan string, 1)
+ closeSignal := make(chan string, 1)
+ doneSignal := make(chan string, 1)
- go r.listenUntilCloseSignal(signal)
+ go r.listenUntilCloseSignal(closeSignal, doneSignal)
- return r, signal, nil
+ return r, closeSignal, doneSignal, nil
}
-func (r *TCPReader) listenUntilCloseSignal(signal chan string) {
- defer func() { signal <- "done" }()
- defer r.listener.Close()
+func (r *TCPReader) accepter(connections chan net.Conn) {
for {
conn, err := r.listener.Accept()
if err != nil {
break
}
- go handleConnection(conn, r.messages)
+ connections <- conn
+ }
+}
+
+func (r *TCPReader) listenUntilCloseSignal(closeSignal chan string, doneSignal chan string) {
+ defer func() { doneSignal <- "done" }()
+ defer r.listener.Close()
+ var conns []connChannels
+ connectionsChannel := make(chan net.Conn, 1)
+ go r.accepter(connectionsChannel)
+ for {
select {
- case sig := <-signal:
- if sig == "stop" {
- break
+ case conn := <-connectionsChannel:
+ dropSignal := make(chan string, 1)
+ dropConfirm := make(chan string, 1)
+ channels := connChannels{drop: dropSignal, confirm: dropConfirm}
+ go handleConnection(conn, r.messages, dropSignal, dropConfirm)
+ conns = append(conns, channels)
+ default:
+ }
+
+ select {
+ case sig := <-closeSignal:
+ if sig == "stop" || sig == "drop" {
+ if len(conns) >= 1 {
+ for _, s := range conns {
+ if s.drop != nil {
+ s.drop <- "drop"
+ <-s.confirm
+ conns = append(conns[:0], conns[1:]...)
+ }
+ }
+ if sig == "stop" {
+ return
+ }
+ } else if sig == "stop" {
+ closeSignal <- "stop"
+ }
+ if sig == "drop" {
+ doneSignal <- "done"
+ }
}
default:
}
@@ -60,19 +101,41 @@
return r.listener.Addr().String()
}
-func handleConnection(conn net.Conn, messages chan<- []byte) {
+func handleConnection(conn net.Conn, messages chan<- []byte, dropSignal chan string, dropConfirm chan string) {
+ defer func() { dropConfirm <- "done" }()
defer conn.Close()
reader := bufio.NewReader(conn)
var b []byte
var err error
+ drop := false
+ canDrop := false
for {
+ conn.SetDeadline(time.Now().Add(2 * time.Second))
if b, err = reader.ReadBytes(0); err != nil {
- continue
- }
- if len(b) > 0 {
+ if drop {
+ return
+ }
+ } else if len(b) > 0 {
messages <- b
+ canDrop = true
+ if drop {
+ return
+ }
+ } else if drop {
+ return
+ }
+ select {
+ case sig := <-dropSignal:
+ if sig == "drop" {
+ drop = true
+ time.Sleep(1 * time.Second)
+ if canDrop {
+ return
+ }
+ }
+ default:
}
}
}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go b/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
index ab95cbc..da1390d 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
@@ -75,12 +75,17 @@
func (w *TCPWriter) writeToSocketWithReconnectAttempts(zBytes []byte) (n int, err error) {
var errConn error
+ var i int
w.mu.Lock()
- for i := 0; n <= w.MaxReconnect; i++ {
+ for i = 0; i <= w.MaxReconnect; i++ {
errConn = nil
- n, err = w.conn.Write(zBytes)
+ if w.conn != nil {
+ n, err = w.conn.Write(zBytes)
+ } else {
+ err = fmt.Errorf("Connection was nil, will attempt reconnect")
+ }
if err != nil {
time.Sleep(w.ReconnectDelay * time.Second)
w.conn, errConn = net.Dial("tcp", w.addr)
@@ -90,6 +95,9 @@
}
w.mu.Unlock()
+ if i > w.MaxReconnect {
+ return 0, fmt.Errorf("Maximum reconnection attempts was reached; giving up")
+ }
if errConn != nil {
return 0, fmt.Errorf("Write Failed: %s\nReconnection failed: %s", err, errConn)
}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
index 93c3692..153be2c 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
@@ -27,5 +27,8 @@
// Close connection and interrupt blocked Read or Write operations
func (w *GelfWriter) Close() error {
+ if w.conn == nil {
+ return nil
+ }
return w.conn.Close()
}
diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md
index 6e59def..84d1eec 100644
--- a/vendor/github.com/containerd/containerd/README.md
+++ b/vendor/github.com/containerd/containerd/README.md
@@ -13,7 +13,37 @@
## Getting Started
-If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
+See our documentation on [containerd.io](https://containerd.io):
+* [for ops and admins](docs/ops.md)
+* [namespaces](docs/namespaces.md)
+* [client options](docs/client-opts.md)
+
+See how to build containerd from source at [BUILDING](BUILDING.md).
+
+If you are interested in trying out containerd see our example at [Getting Started](docs/getting-started.md).
+
+
+## Runtime Requirements
+
+Runtime requirements for containerd are very minimal. Most interactions with
+the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
+OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
+
+There are specific features
+used by containerd core code and snapshotters that will require a minimum kernel
+version on Linux. With the understood caveat of distro kernel versioning, a
+reasonable starting point for Linux is a minimum 4.x kernel version.
+
+The overlay filesystem snapshotter, used by default, uses features that were
+finalized in the 4.x kernel series. If you choose to use btrfs, there may
+be more flexibility in kernel version (minimum recommended is 3.18), but will
+require the btrfs kernel module and btrfs tools to be installed on your Linux
+distribution.
+
+To use Linux checkpoint and restore features, you will need `criu` installed on
+your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
+
+Build requirements for developers are listed in [BUILDING](BUILDING.md).
## Features
@@ -23,7 +53,11 @@
```go
-import "github.com/containerd/containerd"
+import (
+ "github.com/containerd/containerd"
+ "github.com/containerd/containerd/cio"
+)
+
func main() {
client, err := containerd.New("/run/containerd/containerd.sock")
@@ -39,7 +73,7 @@
To set a namespace for requests to the API:
```go
-context = context.Background()
+context = context.Background()
// create a context for docker
docker = namespaces.WithNamespace(context, "docker")
@@ -93,7 +127,6 @@
redis, err := client.NewContainer(context, "redis-master",
containerd.WithNewSnapshot("redis-rootfs", image),
containerd.WithNewSpec(oci.WithImageConfig(image)),
-
)
// use a readonly filesystem with multiple containers
@@ -112,7 +145,7 @@
```go
// create a new task
-task, err := redis.NewTask(context, containerd.Stdio)
+task, err := redis.NewTask(context, cio.Stdio)
defer task.Delete(context)
// the task is now running and has a pid that can be use to setup networking
@@ -144,37 +177,12 @@
redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
defer container.Delete(context)
-task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
defer task.Delete(context)
err := task.Start(context)
```
-## Developer Quick-Start
-
-To build the daemon and `ctr` simple test client, the following build system dependencies are required:
-
-* Go 1.9.x or above
-* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
-* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
-
-For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:
-
-```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
-$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
-```
-
-With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
-
-> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
-> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
-> Makefile target will disable the btrfs driver within the containerd Go build.
-
-Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
-
-Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
-
### Releases and API Stability
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go
index 5c20335..39547f5 100644
--- a/vendor/github.com/containerd/containerd/client.go
+++ b/vendor/github.com/containerd/containerd/client.go
@@ -7,6 +7,7 @@
"net/http"
"runtime"
"strconv"
+ "strings"
"sync"
"time"
@@ -29,7 +30,6 @@
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
- "github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
@@ -334,6 +334,14 @@
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
if err != nil {
+ // TODO(estesp): until we have a more complete method for index push, we need to report
+ // missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+ // as a marker for this problem
+ if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
+ manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
+ errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
+ return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
+ }
return err
}
}
@@ -494,95 +502,27 @@
}, nil
}
-type imageFormat string
-
-const (
- ociImageFormat imageFormat = "oci"
-)
-
type importOpts struct {
- format imageFormat
- refObject string
- labels map[string]string
}
// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error
-// WithImportLabel sets a label to be associated with an imported image
-func WithImportLabel(key, value string) ImportOpt {
- return func(opts *importOpts) error {
- if opts.labels == nil {
- opts.labels = make(map[string]string)
- }
-
- opts.labels[key] = value
- return nil
- }
-}
-
-// WithImportLabels associates a set of labels to an imported image
-func WithImportLabels(labels map[string]string) ImportOpt {
- return func(opts *importOpts) error {
- if opts.labels == nil {
- opts.labels = make(map[string]string)
- }
-
- for k, v := range labels {
- opts.labels[k] = v
- }
- return nil
- }
-}
-
-// WithOCIImportFormat sets the import format for an OCI image format
-func WithOCIImportFormat() ImportOpt {
- return func(c *importOpts) error {
- if c.format != "" {
- return errors.New("format already set")
- }
- c.format = ociImageFormat
- return nil
- }
-}
-
-// WithRefObject specifies the ref object to import.
-// If refObject is empty, it is copied from the ref argument of Import().
-func WithRefObject(refObject string) ImportOpt {
- return func(c *importOpts) error {
- c.refObject = refObject
- return nil
- }
-}
-
-func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
var iopts importOpts
for _, o := range opts {
if err := o(&iopts); err != nil {
return iopts, err
}
}
- // use OCI as the default format
- if iopts.format == "" {
- iopts.format = ociImageFormat
- }
- // if refObject is not explicitly specified, use the one specified in ref
- if iopts.refObject == "" {
- refSpec, err := reference.Parse(ref)
- if err != nil {
- return iopts, err
- }
- iopts.refObject = refSpec.Object
- }
return iopts, nil
}
// Import imports an image from a Tar stream using reader.
-// OCI format is assumed by default.
-//
-// Note that unreferenced blobs are imported to the content store as well.
-func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
- iopts, err := resolveImportOpt(ref, opts...)
+// Caller needs to specify importer. Future version may use oci.v1 as the default.
+// Note that unreferrenced blobs may be imported to the content store as well.
+func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
+ _, err := resolveImportOpt(opts...) // unused now
if err != nil {
return nil, err
}
@@ -593,58 +533,66 @@
}
defer done()
- switch iopts.format {
- case ociImageFormat:
- return c.importFromOCITar(ctx, ref, reader, iopts)
- default:
- return nil, errors.Errorf("unsupported format: %s", iopts.format)
+ imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
+ if err != nil {
+ // is.Update() is not called on error
+ return nil, err
}
+
+ is := c.ImageService()
+ var images []Image
+ for _, imgrec := range imgrecs {
+ if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+ if !errdefs.IsNotFound(err) {
+ return nil, err
+ }
+
+ created, err := is.Create(ctx, imgrec)
+ if err != nil {
+ return nil, err
+ }
+
+ imgrec = created
+ } else {
+ imgrec = updated
+ }
+
+ images = append(images, &image{
+ client: c,
+ i: imgrec,
+ })
+ }
+ return images, nil
}
type exportOpts struct {
- format imageFormat
}
-// ExportOpt allows callers to set export options
+// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error
-// WithOCIExportFormat sets the OCI image format as the export target
-func WithOCIExportFormat() ExportOpt {
- return func(c *exportOpts) error {
- if c.format != "" {
- return errors.New("format already set")
+func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
+ var eopts exportOpts
+ for _, o := range opts {
+ if err := o(&eopts); err != nil {
+ return eopts, err
}
- c.format = ociImageFormat
- return nil
}
+ return eopts, nil
}
-// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
-// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
-// -> application/vnd.oci.image.layer.v1.tar+gzip
-
// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
- var eopts exportOpts
- for _, o := range opts {
- if err := o(&eopts); err != nil {
- return nil, err
- }
- }
- // use OCI as the default format
- if eopts.format == "" {
- eopts.format = ociImageFormat
+// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
+func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+ _, err := resolveExportOpt(opts...) // unused now
+ if err != nil {
+ return nil, err
}
pr, pw := io.Pipe()
- switch eopts.format {
- case ociImageFormat:
- go func() {
- pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
- }()
- default:
- return nil, errors.Errorf("unsupported format: %s", eopts.format)
- }
+ go func() {
+ pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
+ }()
return pr, nil
}
diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go
index 2d5c9ae..716e045 100644
--- a/vendor/github.com/containerd/containerd/container.go
+++ b/vendor/github.com/containerd/containerd/container.go
@@ -162,11 +162,17 @@
}, nil
}
-func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (Task, error) {
+func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (_ Task, err error) {
i, err := ioCreate(c.id)
if err != nil {
return nil, err
}
+ defer func() {
+ if err != nil && i != nil {
+ i.Cancel()
+ i.Close()
+ }
+ }()
cfg := i.Config()
request := &tasks.CreateTaskRequest{
ContainerID: c.id,
diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go
index bb431e5..b678033 100644
--- a/vendor/github.com/containerd/containerd/container_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/container_opts_unix.go
@@ -24,7 +24,6 @@
"github.com/opencontainers/image-spec/identity"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "golang.org/x/sys/unix"
)
// WithCheckpoint allows a container to be created from the checkpointed information
@@ -193,14 +192,17 @@
if err != nil {
return err
}
- defer os.RemoveAll(root)
+ defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
- defer unix.Unmount(root, 0)
- return filepath.Walk(root, incrementFS(root, uid, gid))
+ err = filepath.Walk(root, incrementFS(root, uid, gid))
+ if uerr := mount.Unmount(root, 0); err == nil {
+ err = uerr
+ }
+ return err
}
func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
index 56f99bb..9ff95de 100644
--- a/vendor/github.com/containerd/containerd/content/local/store.go
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -62,7 +62,7 @@
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
- if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
return nil, err
}
diff --git a/vendor/github.com/containerd/containerd/export.go b/vendor/github.com/containerd/containerd/export.go
deleted file mode 100644
index 76bebe3..0000000
--- a/vendor/github.com/containerd/containerd/export.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package containerd
-
-import (
- "archive/tar"
- "context"
- "encoding/json"
- "io"
- "sort"
-
- "github.com/containerd/containerd/content"
- "github.com/containerd/containerd/images"
- "github.com/containerd/containerd/platforms"
- ocispecs "github.com/opencontainers/image-spec/specs-go"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
- tw := tar.NewWriter(writer)
- defer tw.Close()
-
- records := []tarRecord{
- ociLayoutFile(""),
- ociIndexRecord(desc),
- }
-
- cs := c.ContentStore()
- algorithms := map[string]struct{}{}
- exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
- records = append(records, blobRecord(cs, desc))
- algorithms[desc.Digest.Algorithm().String()] = struct{}{}
- return nil, nil
- }
-
- handlers := images.Handlers(
- images.ChildrenHandler(cs, platforms.Default()),
- images.HandlerFunc(exportHandler),
- )
-
- // Walk sequentially since the number of fetchs is likely one and doing in
- // parallel requires locking the export handler
- if err := images.Walk(ctx, handlers, desc); err != nil {
- return err
- }
-
- if len(algorithms) > 0 {
- records = append(records, directoryRecord("blobs/", 0755))
- for alg := range algorithms {
- records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
- }
- }
-
- return writeTar(ctx, tw, records)
-}
-
-type tarRecord struct {
- Header *tar.Header
- CopyTo func(context.Context, io.Writer) (int64, error)
-}
-
-func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
- path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
- return tarRecord{
- Header: &tar.Header{
- Name: path,
- Mode: 0444,
- Size: desc.Size,
- Typeflag: tar.TypeReg,
- },
- CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
- r, err := cs.ReaderAt(ctx, desc.Digest)
- if err != nil {
- return 0, err
- }
- defer r.Close()
-
- // Verify digest
- dgstr := desc.Digest.Algorithm().Digester()
-
- n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
- if err != nil {
- return 0, err
- }
- if dgstr.Digest() != desc.Digest {
- return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
- }
- return n, nil
- },
- }
-}
-
-func directoryRecord(name string, mode int64) tarRecord {
- return tarRecord{
- Header: &tar.Header{
- Name: name,
- Mode: mode,
- Typeflag: tar.TypeDir,
- },
- }
-}
-
-func ociLayoutFile(version string) tarRecord {
- if version == "" {
- version = ocispec.ImageLayoutVersion
- }
- layout := ocispec.ImageLayout{
- Version: version,
- }
-
- b, err := json.Marshal(layout)
- if err != nil {
- panic(err)
- }
-
- return tarRecord{
- Header: &tar.Header{
- Name: ocispec.ImageLayoutFile,
- Mode: 0444,
- Size: int64(len(b)),
- Typeflag: tar.TypeReg,
- },
- CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
- n, err := w.Write(b)
- return int64(n), err
- },
- }
-
-}
-
-func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
- index := ocispec.Index{
- Versioned: ocispecs.Versioned{
- SchemaVersion: 2,
- },
- Manifests: manifests,
- }
-
- b, err := json.Marshal(index)
- if err != nil {
- panic(err)
- }
-
- return tarRecord{
- Header: &tar.Header{
- Name: "index.json",
- Mode: 0644,
- Size: int64(len(b)),
- Typeflag: tar.TypeReg,
- },
- CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
- n, err := w.Write(b)
- return int64(n), err
- },
- }
-}
-
-func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
- sort.Sort(tarRecordsByName(records))
-
- for _, record := range records {
- if err := tw.WriteHeader(record.Header); err != nil {
- return err
- }
- if record.CopyTo != nil {
- n, err := record.CopyTo(ctx, tw)
- if err != nil {
- return err
- }
- if n != record.Header.Size {
- return errors.Errorf("unexpected copy size for %s", record.Header.Name)
- }
- } else if record.Header.Size > 0 {
- return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
- }
- }
- return nil
-}
-
-type tarRecordsByName []tarRecord
-
-func (t tarRecordsByName) Len() int {
- return len(t)
-}
-func (t tarRecordsByName) Swap(i, j int) {
- t[i], t[j] = t[j], t[i]
-}
-func (t tarRecordsByName) Less(i, j int) bool {
- return t[i].Header.Name < t[j].Header.Name
-}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
index c9b0984..c765ea0 100644
--- a/vendor/github.com/containerd/containerd/filters/parser.go
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -3,7 +3,6 @@
import (
"fmt"
"io"
- "strconv"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
@@ -134,7 +133,12 @@
return selector{}, err
}
- value, err := p.value()
+ var allowAltQuotes bool
+ if op == operatorMatches {
+ allowAltQuotes = true
+ }
+
+ value, err := p.value(allowAltQuotes)
if err != nil {
if err == io.EOF {
return selector{}, io.ErrUnexpectedEOF
@@ -188,7 +192,7 @@
case tokenField:
return s, nil
case tokenQuoted:
- return p.unquote(pos, s)
+ return p.unquote(pos, s, false)
}
return "", p.mkerr(pos, "expected field or quoted")
@@ -213,21 +217,25 @@
return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
}
-func (p *parser) value() (string, error) {
+func (p *parser) value(allowAltQuotes bool) (string, error) {
pos, tok, s := p.scanner.scan()
switch tok {
case tokenValue, tokenField:
return s, nil
case tokenQuoted:
- return p.unquote(pos, s)
+ return p.unquote(pos, s, allowAltQuotes)
}
return "", p.mkerr(pos, "expected value or quoted")
}
-func (p *parser) unquote(pos int, s string) (string, error) {
- uq, err := strconv.Unquote(s)
+func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
+ if !allowAlts && s[0] != '\'' && s[0] != '"' {
+ return "", p.mkerr(pos, "invalid quote encountered")
+ }
+
+ uq, err := unquote(s)
if err != nil {
return "", p.mkerr(pos, "unquoting failed: %v", err)
}
diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go
new file mode 100644
index 0000000..08698e1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/quote.go
@@ -0,0 +1,237 @@
+package filters
+
+import (
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+)
+
+// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
+// strconv package and modified to be able to handle quoting with `/` and `|`
+// as delimiters. The copyright is held by the Go authors.
+
+var errQuoteSyntax = errors.New("quote syntax error")
+
+// UnquoteChar decodes the first character or byte in the escaped string
+// or character literal represented by the string s.
+// It returns four values:
+//
+// 1) value, the decoded Unicode code point or byte value;
+// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+// 3) tail, the remainder of the string after the character; and
+// 4) an error that will be nil if the character is syntactically valid.
+//
+// The second argument, quote, specifies the type of literal being parsed
+// and therefore which escaped quote character is permitted.
+// If set to a single quote, it permits the sequence \' and disallows unescaped '.
+// If set to a double quote, it permits \" and disallows unescaped ".
+// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
+//
+// This is from Go strconv package, modified to support `|` and `/` as double
+// quotes for use with regular expressions.
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
+ err = errQuoteSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = errQuoteSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = errQuoteSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = errQuoteSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = errQuoteSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = errQuoteSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = errQuoteSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = errQuoteSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"', '|', '/':
+ if c != quote {
+ err = errQuoteSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = errQuoteSyntax
+ return
+ }
+ tail = s
+ return
+}
+
+// unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+//
+// This is modified from the standard library to support `|` and `/` as quote
+// characters for use with regular expressions.
+func unquote(s string) (string, error) {
+ n := len(s)
+ if n < 2 {
+ return "", errQuoteSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", errQuoteSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote == '`' {
+ if contains(s, '`') {
+ return "", errQuoteSyntax
+ }
+ if contains(s, '\r') {
+ // -1 because we know there is at least one \r to remove.
+ buf := make([]byte, 0, len(s)-1)
+ for i := 0; i < len(s); i++ {
+ if s[i] != '\r' {
+ buf = append(buf, s[i])
+ }
+ }
+ return string(buf), nil
+ }
+ return s, nil
+ }
+ if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
+ return "", errQuoteSyntax
+ }
+ if contains(s, '\n') {
+ return "", errQuoteSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) {
+ switch quote {
+ case '"', '/', '|': // pipe and slash are treated like double quote
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", errQuoteSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go
index 5a55e0a..3a8e723 100644
--- a/vendor/github.com/containerd/containerd/filters/scanner.go
+++ b/vendor/github.com/containerd/containerd/filters/scanner.go
@@ -87,7 +87,7 @@
return ch
}
-func (s *scanner) scan() (int, token, string) {
+func (s *scanner) scan() (nextp int, tk token, text string) {
var (
ch = s.next()
pos = s.pos
@@ -101,6 +101,7 @@
s.scanQuoted(ch)
return pos, tokenQuoted, s.input[pos:s.ppos]
case isSeparatorRune(ch):
+ s.value = false
return pos, tokenSeparator, s.input[pos:s.ppos]
case isOperatorRune(ch):
s.scanOperator()
@@ -241,7 +242,7 @@
func isQuoteRune(r rune) bool {
switch r {
- case '"': // maybe add single quoting?
+ case '/', '|', '"': // maybe add single quoting?
return true
}
diff --git a/vendor/github.com/containerd/containerd/fs/diff.go b/vendor/github.com/containerd/containerd/fs/diff.go
index 9073d0d..3a53f42 100644
--- a/vendor/github.com/containerd/containerd/fs/diff.go
+++ b/vendor/github.com/containerd/containerd/fs/diff.go
@@ -222,8 +222,10 @@
c1 = make(chan *currentPath)
c2 = make(chan *currentPath)
- f1, f2 *currentPath
- rmdir string
+ f1, f2 *currentPath
+ rmdir string
+ lastEmittedDir = string(filepath.Separator)
+ parents []os.FileInfo
)
g.Go(func() error {
defer close(c1)
@@ -258,7 +260,10 @@
continue
}
- var f os.FileInfo
+ var (
+ f os.FileInfo
+ emit = true
+ )
k, p := pathChange(f1, f2)
switch k {
case ChangeKindAdd:
@@ -294,13 +299,35 @@
f2 = nil
if same {
if !isLinked(f) {
- continue
+ emit = false
}
k = ChangeKindUnmodified
}
}
- if err := changeFn(k, p, f, nil); err != nil {
- return err
+ if emit {
+ emittedDir, emitParents := commonParents(lastEmittedDir, p, parents)
+ for _, pf := range emitParents {
+ p := filepath.Join(emittedDir, pf.Name())
+ if err := changeFn(ChangeKindUnmodified, p, pf, nil); err != nil {
+ return err
+ }
+ emittedDir = p
+ }
+
+ if err := changeFn(k, p, f, nil); err != nil {
+ return err
+ }
+
+ if f != nil && f.IsDir() {
+ lastEmittedDir = p
+ } else {
+ lastEmittedDir = emittedDir
+ }
+
+ parents = parents[:0]
+ } else if f.IsDir() {
+ lastEmittedDir, parents = commonParents(lastEmittedDir, p, parents)
+ parents = append(parents, f)
}
}
return nil
@@ -308,3 +335,47 @@
return g.Wait()
}
+
+func commonParents(base, updated string, dirs []os.FileInfo) (string, []os.FileInfo) {
+ if basePrefix := makePrefix(base); strings.HasPrefix(updated, basePrefix) {
+ var (
+ parents []os.FileInfo
+ last = base
+ )
+ for _, d := range dirs {
+ next := filepath.Join(last, d.Name())
+ if strings.HasPrefix(updated, makePrefix(last)) {
+ parents = append(parents, d)
+ last = next
+ } else {
+ break
+ }
+ }
+ return base, parents
+ }
+
+ baseS := strings.Split(base, string(filepath.Separator))
+ updatedS := strings.Split(updated, string(filepath.Separator))
+ commonS := []string{string(filepath.Separator)}
+
+ min := len(baseS)
+ if len(updatedS) < min {
+ min = len(updatedS)
+ }
+ for i := 0; i < min; i++ {
+ if baseS[i] == updatedS[i] {
+ commonS = append(commonS, baseS[i])
+ } else {
+ break
+ }
+ }
+
+ return filepath.Join(commonS...), []os.FileInfo{}
+}
+
+func makePrefix(d string) string {
+ if d == "" || d[len(d)-1] != filepath.Separator {
+ return d + string(filepath.Separator)
+ }
+ return d
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du.go b/vendor/github.com/containerd/containerd/fs/du.go
index 61f439d..26f5333 100644
--- a/vendor/github.com/containerd/containerd/fs/du.go
+++ b/vendor/github.com/containerd/containerd/fs/du.go
@@ -1,5 +1,7 @@
package fs
+import "context"
+
// Usage of disk information
type Usage struct {
Inodes int64
@@ -11,3 +13,10 @@
func DiskUsage(roots ...string) (Usage, error) {
return diskUsage(roots...)
}
+
+// DiffUsage counts the numbers of inodes and disk usage in the
+// diff between the 2 directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+ return diffUsage(ctx, a, b)
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_unix.go b/vendor/github.com/containerd/containerd/fs/du_unix.go
index d8654d3..6328e80 100644
--- a/vendor/github.com/containerd/containerd/fs/du_unix.go
+++ b/vendor/github.com/containerd/containerd/fs/du_unix.go
@@ -3,17 +3,19 @@
package fs
import (
+ "context"
"os"
"path/filepath"
"syscall"
)
+type inode struct {
+ // TODO(stevvooe): Can probably reduce memory usage by not tracking
+ // device, but we can leave this right for now.
+ dev, ino uint64
+}
+
func diskUsage(roots ...string) (Usage, error) {
- type inode struct {
- // TODO(stevvooe): Can probably reduce memory usage by not tracking
- // device, but we can leave this right for now.
- dev, ino uint64
- }
var (
size int64
@@ -45,3 +47,37 @@
Size: size,
}, nil
}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ inodes = map[inode]struct{}{} // expensive!
+ )
+
+ if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if kind == ChangeKindAdd || kind == ChangeKindModify {
+ stat := fi.Sys().(*syscall.Stat_t)
+
+ inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
+ if _, ok := inodes[inoKey]; !ok {
+ inodes[inoKey] = struct{}{}
+ size += fi.Size()
+ }
+
+ return nil
+
+ }
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+
+ return Usage{
+ Inodes: int64(len(inodes)),
+ Size: size,
+ }, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_windows.go b/vendor/github.com/containerd/containerd/fs/du_windows.go
index 4a0363c..3f852fc 100644
--- a/vendor/github.com/containerd/containerd/fs/du_windows.go
+++ b/vendor/github.com/containerd/containerd/fs/du_windows.go
@@ -3,6 +3,7 @@
package fs
import (
+ "context"
"os"
"path/filepath"
)
@@ -31,3 +32,29 @@
Size: size,
}, nil
}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ )
+
+ if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if kind == ChangeKindAdd || kind == ChangeKindModify {
+ size += fi.Size()
+
+ return nil
+
+ }
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+
+ return Usage{
+ Size: size,
+ }, nil
+}
diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go
index 202fc42..6e9f4bd 100644
--- a/vendor/github.com/containerd/containerd/image.go
+++ b/vendor/github.com/containerd/containerd/image.go
@@ -147,7 +147,7 @@
manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
if err != nil {
- return nil, errors.Wrap(err, "")
+ return nil, err
}
diffIDs, err := i.i.RootFS(ctx, cs, platform)
diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go
index e0d6990..7b4215f 100644
--- a/vendor/github.com/containerd/containerd/images/image.go
+++ b/vendor/github.com/containerd/containerd/images/image.go
@@ -187,13 +187,13 @@
return descs, nil
}
- return nil, errors.Wrap(errdefs.ErrNotFound, "could not resolve manifest")
+ return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
}), image); err != nil {
return ocispec.Manifest{}, err
}
if m == nil {
- return ocispec.Manifest{}, errors.Wrap(errdefs.ErrNotFound, "manifest not found")
+ return ocispec.Manifest{}, errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
}
return *m, nil
@@ -257,7 +257,7 @@
return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
}
- return false, nil, nil, nil, errors.Wrap(err, "image check failed")
+ return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
}
// TODO(stevvooe): It is possible that referenced conponents could have
@@ -272,7 +272,7 @@
missing = append(missing, desc)
continue
} else {
- return false, nil, nil, nil, err
+ return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
}
}
ra.Close()
diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go
new file mode 100644
index 0000000..f8cf742
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/importexport.go
@@ -0,0 +1,21 @@
+package images
+
+import (
+ "context"
+ "io"
+
+ "github.com/containerd/containerd/content"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Importer is the interface for image importer.
+type Importer interface {
+ // Import imports an image from a tar stream.
+ Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
+}
+
+// Exporter is the interface for image exporter.
+type Exporter interface {
+ // Export exports an image to a tar stream.
+ Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error
+}
diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go
deleted file mode 100644
index 9f8f9af..0000000
--- a/vendor/github.com/containerd/containerd/import.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package containerd
-
-import (
- "archive/tar"
- "context"
- "encoding/json"
- "io"
- "io/ioutil"
- "strings"
-
- "github.com/containerd/containerd/content"
- "github.com/containerd/containerd/errdefs"
- "github.com/containerd/containerd/images"
- "github.com/containerd/containerd/reference"
- digest "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-func resolveOCIIndex(idx ocispec.Index, refObject string) (*ocispec.Descriptor, error) {
- tag, dgst := reference.SplitObject(refObject)
- if tag == "" && dgst == "" {
- return nil, errors.Errorf("unexpected object: %q", refObject)
- }
- for _, m := range idx.Manifests {
- if m.Digest == dgst {
- return &m, nil
- }
- annot, ok := m.Annotations[ocispec.AnnotationRefName]
- if ok && annot == tag && tag != "" {
- return &m, nil
- }
- }
- return nil, errors.Errorf("not found: %q", refObject)
-}
-
-func (c *Client) importFromOCITar(ctx context.Context, ref string, reader io.Reader, iopts importOpts) (Image, error) {
- tr := tar.NewReader(reader)
- store := c.ContentStore()
- var desc *ocispec.Descriptor
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return nil, err
- }
- if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
- continue
- }
- if hdr.Name == "index.json" {
- desc, err = onUntarIndexJSON(tr, iopts.refObject)
- if err != nil {
- return nil, err
- }
- continue
- }
- if strings.HasPrefix(hdr.Name, "blobs/") {
- if err := onUntarBlob(ctx, tr, store, hdr.Name, hdr.Size); err != nil {
- return nil, err
- }
- }
- }
- if desc == nil {
- return nil, errors.Errorf("no descriptor found for reference object %q", iopts.refObject)
- }
- imgrec := images.Image{
- Name: ref,
- Target: *desc,
- Labels: iopts.labels,
- }
- is := c.ImageService()
- if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
- if !errdefs.IsNotFound(err) {
- return nil, err
- }
-
- created, err := is.Create(ctx, imgrec)
- if err != nil {
- return nil, err
- }
-
- imgrec = created
- } else {
- imgrec = updated
- }
-
- img := &image{
- client: c,
- i: imgrec,
- }
- return img, nil
-}
-
-func onUntarIndexJSON(r io.Reader, refObject string) (*ocispec.Descriptor, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
- var idx ocispec.Index
- if err := json.Unmarshal(b, &idx); err != nil {
- return nil, err
- }
- return resolveOCIIndex(idx, refObject)
-}
-
-func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
- // name is like "blobs/sha256/deadbeef"
- split := strings.Split(name, "/")
- if len(split) != 3 {
- return errors.Errorf("unexpected name: %q", name)
- }
- algo := digest.Algorithm(split[1])
- if !algo.Available() {
- return errors.Errorf("unsupported algorithm: %s", algo)
- }
- dgst := digest.NewDigestFromHex(algo.String(), split[2])
- return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
-}
diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go
index 136f2cc..629d7f5 100644
--- a/vendor/github.com/containerd/containerd/linux/bundle.go
+++ b/vendor/github.com/containerd/containerd/linux/bundle.go
@@ -75,10 +75,10 @@
type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt)
// ShimRemote is a ShimOpt for connecting and starting a remote shim
-func ShimRemote(shimBinary, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) ShimOpt {
+func ShimRemote(shimBinary, daemonAddress, cgroup string, debug bool, exitHandler func()) ShimOpt {
return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
return b.shimConfig(ns, ropts),
- client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, nonewns, debug, exitHandler)
+ client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, debug, exitHandler)
}
}
diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go
index 1ffaca1..82ed4f4 100644
--- a/vendor/github.com/containerd/containerd/linux/runtime.go
+++ b/vendor/github.com/containerd/containerd/linux/runtime.go
@@ -22,6 +22,7 @@
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/metadata"
+ "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
@@ -78,17 +79,6 @@
NoShim bool `toml:"no_shim"`
// Debug enable debug on the shim
ShimDebug bool `toml:"shim_debug"`
- // ShimNoMountNS prevents the runtime from putting shims into their own mount namespace.
- //
- // Putting the shim in its own mount namespace ensure that any mounts made
- // by it in order to get the task rootfs ready will be undone regardless
- // on how the shim dies.
- //
- // NOTE: This should only be used in kernel older than 3.18 to avoid shims
- // from causing a DoS in their parent namespace due to having a copy of
- // mounts previously there which would prevent unlink, rename and remove
- // operations on those mountpoints.
- ShimNoMountNS bool `toml:"shim_no_newns"`
}
// New returns a configured runtime
@@ -226,8 +216,7 @@
}).Warn("failed to clen up after killed shim")
}
}
- shimopt = ShimRemote(r.config.Shim, r.address, cgroup,
- r.config.ShimNoMountNS, r.config.ShimDebug, exitHandler)
+ shimopt = ShimRemote(r.config.Shim, r.address, cgroup, r.config.ShimDebug, exitHandler)
}
s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts)
@@ -486,7 +475,7 @@
}); err != nil {
log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
}
- if err := unix.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
+ if err := mount.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
log.G(ctx).WithError(err).WithFields(logrus.Fields{
"path": bundle.path,
"id": id,
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client.go b/vendor/github.com/containerd/containerd/linux/shim/client/client.go
index db59e2c..1fb949e 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client.go
@@ -34,7 +34,7 @@
type Opt func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error)
// WithStart executes a new shim process
-func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) Opt {
+func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHandler func()) Opt {
return func(ctx context.Context, config shim.Config) (_ shimapi.ShimService, _ io.Closer, err error) {
socket, err := newSocket(address)
if err != nil {
@@ -47,7 +47,7 @@
}
defer f.Close()
- cmd := newCommand(binary, daemonAddress, nonewns, debug, config, f)
+ cmd := newCommand(binary, daemonAddress, debug, config, f)
ec, err := reaper.Default.Start(cmd)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to start shim")
@@ -87,7 +87,7 @@
}
}
-func newCommand(binary, daemonAddress string, nonewns, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
+func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
selfExe, err := os.Executable()
if err != nil {
panic(err)
@@ -117,7 +117,7 @@
// make sure the shim can be re-parented to system init
// and is cloned in a new mount namespace because the overlay/filesystems
// will be mounted by the shim
- cmd.SysProcAttr = getSysProcAttr(nonewns)
+ cmd.SysProcAttr = getSysProcAttr()
cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
if debug {
cmd.Stdout = os.Stdout
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
index 03ebba0..3125541 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
@@ -10,14 +10,10 @@
"github.com/pkg/errors"
)
-func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
- attr := syscall.SysProcAttr{
+func getSysProcAttr() *syscall.SysProcAttr {
+ return &syscall.SysProcAttr{
Setpgid: true,
}
- if !nonewns {
- attr.Cloneflags = syscall.CLONE_NEWNS
- }
- return &attr
}
func setCgroup(cgroupPath string, cmd *exec.Cmd) error {
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
index b34cf4d..0a24ce4 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
@@ -7,7 +7,7 @@
"syscall"
)
-func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
+func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setpgid: true,
}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/local.go b/vendor/github.com/containerd/containerd/linux/shim/local.go
index 4264977..6e21926 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/local.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/local.go
@@ -7,8 +7,8 @@
"path/filepath"
shimapi "github.com/containerd/containerd/linux/shim/v1"
+ "github.com/containerd/containerd/mount"
ptypes "github.com/gogo/protobuf/types"
- "golang.org/x/sys/unix"
)
// NewLocal returns a shim client implementation for issue commands to a shim
@@ -32,7 +32,7 @@
func (c *local) Delete(ctx context.Context, in *ptypes.Empty) (*shimapi.DeleteResponse, error) {
// make sure we unmount the containers rootfs for this local
- if err := unix.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
+ if err := mount.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
return nil, err
}
return c.s.Delete(ctx, in)
diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go
index 4cca5f6..32f339a 100644
--- a/vendor/github.com/containerd/containerd/metadata/containers.go
+++ b/vendor/github.com/containerd/containerd/metadata/containers.go
@@ -37,12 +37,12 @@
bkt := getContainerBucket(s.tx, namespace, id)
if bkt == nil {
- return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "bucket name %q:%q", namespace, id)
+ return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
}
container := containers.Container{ID: id}
if err := readContainer(&container, bkt); err != nil {
- return containers.Container{}, errors.Wrapf(err, "failed to read container %v", id)
+ return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id)
}
return container, nil
@@ -61,7 +61,7 @@
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
- return nil, nil
+ return nil, nil // empty store
}
var m []containers.Container
@@ -73,7 +73,7 @@
container := containers.Container{ID: string(k)}
if err := readContainer(&container, cbkt); err != nil {
- return errors.Wrap(err, "failed to read container")
+ return errors.Wrapf(err, "failed to read container %q", string(k))
}
if filter.Match(adaptContainer(container)) {
@@ -113,7 +113,7 @@
container.CreatedAt = time.Now().UTC()
container.UpdatedAt = container.CreatedAt
if err := writeContainer(cbkt, &container); err != nil {
- return containers.Container{}, errors.Wrap(err, "failed to write container")
+ return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
}
return container, nil
@@ -131,7 +131,7 @@
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
- return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
+ return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
}
cbkt := bkt.Bucket([]byte(container.ID))
@@ -141,7 +141,7 @@
var updated containers.Container
if err := readContainer(&updated, cbkt); err != nil {
- return updated, errors.Wrapf(err, "failed to read container from bucket")
+ return updated, errors.Wrapf(err, "failed to read container %q", container.ID)
}
createdat := updated.CreatedAt
updated.ID = container.ID
@@ -211,7 +211,7 @@
updated.CreatedAt = createdat
updated.UpdatedAt = time.Now().UTC()
if err := writeContainer(cbkt, &updated); err != nil {
- return containers.Container{}, errors.Wrap(err, "failed to write container")
+ return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
}
return updated, nil
@@ -225,7 +225,7 @@
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
- return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %v, bucket not present", id)
+ return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
}
if err := bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound {
@@ -236,7 +236,7 @@
func validateContainer(container *containers.Container) error {
if err := identifiers.Validate(container.ID); err != nil {
- return errors.Wrapf(err, "container.ID validation error")
+ return errors.Wrap(err, "container.ID")
}
for k := range container.Extensions {
diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go
index 1744321..8be62a9 100644
--- a/vendor/github.com/containerd/containerd/metadata/db.go
+++ b/vendor/github.com/containerd/containerd/metadata/db.go
@@ -138,7 +138,7 @@
if err := m.migrate(tx); err != nil {
return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
}
- log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("database migration to %s.%d finished", m.schema, m.version)
+ log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version)
}
}
@@ -269,7 +269,7 @@
stats.SnapshotD = map[string]time.Duration{}
wg.Add(len(m.dirtySS))
for snapshotterName := range m.dirtySS {
- log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup")
+ log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup")
go func(snapshotterName string) {
st1 := time.Now()
m.cleanupSnapshotter(snapshotterName)
@@ -286,7 +286,7 @@
if m.dirtyCS {
wg.Add(1)
- log.G(ctx).Debug("scheduling content cleanup")
+ log.G(ctx).Debug("schedule content cleanup")
go func() {
ct1 := time.Now()
m.cleanupContent()
diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go
index 7fe6f7d..186f350 100644
--- a/vendor/github.com/containerd/containerd/metadata/gc.go
+++ b/vendor/github.com/containerd/containerd/metadata/gc.go
@@ -301,7 +301,7 @@
cbkt = cbkt.Bucket(bucketKeyObjectBlob)
}
if cbkt != nil {
- log.G(ctx).WithField("key", node.Key).Debug("delete content")
+ log.G(ctx).WithField("key", node.Key).Debug("remove content")
return cbkt.DeleteBucket([]byte(node.Key))
}
case ResourceSnapshot:
@@ -313,7 +313,7 @@
}
ssbkt := sbkt.Bucket([]byte(parts[0]))
if ssbkt != nil {
- log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("delete snapshot")
+ log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot")
return ssbkt.DeleteBucket([]byte(parts[1]))
}
}
diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go
index 3e501c5..6c34e49 100644
--- a/vendor/github.com/containerd/containerd/metadata/snapshot.go
+++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go
@@ -359,7 +359,8 @@
return update(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
- return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+ return errors.Wrapf(errdefs.ErrNotFound,
+ "can not find snapshotter %q", s.name)
}
bbkt, err := bkt.CreateBucket([]byte(name))
@@ -722,7 +723,7 @@
if !errdefs.IsFailedPrecondition(err) {
return err
}
- logger.WithError(err).WithField("key", node.info.Name).Warnf("snapshot removal failed")
+ logger.WithError(err).WithField("key", node.info.Name).Warnf("failed to remove snapshot")
} else {
logger.WithField("key", node.info.Name).Debug("removed snapshot")
}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go
index 474792d..de2e8bb 100644
--- a/vendor/github.com/containerd/containerd/mount/mount_linux.go
+++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go
@@ -2,7 +2,9 @@
import (
"strings"
+ "time"
+ "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -42,8 +44,27 @@
}
// Unmount the provided mount path with the flags
-func Unmount(mount string, flags int) error {
- return unix.Unmount(mount, flags)
+func Unmount(target string, flags int) error {
+ if err := unmount(target, flags); err != nil && err != unix.EINVAL {
+ return err
+ }
+ return nil
+}
+
+func unmount(target string, flags int) error {
+ for i := 0; i < 50; i++ {
+ if err := unix.Unmount(target, flags); err != nil {
+ switch err {
+ case unix.EBUSY:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ default:
+ return err
+ }
+ }
+ return nil
+ }
+ return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target)
}
// UnmountAll repeatedly unmounts the given mount point until there
@@ -51,7 +72,7 @@
// useful for undoing a stack of mounts on the same mount point.
func UnmountAll(mount string, flags int) error {
for {
- if err := Unmount(mount, flags); err != nil {
+ if err := unmount(mount, flags); err != nil {
// EINVAL is returned if the target is not a
// mount point, indicating that we are
// done. It can also indicate a few other
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
index b17ca32..865aff2 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
@@ -12,12 +12,11 @@
"strconv"
"strings"
- "golang.org/x/sys/unix"
-
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/fs"
"github.com/containerd/containerd/images"
+ "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/user"
@@ -101,7 +100,7 @@
parts := strings.Split(config.User, ":")
switch len(parts) {
case 1:
- v, err := strconv.ParseUint(parts[0], 0, 10)
+ v, err := strconv.Atoi(parts[0])
if err != nil {
// if we cannot parse as a uint they try to see if it is a username
if err := WithUsername(config.User)(ctx, client, c, s); err != nil {
@@ -113,13 +112,13 @@
return err
}
case 2:
- v, err := strconv.ParseUint(parts[0], 0, 10)
+ v, err := strconv.Atoi(parts[0])
if err != nil {
- return err
+ return errors.Wrapf(err, "parse uid %s", parts[0])
}
uid := uint32(v)
- if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
- return err
+ if v, err = strconv.Atoi(parts[1]); err != nil {
+ return errors.Wrapf(err, "parse gid %s", parts[1])
}
gid := uint32(v)
s.Process.User.UID, s.Process.User.GID = uid, gid
@@ -260,7 +259,7 @@
// or uid is not found in /etc/passwd, it sets gid to be the same with
// uid, and not returns error.
func WithUserID(uid uint32) SpecOpts {
- return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+ return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
if c.Snapshotter == "" {
return errors.Errorf("no snapshotter set for container")
}
@@ -276,13 +275,19 @@
if err != nil {
return err
}
- defer os.RemoveAll(root)
+ defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
- defer unix.Unmount(root, 0)
+ defer func() {
+ if uerr := mount.Unmount(root, 0); uerr != nil {
+ if err == nil {
+ err = uerr
+ }
+ }
+ }()
ppath, err := fs.RootPath(root, "/etc/passwd")
if err != nil {
return err
@@ -317,7 +322,7 @@
// does not exist, or the username is not found in /etc/passwd,
// it returns error.
func WithUsername(username string) SpecOpts {
- return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+ return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
if c.Snapshotter == "" {
return errors.Errorf("no snapshotter set for container")
}
@@ -333,13 +338,19 @@
if err != nil {
return err
}
- defer os.RemoveAll(root)
+ defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
- defer unix.Unmount(root, 0)
+ defer func() {
+ if uerr := mount.Unmount(root, 0); uerr != nil {
+ if err == nil {
+ err = uerr
+ }
+ }
+ }()
ppath, err := fs.RootPath(root, "/etc/passwd")
if err != nil {
return err
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
index 3605f8e..796ad55 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
@@ -60,3 +60,11 @@
return nil
}
}
+
+// WithUsername sets the username on the process
+func WithUsername(username string) SpecOpts {
+ return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+ s.Process.User.Username = username
+ return nil
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go
index e583391..ad4cd9f 100644
--- a/vendor/github.com/containerd/containerd/remotes/handlers.go
+++ b/vendor/github.com/containerd/containerd/remotes/handlers.go
@@ -114,6 +114,7 @@
func commitOpts(desc ocispec.Descriptor, r io.Reader) (io.Reader, []content.Opt) {
var childrenF func(r io.Reader) ([]ocispec.Descriptor, error)
+ // TODO(AkihiroSuda): use images/oci.GetChildrenDescriptors?
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {
diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go
index e6d2be6..4051295 100644
--- a/vendor/github.com/containerd/containerd/rootfs/apply.go
+++ b/vendor/github.com/containerd/containerd/rootfs/apply.go
@@ -55,10 +55,10 @@
_, err := sn.Stat(ctx, chainID.String())
if err == nil {
- log.G(ctx).Debugf("Extraction not needed, layer snapshot exists")
+ log.G(ctx).Debugf("Extraction not needed, layer snapshot %s exists", chainID)
return false, nil
} else if !errdefs.IsNotFound(err) {
- return false, errors.Wrap(err, "failed to stat snapshot")
+ return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
}
key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
@@ -67,7 +67,7 @@
mounts, err := sn.Prepare(ctx, key, parent.String(), opts...)
if err != nil {
//TODO: If is snapshot exists error, retry
- return false, errors.Wrap(err, "failed to prepare extraction layer")
+ return false, errors.Wrapf(err, "failed to prepare extraction snapshot %q", key)
}
defer func() {
if err != nil {
@@ -89,7 +89,7 @@
if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
- return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
+ return false, errors.Wrapf(err, "failed to commit snapshot %s", key)
}
// Destination already exists, cleanup key and return without error
diff --git a/vendor/github.com/containerd/containerd/runtime/task_list.go b/vendor/github.com/containerd/containerd/runtime/task_list.go
index 7c52265..05f34c3 100644
--- a/vendor/github.com/containerd/containerd/runtime/task_list.go
+++ b/vendor/github.com/containerd/containerd/runtime/task_list.go
@@ -49,6 +49,8 @@
// GetAll tasks under a namespace
func (l *TaskList) GetAll(ctx context.Context) ([]Task, error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go
index 2cbcbaf..8d25683 100644
--- a/vendor/github.com/containerd/containerd/task.go
+++ b/vendor/github.com/containerd/containerd/task.go
@@ -277,7 +277,7 @@
return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
}
-func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (Process, error) {
+func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (_ Process, err error) {
if id == "" {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
}
@@ -285,6 +285,12 @@
if err != nil {
return nil, err
}
+ defer func() {
+ if err != nil && i != nil {
+ i.Cancel()
+ i.Close()
+ }
+ }()
any, err := typeurl.MarshalAny(spec)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf
index 382aaa6..fea47e4 100644
--- a/vendor/github.com/containerd/containerd/vendor.conf
+++ b/vendor/github.com/containerd/containerd/vendor.conf
@@ -41,4 +41,4 @@
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
github.com/dmcgowan/go-tar go1.10
-github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
+github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
diff --git a/vendor/github.com/docker/libnetwork/agent.go b/vendor/github.com/docker/libnetwork/agent.go
index f120065..085b8c4 100644
--- a/vendor/github.com/docker/libnetwork/agent.go
+++ b/vendor/github.com/docker/libnetwork/agent.go
@@ -293,11 +293,13 @@
c.Config().Daemon.NetworkControlPlaneMTU, netDBConf.PacketBufferSize)
}
nDB, err := networkdb.New(netDBConf)
-
if err != nil {
return err
}
+ // Register the diagnose handlers
+ c.DiagnoseServer.RegisterHandler(nDB, networkdb.NetDbPaths2Func)
+
var cancelList []func()
ch, cancel := nDB.Watch(libnetworkEPTable, "", "")
cancelList = append(cancelList, cancel)
@@ -436,7 +438,7 @@
for eid, value := range entries {
var epRec EndpointRecord
nid := n.ID()
- if err := proto.Unmarshal(value.([]byte), &epRec); err != nil {
+ if err := proto.Unmarshal(value.Value, &epRec); err != nil {
logrus.Errorf("Unmarshal of libnetworkEPTable failed for endpoint %s in network %s, %v", eid, nid, err)
continue
}
@@ -461,7 +463,7 @@
}
entries := agent.networkDB.GetTableByNetwork(table.name, n.id)
for key, value := range entries {
- epID, info := d.DecodeTableEntry(table.name, key, value.([]byte))
+ epID, info := d.DecodeTableEntry(table.name, key, value.Value)
if ep, ok := eps[epID]; !ok {
logrus.Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID)
} else {
diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go
index 236095c..e938948 100644
--- a/vendor/github.com/docker/libnetwork/controller.go
+++ b/vendor/github.com/docker/libnetwork/controller.go
@@ -60,6 +60,7 @@
"github.com/docker/libnetwork/cluster"
"github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/datastore"
+ "github.com/docker/libnetwork/diagnose"
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/driverapi"
"github.com/docker/libnetwork/drvregistry"
@@ -133,6 +134,13 @@
// SetKeys configures the encryption key for gossip and overlay data path
SetKeys(keys []*types.EncryptionKey) error
+
+ // StartDiagnose start the network diagnose mode
+ StartDiagnose(port int)
+ // StopDiagnose start the network diagnose mode
+ StopDiagnose()
+ // IsDiagnoseEnabled returns true if the diagnose is enabled
+ IsDiagnoseEnabled() bool
}
// NetworkWalker is a client provided function which will be used to walk the Networks.
@@ -167,6 +175,7 @@
agentStopDone chan struct{}
keys []*types.EncryptionKey
clusterConfigAvailable bool
+ DiagnoseServer *diagnose.Server
sync.Mutex
}
@@ -185,7 +194,9 @@
serviceBindings: make(map[serviceKey]*service),
agentInitDone: make(chan struct{}),
networkLocker: locker.New(),
+ DiagnoseServer: diagnose.New(),
}
+ c.DiagnoseServer.Init()
if err := c.initStores(); err != nil {
return nil, err
@@ -837,11 +848,34 @@
if err = c.updateToStore(network); err != nil {
return nil, err
}
+ defer func() {
+ if err != nil {
+ if e := c.deleteFromStore(network); e != nil {
+ logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e)
+ }
+ }
+ }()
+
if network.configOnly {
return network, nil
}
joinCluster(network)
+ defer func() {
+ if err != nil {
+ network.cancelDriverWatches()
+ if e := network.leaveCluster(); e != nil {
+ logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e)
+ }
+ }
+ }()
+
+ if len(network.loadBalancerIP) != 0 {
+ if err = network.createLoadBalancerSandbox(); err != nil {
+ return nil, err
+ }
+ }
+
if !c.isDistributedControl() {
c.Lock()
arrangeIngressFilterRule()
@@ -1268,3 +1302,28 @@
c.stopExternalKeyListener()
osl.GC()
}
+
+// StartDiagnose start the network diagnose mode
+func (c *controller) StartDiagnose(port int) {
+ c.Lock()
+ if !c.DiagnoseServer.IsDebugEnable() {
+ c.DiagnoseServer.EnableDebug("127.0.0.1", port)
+ }
+ c.Unlock()
+}
+
+// StopDiagnose start the network diagnose mode
+func (c *controller) StopDiagnose() {
+ c.Lock()
+ if c.DiagnoseServer.IsDebugEnable() {
+ c.DiagnoseServer.DisableDebug()
+ }
+ c.Unlock()
+}
+
+// IsDiagnoseEnabled returns true if the diagnose is enabled
+func (c *controller) IsDiagnoseEnabled() bool {
+ c.Lock()
+ defer c.Unlock()
+ return c.DiagnoseServer.IsDebugEnable()
+}
diff --git a/vendor/github.com/docker/libnetwork/diagnose/diagnose.go b/vendor/github.com/docker/libnetwork/diagnose/diagnose.go
deleted file mode 100644
index 9682fff..0000000
--- a/vendor/github.com/docker/libnetwork/diagnose/diagnose.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package diagnose
-
-import (
- "fmt"
- "net"
- "net/http"
- "sync"
-
- "github.com/sirupsen/logrus"
-)
-
-// HTTPHandlerFunc TODO
-type HTTPHandlerFunc func(interface{}, http.ResponseWriter, *http.Request)
-
-type httpHandlerCustom struct {
- ctx interface{}
- F func(interface{}, http.ResponseWriter, *http.Request)
-}
-
-// ServeHTTP TODO
-func (h httpHandlerCustom) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- h.F(h.ctx, w, r)
-}
-
-var diagPaths2Func = map[string]HTTPHandlerFunc{
- "/": notImplemented,
- "/help": help,
- "/ready": ready,
-}
-
-// Server when the debug is enabled exposes a
-// This data structure is protected by the Agent mutex so does not require and additional mutex here
-type Server struct {
- sk net.Listener
- port int
- mux *http.ServeMux
- registeredHanders []string
- sync.Mutex
-}
-
-// Init TODO
-func (n *Server) Init() {
- n.mux = http.NewServeMux()
-
- // Register local handlers
- n.RegisterHandler(n, diagPaths2Func)
-}
-
-// RegisterHandler TODO
-func (n *Server) RegisterHandler(ctx interface{}, hdlrs map[string]HTTPHandlerFunc) {
- n.Lock()
- defer n.Unlock()
- for path, fun := range hdlrs {
- n.mux.Handle(path, httpHandlerCustom{ctx, fun})
- n.registeredHanders = append(n.registeredHanders, path)
- }
-}
-
-// EnableDebug opens a TCP socket to debug the passed network DB
-func (n *Server) EnableDebug(ip string, port int) {
- n.Lock()
- defer n.Unlock()
-
- n.port = port
- logrus.SetLevel(logrus.DebugLevel)
-
- if n.sk != nil {
- logrus.Infof("The server is already up and running")
- return
- }
-
- logrus.Infof("Starting the server listening on %d for commands", port)
-
- // // Create the socket
- // var err error
- // n.sk, err = net.Listen("tcp", listeningAddr)
- // if err != nil {
- // log.Fatal(err)
- // }
- //
- // go func() {
- // http.Serve(n.sk, n.mux)
- // }()
- http.ListenAndServe(fmt.Sprintf(":%d", port), n.mux)
-}
-
-// DisableDebug stop the dubug and closes the tcp socket
-func (n *Server) DisableDebug() {
- n.Lock()
- defer n.Unlock()
- n.sk.Close()
- n.sk = nil
-}
-
-// IsDebugEnable returns true when the debug is enabled
-func (n *Server) IsDebugEnable() bool {
- n.Lock()
- defer n.Unlock()
- return n.sk != nil
-}
-
-func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "URL path: %s no method implemented check /help\n", r.URL.Path)
-}
-
-func help(ctx interface{}, w http.ResponseWriter, r *http.Request) {
- n, ok := ctx.(*Server)
- if ok {
- for _, path := range n.registeredHanders {
- fmt.Fprintf(w, "%s\n", path)
- }
- }
-}
-
-func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "OK\n")
-}
-
-// DebugHTTPForm TODO
-func DebugHTTPForm(r *http.Request) {
- r.ParseForm()
- for k, v := range r.Form {
- logrus.Debugf("Form[%q] = %q\n", k, v)
- }
-}
-
-// HTTPReplyError TODO
-func HTTPReplyError(w http.ResponseWriter, message, usage string) {
- fmt.Fprintf(w, "%s\n", message)
- if usage != "" {
- fmt.Fprintf(w, "Usage: %s\n", usage)
- }
-}
diff --git a/vendor/github.com/docker/libnetwork/diagnose/server.go b/vendor/github.com/docker/libnetwork/diagnose/server.go
new file mode 100644
index 0000000..2330b65
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/diagnose/server.go
@@ -0,0 +1,227 @@
+package diagnose
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "sync"
+ "sync/atomic"
+
+ stackdump "github.com/docker/docker/pkg/signal"
+ "github.com/docker/libnetwork/common"
+ "github.com/sirupsen/logrus"
+)
+
+// HTTPHandlerFunc TODO
+type HTTPHandlerFunc func(interface{}, http.ResponseWriter, *http.Request)
+
+type httpHandlerCustom struct {
+ ctx interface{}
+ F func(interface{}, http.ResponseWriter, *http.Request)
+}
+
+// ServeHTTP TODO
+func (h httpHandlerCustom) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ h.F(h.ctx, w, r)
+}
+
+var diagPaths2Func = map[string]HTTPHandlerFunc{
+ "/": notImplemented,
+ "/help": help,
+ "/ready": ready,
+ "/stackdump": stackTrace,
+}
+
+// Server when the debug is enabled exposes a
+// This data structure is protected by the Agent mutex so does not require and additional mutex here
+type Server struct {
+ enable int32
+ srv *http.Server
+ port int
+ mux *http.ServeMux
+ registeredHanders map[string]bool
+ sync.Mutex
+}
+
+// New creates a new diagnose server
+func New() *Server {
+ return &Server{
+ registeredHanders: make(map[string]bool),
+ }
+}
+
+// Init initialize the mux for the http handling and register the base hooks
+func (s *Server) Init() {
+ s.mux = http.NewServeMux()
+
+ // Register local handlers
+ s.RegisterHandler(s, diagPaths2Func)
+}
+
+// RegisterHandler allows to register new handlers to the mux and to a specific path
+func (s *Server) RegisterHandler(ctx interface{}, hdlrs map[string]HTTPHandlerFunc) {
+ s.Lock()
+ defer s.Unlock()
+ for path, fun := range hdlrs {
+ if _, ok := s.registeredHanders[path]; ok {
+ continue
+ }
+ s.mux.Handle(path, httpHandlerCustom{ctx, fun})
+ s.registeredHanders[path] = true
+ }
+}
+
+// ServeHTTP this is the method called bu the ListenAndServe, and is needed to allow us to
+// use our custom mux
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ s.mux.ServeHTTP(w, r)
+}
+
+// EnableDebug opens a TCP socket to debug the passed network DB
+func (s *Server) EnableDebug(ip string, port int) {
+ s.Lock()
+ defer s.Unlock()
+
+ s.port = port
+
+ if s.enable == 1 {
+ logrus.Info("The server is already up and running")
+ return
+ }
+
+ logrus.Infof("Starting the diagnose server listening on %d for commands", port)
+ srv := &http.Server{Addr: fmt.Sprintf("%s:%d", ip, port), Handler: s}
+ s.srv = srv
+ s.enable = 1
+ go func(n *Server) {
+ // Ingore ErrServerClosed that is returned on the Shutdown call
+ if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+ logrus.Errorf("ListenAndServe error: %s", err)
+ atomic.SwapInt32(&n.enable, 0)
+ }
+ }(s)
+}
+
+// DisableDebug stop the dubug and closes the tcp socket
+func (s *Server) DisableDebug() {
+ s.Lock()
+ defer s.Unlock()
+
+ s.srv.Shutdown(context.Background())
+ s.srv = nil
+ s.enable = 0
+ logrus.Info("Disabling the diagnose server")
+}
+
+// IsDebugEnable returns true when the debug is enabled
+func (s *Server) IsDebugEnable() bool {
+ s.Lock()
+ defer s.Unlock()
+ return s.enable == 1
+}
+
+func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ _, json := ParseHTTPFormOptions(r)
+ rsp := WrongCommand("not implemented", fmt.Sprintf("URL path: %s no method implemented check /help\n", r.URL.Path))
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("command not implemented done")
+
+ HTTPReply(w, rsp, json)
+}
+
+func help(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ _, json := ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("help done")
+
+ n, ok := ctx.(*Server)
+ var result string
+ if ok {
+ for path := range n.registeredHanders {
+ result += fmt.Sprintf("%s\n", path)
+ }
+ HTTPReply(w, CommandSucceed(&StringCmd{Info: result}), json)
+ }
+}
+
+func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ _, json := ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("ready done")
+ HTTPReply(w, CommandSucceed(&StringCmd{Info: "OK"}), json)
+}
+
+func stackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ _, json := ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("stack trace")
+
+ path, err := stackdump.DumpStacks("/tmp/")
+ if err != nil {
+ log.WithError(err).Error("failed to write goroutines dump")
+ HTTPReply(w, FailCommand(err), json)
+ } else {
+ log.Info("stack trace done")
+ HTTPReply(w, CommandSucceed(&StringCmd{Info: fmt.Sprintf("goroutine stacks written to %s", path)}), json)
+ }
+}
+
+// DebugHTTPForm helper to print the form url parameters
+func DebugHTTPForm(r *http.Request) {
+ for k, v := range r.Form {
+ logrus.Debugf("Form[%q] = %q\n", k, v)
+ }
+}
+
+// JSONOutput contains details on JSON output printing
+type JSONOutput struct {
+ enable bool
+ prettyPrint bool
+}
+
+// ParseHTTPFormOptions easily parse the JSON printing options
+func ParseHTTPFormOptions(r *http.Request) (bool, *JSONOutput) {
+ _, unsafe := r.Form["unsafe"]
+ v, json := r.Form["json"]
+ var pretty bool
+ if len(v) > 0 {
+ pretty = v[0] == "pretty"
+ }
+ return unsafe, &JSONOutput{enable: json, prettyPrint: pretty}
+}
+
+// HTTPReply helper function that takes care of sending the message out
+func HTTPReply(w http.ResponseWriter, r *HTTPResult, j *JSONOutput) (int, error) {
+ var response []byte
+ if j.enable {
+ w.Header().Set("Content-Type", "application/json")
+ var err error
+ if j.prettyPrint {
+ response, err = json.MarshalIndent(r, "", " ")
+ if err != nil {
+ response, _ = json.MarshalIndent(FailCommand(err), "", " ")
+ }
+ } else {
+ response, err = json.Marshal(r)
+ if err != nil {
+ response, _ = json.Marshal(FailCommand(err))
+ }
+ }
+ } else {
+ response = []byte(r.String())
+ }
+ return fmt.Fprint(w, string(response))
+}
diff --git a/vendor/github.com/docker/libnetwork/diagnose/types.go b/vendor/github.com/docker/libnetwork/diagnose/types.go
new file mode 100644
index 0000000..982c54a
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/diagnose/types.go
@@ -0,0 +1,122 @@
+package diagnose
+
+import "fmt"
+
+// StringInterface interface that has to be implemented by messages
+type StringInterface interface {
+ String() string
+}
+
+// CommandSucceed creates a success message
+func CommandSucceed(result StringInterface) *HTTPResult {
+ return &HTTPResult{
+ Message: "OK",
+ Details: result,
+ }
+}
+
+// FailCommand creates a failure message with error
+func FailCommand(err error) *HTTPResult {
+ return &HTTPResult{
+ Message: "FAIL",
+ Details: &ErrorCmd{Error: err.Error()},
+ }
+}
+
+// WrongCommand creates a wrong command response
+func WrongCommand(message, usage string) *HTTPResult {
+ return &HTTPResult{
+ Message: message,
+ Details: &UsageCmd{Usage: usage},
+ }
+}
+
+// HTTPResult Diagnose Server HTTP result operation
+type HTTPResult struct {
+ Message string `json:"message"`
+ Details StringInterface `json:"details"`
+}
+
+func (h *HTTPResult) String() string {
+ rsp := h.Message
+ if h.Details != nil {
+ rsp += "\n" + h.Details.String()
+ }
+ return rsp
+}
+
+// UsageCmd command with usage field
+type UsageCmd struct {
+ Usage string `json:"usage"`
+}
+
+func (u *UsageCmd) String() string {
+ return "Usage: " + u.Usage
+}
+
+// StringCmd command with info string
+type StringCmd struct {
+ Info string `json:"info"`
+}
+
+func (s *StringCmd) String() string {
+ return s.Info
+}
+
+// ErrorCmd command with error
+type ErrorCmd struct {
+ Error string `json:"error"`
+}
+
+func (e *ErrorCmd) String() string {
+ return "Error: " + e.Error
+}
+
+// TableObj network db table object
+type TableObj struct {
+ Length int `json:"size"`
+ Elements []StringInterface `json:"entries"`
+}
+
+func (t *TableObj) String() string {
+ output := fmt.Sprintf("total entries: %d\n", t.Length)
+ for _, e := range t.Elements {
+ output += e.String()
+ }
+ return output
+}
+
+// PeerEntryObj entry in the networkdb peer table
+type PeerEntryObj struct {
+ Index int `json:"-"`
+ Name string `json:"-=name"`
+ IP string `json:"ip"`
+}
+
+func (p *PeerEntryObj) String() string {
+ return fmt.Sprintf("%d) %s -> %s\n", p.Index, p.Name, p.IP)
+}
+
+// TableEntryObj network db table entry object
+type TableEntryObj struct {
+ Index int `json:"-"`
+ Key string `json:"key"`
+ Value string `json:"value"`
+ Owner string `json:"owner"`
+}
+
+func (t *TableEntryObj) String() string {
+ return fmt.Sprintf("%d) k:`%s` -> v:`%s` owner:`%s`\n", t.Index, t.Key, t.Value, t.Owner)
+}
+
+// TableEndpointsResult fully typed message for proper unmarshaling on the client side
+type TableEndpointsResult struct {
+ TableObj
+ Elements []TableEntryObj `json:"entries"`
+}
+
+// TablePeersResult fully typed message for proper unmarshaling on the client side
+type TablePeersResult struct {
+ TableObj
+ Elements []PeerEntryObj `json:"entries"`
+}
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
index 1742c8d..1fa8f0e 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -42,6 +42,14 @@
DefaultGatewayV6AuxKey = "DefaultGatewayIPv6"
)
+type defaultBridgeNetworkConflict struct {
+ ID string
+}
+
+func (d defaultBridgeNetworkConflict) Error() string {
+ return fmt.Sprintf("Stale default bridge network %s", d.ID)
+}
+
type iptableCleanFunc func() error
type iptablesCleanFuncs []iptableCleanFunc
@@ -137,6 +145,7 @@
networks map[string]*bridgeNetwork
store datastore.DataStore
nlh *netlink.Handle
+ configNetwork sync.Mutex
sync.Mutex
}
@@ -322,41 +331,6 @@
return nil
}
-// Checks whether this network's configuration for the network with this id conflicts with any of the passed networks
-func (c *networkConfiguration) conflictsWithNetworks(id string, others []*bridgeNetwork) error {
- for _, nw := range others {
-
- nw.Lock()
- nwID := nw.id
- nwConfig := nw.config
- nwBridge := nw.bridge
- nw.Unlock()
-
- if nwID == id {
- continue
- }
- // Verify the name (which may have been set by newInterface()) does not conflict with
- // existing bridge interfaces. Ironically the system chosen name gets stored in the config...
- // Basically we are checking if the two original configs were both empty.
- if nwConfig.BridgeName == c.BridgeName {
- return types.ForbiddenErrorf("conflicts with network %s (%s) by bridge name", nwID, nwConfig.BridgeName)
- }
- // If this network config specifies the AddressIPv4, we need
- // to make sure it does not conflict with any previously allocated
- // bridges. This could not be completely caught by the config conflict
- // check, because networks which config does not specify the AddressIPv4
- // get their address and subnet selected by the driver (see electBridgeIPv4())
- if c.AddressIPv4 != nil && nwBridge.bridgeIPv4 != nil {
- if nwBridge.bridgeIPv4.Contains(c.AddressIPv4.IP) ||
- c.AddressIPv4.Contains(nwBridge.bridgeIPv4.IP) {
- return types.ForbiddenErrorf("conflicts with network %s (%s) by ip network", nwID, nwConfig.BridgeName)
- }
- }
- }
-
- return nil
-}
-
func (d *driver) configure(option map[string]interface{}) error {
var (
config *configuration
@@ -602,11 +576,27 @@
return err
}
- err = config.processIPAM(id, ipV4Data, ipV6Data)
- if err != nil {
+ if err = config.processIPAM(id, ipV4Data, ipV6Data); err != nil {
return err
}
+ // start the critical section, from this point onward we are dealing with the list of networks
+ // so to be consistent we cannot allow that the list changes
+ d.configNetwork.Lock()
+ defer d.configNetwork.Unlock()
+
+ // check network conflicts
+ if err = d.checkConflict(config); err != nil {
+ nerr, ok := err.(defaultBridgeNetworkConflict)
+ if !ok {
+ return err
+ }
+ // Got a conflict with a stale default network, clean that up and continue
+ logrus.Warn(nerr)
+ d.deleteNetwork(nerr.ID)
+ }
+
+ // there is no conflict, now create the network
if err = d.createNetwork(config); err != nil {
return err
}
@@ -614,33 +604,47 @@
return d.storeUpdate(config)
}
+func (d *driver) checkConflict(config *networkConfiguration) error {
+ networkList := d.getNetworks()
+ for _, nw := range networkList {
+ nw.Lock()
+ nwConfig := nw.config
+ nw.Unlock()
+ if err := nwConfig.Conflicts(config); err != nil {
+ if config.DefaultBridge {
+ // We encountered and identified a stale default network
+ // We must delete it as libnetwork is the source of truth
+ // The default network being created must be the only one
+ // This can happen only from docker 1.12 onward
+ logrus.Infof("Found stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
+ return defaultBridgeNetworkConflict{nwConfig.ID}
+ }
+
+ return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
+ config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
+ }
+ }
+ return nil
+}
+
func (d *driver) createNetwork(config *networkConfiguration) error {
var err error
defer osl.InitOSContext()()
networkList := d.getNetworks()
- for i, nw := range networkList {
- nw.Lock()
- nwConfig := nw.config
- nw.Unlock()
- if err := nwConfig.Conflicts(config); err != nil {
- if config.DefaultBridge {
- // We encountered and identified a stale default network
- // We must delete it as libnetwork is the source of thruth
- // The default network being created must be the only one
- // This can happen only from docker 1.12 on ward
- logrus.Infof("Removing stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
- if err := d.DeleteNetwork(nwConfig.ID); err != nil {
- logrus.Warnf("Failed to remove stale default network: %s (%s): %v. Will remove from store.", nwConfig.ID, nwConfig.BridgeName, err)
- d.storeDelete(nwConfig)
- }
- networkList = append(networkList[:i], networkList[i+1:]...)
- } else {
- return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
- config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
- }
- }
+
+ // Initialize handle when needed
+ d.Lock()
+ if d.nlh == nil {
+ d.nlh = ns.NlHandle()
+ }
+ d.Unlock()
+
+ // Create or retrieve the bridge L3 interface
+ bridgeIface, err := newInterface(d.nlh, config)
+ if err != nil {
+ return err
}
// Create and set network handler in driver
@@ -649,6 +653,7 @@
endpoints: make(map[string]*bridgeEndpoint),
config: config,
portMapper: portmapper.New(d.config.UserlandProxyPath),
+ bridge: bridgeIface,
driver: d,
}
@@ -665,35 +670,15 @@
}
}()
- // Initialize handle when needed
- d.Lock()
- if d.nlh == nil {
- d.nlh = ns.NlHandle()
- }
- d.Unlock()
-
- // Create or retrieve the bridge L3 interface
- bridgeIface, err := newInterface(d.nlh, config)
- if err != nil {
- return err
- }
- network.bridge = bridgeIface
-
- // Verify the network configuration does not conflict with previously installed
- // networks. This step is needed now because driver might have now set the bridge
- // name on this config struct. And because we need to check for possible address
- // conflicts, so we need to check against operationa lnetworks.
- if err = config.conflictsWithNetworks(config.ID, networkList); err != nil {
- return err
- }
-
+ // Add inter-network communication rules.
setupNetworkIsolationRules := func(config *networkConfiguration, i *bridgeInterface) error {
if err := network.isolateNetwork(networkList, true); err != nil {
- if err := network.isolateNetwork(networkList, false); err != nil {
+ if err = network.isolateNetwork(networkList, false); err != nil {
logrus.Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err)
}
return err
}
+ // register the cleanup function
network.registerIptCleanFunc(func() error {
nwList := d.getNetworks()
return network.isolateNetwork(nwList, false)
@@ -767,10 +752,17 @@
}
func (d *driver) DeleteNetwork(nid string) error {
+
+ d.configNetwork.Lock()
+ defer d.configNetwork.Unlock()
+
+ return d.deleteNetwork(nid)
+}
+
+func (d *driver) deleteNetwork(nid string) error {
var err error
defer osl.InitOSContext()()
-
// Get network handler and remove it from driver
d.Lock()
n, ok := d.networks[nid]
@@ -814,12 +806,6 @@
}
}()
- // Sanity check
- if n == nil {
- err = driverapi.ErrNoNetwork(nid)
- return err
- }
-
switch config.BridgeIfaceCreator {
case ifaceCreatedByLibnetwork, ifaceCreatorUnknown:
// We only delete the bridge if it was created by the bridge driver and
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
index fc45a7e..50cbdb1 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go
@@ -9,7 +9,7 @@
d.Unlock()
// Sanity check.
- if driverConfig.EnableIPTables == false {
+ if !driverConfig.EnableIPTables {
return IPTableCfgError(config.BridgeName)
}
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
index 3fbfccf..1131417 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
@@ -696,6 +696,12 @@
var nlSock *nl.NetlinkSocket
sbox.InvokeFunc(func() {
nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH)
+ if err != nil {
+ return
+ }
+ // set the receive timeout to not remain stuck on the RecvFrom if the fd gets closed
+ tv := syscall.NsecToTimeval(soTimeout.Nanoseconds())
+ err = nlSock.SetReceiveTimeout(&tv)
})
n.setNetlinkSocket(nlSock)
@@ -721,6 +727,11 @@
// The netlink socket got closed, simply exit to not leak this goroutine
return
}
+ // When the receive timeout expires the receive will return EAGAIN
+ if err == syscall.EAGAIN {
+ // we continue here to avoid spam for timeouts
+ continue
+ }
logrus.Errorf("Failed to receive from netlink: %v ", err)
continue
}
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/labels.go b/vendor/github.com/docker/libnetwork/drivers/windows/labels.go
index 6cb077c..ead3ee6 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/labels.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/labels.go
@@ -39,4 +39,7 @@
// DisableDNS label
DisableDNS = "com.docker.network.windowsshim.disable_dns"
+
+ // DisableGatewayDNS label
+ DisableGatewayDNS = "com.docker.network.windowsshim.disable_gatewaydns"
)
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
index 4a03e72..964099c 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
@@ -44,6 +44,7 @@
NetworkAdapterName string
dbIndex uint64
dbExists bool
+ DisableGatewayDNS bool
}
// endpointConfiguration represents the user specified configuration for the sandbox endpoint
@@ -177,6 +178,12 @@
config.DNSSuffix = value
case DNSServers:
config.DNSServers = value
+ case DisableGatewayDNS:
+ b, err := strconv.ParseBool(value)
+ if err != nil {
+ return nil, err
+ }
+ config.DisableGatewayDNS = b
case MacPool:
config.MacPools = make([]hcsshim.MacPool, 0)
s := strings.Split(value, ",")
@@ -589,7 +596,14 @@
endpointStruct.DNSServerList = strings.Join(epOption.DNSServers, ",")
+ // overwrite the ep DisableDNS option if DisableGatewayDNS was set to true during the network creation option
+ if n.config.DisableGatewayDNS {
+ logrus.Debugf("n.config.DisableGatewayDNS[%v] overwrites epOption.DisableDNS[%v]", n.config.DisableGatewayDNS, epOption.DisableDNS)
+ epOption.DisableDNS = n.config.DisableGatewayDNS
+ }
+
if n.driver.name == "nat" && !epOption.DisableDNS {
+ logrus.Debugf("endpointStruct.EnableInternalDNS =[%v]", endpointStruct.EnableInternalDNS)
endpointStruct.EnableInternalDNS = true
}
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go
index caa93c6..9f8a7b1 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go
@@ -64,7 +64,7 @@
if err = d.createNetwork(ncfg); err != nil {
logrus.Warnf("could not create windows network for id %s hnsid %s while booting up from persistent state: %v", ncfg.ID, ncfg.HnsID, err)
}
- logrus.Debugf("Network (%s) restored", ncfg.ID[0:7])
+ logrus.Debugf("Network %v (%s) restored", d.name, ncfg.ID[0:7])
}
return nil
diff --git a/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go b/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
index 9cf99d4..5c7b1f5 100644
--- a/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
+++ b/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go
@@ -5,7 +5,6 @@
"github.com/docker/libnetwork/discoverapi"
"github.com/docker/libnetwork/ipamapi"
- "github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/types"
"github.com/sirupsen/logrus"
)
@@ -75,15 +74,11 @@
return nil, nil, err
}
- // TODO Windows: Remove this once the bug in docker daemon is fixed
- // that causes it to throw an exception on nil gateway
if prefAddress != nil {
return &net.IPNet{IP: prefAddress, Mask: ipNet.Mask}, nil, nil
- } else if opts[ipamapi.RequestAddressType] == netlabel.Gateway {
- return ipNet, nil, nil
- } else {
- return nil, nil, nil
}
+
+ return nil, nil, nil
}
// ReleaseAddress releases the address - always succeeds
diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
index ebcdd80..effbb71 100644
--- a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
+++ b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
@@ -5,12 +5,19 @@
import (
"net"
"syscall"
+ "time"
"fmt"
+
"github.com/vishvananda/netlink/nl"
"github.com/vishvananda/netns"
)
+const (
+ netlinkRecvSocketsTimeout = 3 * time.Second
+ netlinkSendSocketTimeout = 30 * time.Second
+)
+
// Service defines an IPVS service in its entirety.
type Service struct {
// Virtual service address.
@@ -82,6 +89,15 @@
if err != nil {
return nil, err
}
+ // Add operation timeout to avoid deadlocks
+ tv := syscall.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds())
+ if err := sock.SetSendTimeout(&tv); err != nil {
+ return nil, err
+ }
+ tv = syscall.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds())
+ if err := sock.SetReceiveTimeout(&tv); err != nil {
+ return nil, err
+ }
return &Handle{sock: sock}, nil
}
diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
index 2089283..c062a17 100644
--- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go
+++ b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
@@ -203,10 +203,6 @@
}
func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) {
- var (
- err error
- )
-
if err := s.Send(req); err != nil {
return nil, err
}
@@ -222,6 +218,13 @@
for {
msgs, err := s.Receive()
if err != nil {
+ if s.GetFd() == -1 {
+ return nil, fmt.Errorf("Socket got closed on receive")
+ }
+ if err == syscall.EAGAIN {
+ // timeout fired
+ continue
+ }
return nil, err
}
for _, m := range msgs {
diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go
index 1ad4706..70c5584 100644
--- a/vendor/github.com/docker/libnetwork/network.go
+++ b/vendor/github.com/docker/libnetwork/network.go
@@ -199,39 +199,40 @@
}
type network struct {
- ctrlr *controller
- name string
- networkType string
- id string
- created time.Time
- scope string // network data scope
- labels map[string]string
- ipamType string
- ipamOptions map[string]string
- addrSpace string
- ipamV4Config []*IpamConf
- ipamV6Config []*IpamConf
- ipamV4Info []*IpamInfo
- ipamV6Info []*IpamInfo
- enableIPv6 bool
- postIPv6 bool
- epCnt *endpointCnt
- generic options.Generic
- dbIndex uint64
- dbExists bool
- persist bool
- stopWatchCh chan struct{}
- drvOnce *sync.Once
- resolverOnce sync.Once
- resolver []Resolver
- internal bool
- attachable bool
- inDelete bool
- ingress bool
- driverTables []networkDBTable
- dynamic bool
- configOnly bool
- configFrom string
+ ctrlr *controller
+ name string
+ networkType string
+ id string
+ created time.Time
+ scope string // network data scope
+ labels map[string]string
+ ipamType string
+ ipamOptions map[string]string
+ addrSpace string
+ ipamV4Config []*IpamConf
+ ipamV6Config []*IpamConf
+ ipamV4Info []*IpamInfo
+ ipamV6Info []*IpamInfo
+ enableIPv6 bool
+ postIPv6 bool
+ epCnt *endpointCnt
+ generic options.Generic
+ dbIndex uint64
+ dbExists bool
+ persist bool
+ stopWatchCh chan struct{}
+ drvOnce *sync.Once
+ resolverOnce sync.Once
+ resolver []Resolver
+ internal bool
+ attachable bool
+ inDelete bool
+ ingress bool
+ driverTables []networkDBTable
+ dynamic bool
+ configOnly bool
+ configFrom string
+ loadBalancerIP net.IP
sync.Mutex
}
@@ -473,6 +474,7 @@
dstN.ingress = n.ingress
dstN.configOnly = n.configOnly
dstN.configFrom = n.configFrom
+ dstN.loadBalancerIP = n.loadBalancerIP
// copy labels
if dstN.labels == nil {
@@ -589,6 +591,7 @@
netMap["ingress"] = n.ingress
netMap["configOnly"] = n.configOnly
netMap["configFrom"] = n.configFrom
+ netMap["loadBalancerIP"] = n.loadBalancerIP
return json.Marshal(netMap)
}
@@ -699,6 +702,9 @@
if v, ok := netMap["configFrom"]; ok {
n.configFrom = v.(string)
}
+ if v, ok := netMap["loadBalancerIP"]; ok {
+ n.loadBalancerIP = net.ParseIP(v.(string))
+ }
// Reconcile old networks with the recently added `--ipv6` flag
if !n.enableIPv6 {
n.enableIPv6 = len(n.ipamV6Info) > 0
@@ -799,6 +805,13 @@
}
}
+// NetworkOptionLBEndpoint function returns an option setter for the configuration of the load balancer endpoint for this network
+func NetworkOptionLBEndpoint(ip net.IP) NetworkOption {
+ return func(n *network) {
+ n.loadBalancerIP = ip
+ }
+}
+
// NetworkOptionDriverOpts function returns an option setter for any driver parameter described by a map
func NetworkOptionDriverOpts(opts map[string]string) NetworkOption {
return func(n *network) {
@@ -944,6 +957,18 @@
return &UnknownNetworkError{name: name, id: id}
}
+ if len(n.loadBalancerIP) != 0 {
+ endpoints := n.Endpoints()
+ if force || len(endpoints) == 1 {
+ n.deleteLoadBalancerSandbox()
+ }
+ //Reload the network from the store to update the epcnt.
+ n, err = c.getNetworkFromStore(id)
+ if err != nil {
+ return &UnknownNetworkError{name: name, id: id}
+ }
+ }
+
if !force && n.getEpCnt().EndpointCnt() != 0 {
if n.configOnly {
return types.ForbiddenErrorf("configuration network %q is in use", n.Name())
@@ -1071,12 +1096,19 @@
return nil, types.ForbiddenErrorf("endpoint with name %s already exists in network %s", name, n.Name())
}
- ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
- ep.id = stringid.GenerateRandomID()
-
n.ctrlr.networkLocker.Lock(n.id)
defer n.ctrlr.networkLocker.Unlock(n.id)
+ return n.createEndpoint(name, options...)
+
+}
+
+func (n *network) createEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
+ var err error
+
+ ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
+ ep.id = stringid.GenerateRandomID()
+
// Initialize ep.network with a possibly stale copy of n. We need this to get network from
+ // store. But once we get it from the store we will have the most up-to-date copy possible.
ep.network = n
@@ -1124,6 +1156,18 @@
ep.releaseAddress()
}
}()
+ // Moving updateToStore before calling addEndpoint so that we shall clean up VETH interfaces in case
+ // dockerd gets killed between the addEndpoint and updateToStore calls
+ if err = n.getController().updateToStore(ep); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ if e := n.getController().deleteFromStore(ep); e != nil {
+ logrus.Warnf("error rolling back endpoint %s from store: %v", name, e)
+ }
+ }
+ }()
if err = n.addEndpoint(ep); err != nil {
return nil, err
@@ -1140,17 +1184,6 @@
return nil, err
}
- if err = n.getController().updateToStore(ep); err != nil {
- return nil, err
- }
- defer func() {
- if err != nil {
- if e := n.getController().deleteFromStore(ep); e != nil {
- logrus.Warnf("error rolling back endpoint %s from store: %v", name, e)
- }
- }
- }()
-
// Watch for service records
n.getController().watchSvcRecord(ep)
defer func() {
@@ -2021,3 +2054,80 @@
return n.(*network), nil
}
+
+func (n *network) createLoadBalancerSandbox() error {
+ sandboxName := n.name + "-sbox"
+ sbOptions := []SandboxOption{}
+ if n.ingress {
+ sbOptions = append(sbOptions, OptionIngress())
+ }
+ sb, err := n.ctrlr.NewSandbox(sandboxName, sbOptions...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if e := n.ctrlr.SandboxDestroy(sandboxName); e != nil {
+ logrus.Warnf("could not delete sandbox %s on failure on failure (%v): %v", sandboxName, err, e)
+ }
+ }
+ }()
+
+ endpointName := n.name + "-endpoint"
+ epOptions := []EndpointOption{
+ CreateOptionIpam(n.loadBalancerIP, nil, nil, nil),
+ CreateOptionLoadBalancer(),
+ }
+ ep, err := n.createEndpoint(endpointName, epOptions...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if e := ep.Delete(true); e != nil {
+ logrus.Warnf("could not delete endpoint %s on failure on failure (%v): %v", endpointName, err, e)
+ }
+ }
+ }()
+
+ if err := ep.Join(sb, nil); err != nil {
+ return err
+ }
+ return sb.EnableService()
+}
+
+func (n *network) deleteLoadBalancerSandbox() {
+ n.Lock()
+ c := n.ctrlr
+ name := n.name
+ n.Unlock()
+
+ endpointName := name + "-endpoint"
+ sandboxName := name + "-sbox"
+
+ endpoint, err := n.EndpointByName(endpointName)
+ if err != nil {
+ logrus.Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err)
+ } else {
+
+ info := endpoint.Info()
+ if info != nil {
+ sb := info.Sandbox()
+ if sb != nil {
+ if err := sb.DisableService(); err != nil {
+ logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
+ //Ignore error and attempt to delete the load balancer endpoint
+ }
+ }
+ }
+
+ if err := endpoint.Delete(true); err != nil {
+ logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoint.Name(), endpoint.ID(), sandboxName, err)
+ //Ignore error and attempt to delete the sandbox.
+ }
+ }
+
+ if err := c.SandboxDestroy(sandboxName); err != nil {
+ logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
+ }
+}
diff --git a/vendor/github.com/docker/libnetwork/network_windows.go b/vendor/github.com/docker/libnetwork/network_windows.go
index 388b811..e7819e1 100644
--- a/vendor/github.com/docker/libnetwork/network_windows.go
+++ b/vendor/github.com/docker/libnetwork/network_windows.go
@@ -28,6 +28,9 @@
}
func (n *network) startResolver() {
+ if n.networkType == "ics" {
+ return
+ }
n.resolverOnce.Do(func() {
logrus.Debugf("Launching DNS server for network %q", n.Name())
options := n.Info().DriverOptions()
diff --git a/vendor/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
index 21c3bc0..072c622 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
@@ -16,46 +16,28 @@
return []byte{}
}
-// getNode searches the node inside the tables
-// returns true if the node was respectively in the active list, explicit node leave list or failed list
-func (nDB *NetworkDB) getNode(nEvent *NodeEvent, extract bool) (bool, bool, bool, *node) {
- var active bool
- var left bool
- var failed bool
-
- for _, nodes := range []map[string]*node{
- nDB.failedNodes,
- nDB.leftNodes,
- nDB.nodes,
- } {
- if n, ok := nodes[nEvent.NodeName]; ok {
- active = &nodes == &nDB.nodes
- left = &nodes == &nDB.leftNodes
- failed = &nodes == &nDB.failedNodes
- if n.ltime >= nEvent.LTime {
- return active, left, failed, nil
- }
- if extract {
- delete(nodes, n.Name)
- }
- return active, left, failed, n
- }
- }
- return active, left, failed, nil
-}
-
func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
// Update our local clock if the received messages has newer
// time.
nDB.networkClock.Witness(nEvent.LTime)
nDB.RLock()
- active, left, _, n := nDB.getNode(nEvent, false)
+ defer nDB.RUnlock()
+
+ // check if the node exists
+ n, _, _ := nDB.findNode(nEvent.NodeName)
if n == nil {
- nDB.RUnlock()
return false
}
- nDB.RUnlock()
+
+ // check if the event is fresh
+ if n.ltime >= nEvent.LTime {
+ return false
+ }
+
+ // If we are here it means that the event is fresher and the node is known. Update the Lamport time
+ n.ltime = nEvent.LTime
+
// If it is a node leave event for a manager and this is the only manager we
// know of we want the reconnect logic to kick in. In a single manager
// cluster manager's gossip can't be bootstrapped unless some other node
@@ -63,45 +45,32 @@
if len(nDB.bootStrapIP) == 1 && nEvent.Type == NodeEventTypeLeave {
for _, ip := range nDB.bootStrapIP {
if ip.Equal(n.Addr) {
- n.ltime = nEvent.LTime
return true
}
}
}
- n.ltime = nEvent.LTime
-
switch nEvent.Type {
case NodeEventTypeJoin:
- if active {
- // the node is already marked as active nothing to do
+ moved, err := nDB.changeNodeState(n.Name, nodeActiveState)
+ if err != nil {
+ logrus.WithError(err).Error("unable to find the node to move")
return false
}
- nDB.Lock()
- // Because the lock got released on the previous check we have to do it again and re verify the status of the node
- // All of this is to avoid a big lock on the function
- if active, _, _, n = nDB.getNode(nEvent, true); !active && n != nil {
- n.reapTime = 0
- nDB.nodes[n.Name] = n
+ if moved {
logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
}
- nDB.Unlock()
- return true
+ return moved
case NodeEventTypeLeave:
- if left {
- // the node is already marked as left nothing to do.
+ moved, err := nDB.changeNodeState(n.Name, nodeLeftState)
+ if err != nil {
+ logrus.WithError(err).Error("unable to find the node to move")
return false
}
- nDB.Lock()
- // Because the lock got released on the previous check we have to do it again and re verify the status of the node
- // All of this is to avoid a big lock on the function
- if _, left, _, n = nDB.getNode(nEvent, true); !left && n != nil {
- n.reapTime = nodeReapInterval
- nDB.leftNodes[n.Name] = n
+ if moved {
logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
}
- nDB.Unlock()
- return true
+ return moved
}
return false
diff --git a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
index 6075718..89aa7c4 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
@@ -21,24 +21,6 @@
}
}
-func (e *eventDelegate) purgeReincarnation(mn *memberlist.Node) {
- for name, node := range e.nDB.failedNodes {
- if node.Addr.Equal(mn.Addr) {
- logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
- delete(e.nDB.failedNodes, name)
- return
- }
- }
-
- for name, node := range e.nDB.leftNodes {
- if node.Addr.Equal(mn.Addr) {
- logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
- delete(e.nDB.leftNodes, name)
- return
- }
- }
-}
-
func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr)
e.broadcastNodeEvent(mn.Addr, opCreate)
@@ -57,44 +39,35 @@
// Every node has a unique ID
// Check on the base of the IP address if the new node that joined is actually a new incarnation of a previous
// failed or shutdown one
- e.purgeReincarnation(mn)
+ e.nDB.purgeReincarnation(mn)
e.nDB.nodes[mn.Name] = &node{Node: *mn}
logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr)
}
func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) {
- var failed bool
logrus.Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr)
e.broadcastNodeEvent(mn.Addr, opDelete)
- // The node left or failed, delete all the entries created by it.
- // If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted
- // If the node instead left because was going down, then it makes sense to just delete all its state
+
e.nDB.Lock()
defer e.nDB.Unlock()
- e.nDB.deleteNetworkEntriesForNode(mn.Name)
- e.nDB.deleteNodeTableEntries(mn.Name)
- if n, ok := e.nDB.nodes[mn.Name]; ok {
- delete(e.nDB.nodes, mn.Name)
- // Check if a new incarnation of the same node already joined
- // In that case this node can simply be removed and no further action are needed
- for name, node := range e.nDB.nodes {
- if node.Addr.Equal(mn.Addr) {
- logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", name, node.Addr, mn.Name, mn.Addr)
- return
- }
- }
-
- // In case of node failure, keep retrying to reconnect every retryInterval (1sec) for nodeReapInterval (24h)
- // Explicit leave will have already removed the node from the list of nodes (nDB.nodes) and put it into the leftNodes map
- n.reapTime = nodeReapInterval
- e.nDB.failedNodes[mn.Name] = n
- failed = true
+ n, currState, _ := e.nDB.findNode(mn.Name)
+ if n == nil {
+ logrus.Errorf("Node %s/%s not found in the node lists", mn.Name, mn.Addr)
+ return
}
-
- if failed {
- logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
+ // If the node was active it means it did not send the leave-cluster message, so it probably
+ // failed. Otherwise it would already be in the left list, so nothing else has to be done
+ if currState == nodeActiveState {
+ moved, err := e.nDB.changeNodeState(mn.Name, nodeFailedState)
+ if err != nil {
+ logrus.WithError(err).Errorf("impossible condition, node %s/%s not present in the list", mn.Name, mn.Addr)
+ return
+ }
+ if moved {
+ logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
+ }
}
}
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
index 025e0ca..9ec6bec 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
@@ -401,17 +401,23 @@
return nil
}
+// TableElem is a table entry, carrying the entry value and the name of the owner node.
+type TableElem struct {
+ Value []byte
+ owner string
+}
+
// GetTableByNetwork walks the networkdb by the give table and network id and
// returns a map of keys and values
-func (nDB *NetworkDB) GetTableByNetwork(tname, nid string) map[string]interface{} {
- entries := make(map[string]interface{})
+func (nDB *NetworkDB) GetTableByNetwork(tname, nid string) map[string]*TableElem {
+ entries := make(map[string]*TableElem)
nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s/%s", tname, nid), func(k string, v interface{}) bool {
entry := v.(*entry)
if entry.deleting {
return false
}
key := k[strings.LastIndex(k, "/")+1:]
- entries[key] = entry.value
+ entries[key] = &TableElem{Value: entry.value, owner: entry.node}
return false
})
return entries
@@ -445,7 +451,7 @@
return nil
}
-func (nDB *NetworkDB) deleteNetworkEntriesForNode(deletedNode string) {
+func (nDB *NetworkDB) deleteNodeFromNetworks(deletedNode string) {
for nid, nodes := range nDB.networkNodes {
updatedNodes := make([]string, 0, len(nodes))
for _, node := range nodes {
@@ -547,7 +553,9 @@
nDB.deleteEntry(nid, tname, key)
- nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
+ if !oldEntry.deleting {
+ nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
+ }
return false
})
}
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
index 01429a5..a4443bc 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
@@ -1,17 +1,19 @@
package networkdb
import (
+ "encoding/base64"
"fmt"
"net/http"
"strings"
- stackdump "github.com/docker/docker/pkg/signal"
+ "github.com/docker/libnetwork/common"
"github.com/docker/libnetwork/diagnose"
"github.com/sirupsen/logrus"
)
const (
missingParameter = "missing parameter"
+ dbNotAvailable = "database not available"
)
// NetDbPaths2Func TODO
@@ -26,14 +28,21 @@
"/deleteentry": dbDeleteEntry,
"/getentry": dbGetEntry,
"/gettable": dbGetTable,
- "/dump": dbStackTrace,
}
func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ _, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("join cluster")
+
if len(r.Form["members"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?members=ip1,ip2,...", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?members=ip1,ip2,...", r.URL.Path))
+ log.Error("join cluster failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -41,51 +50,88 @@
if ok {
err := nDB.Join(strings.Split(r.Form["members"][0], ","))
if err != nil {
- fmt.Fprintf(w, "%s error in the DB join %s\n", r.URL.Path, err)
+ rsp := diagnose.FailCommand(fmt.Errorf("%s error in the DB join %s", r.URL.Path, err))
+ log.WithError(err).Error("join cluster failed")
+ diagnose.HTTPReply(w, rsp, json)
return
}
- fmt.Fprintf(w, "OK\n")
+ log.Info("join cluster done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ _, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("network peers")
+
if len(r.Form["nid"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path))
+ log.Error("network peers failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
nDB, ok := ctx.(*NetworkDB)
if ok {
peers := nDB.Peers(r.Form["nid"][0])
- fmt.Fprintf(w, "Network:%s Total peers: %d\n", r.Form["nid"], len(peers))
+ rsp := &diagnose.TableObj{Length: len(peers)}
for i, peerInfo := range peers {
- fmt.Fprintf(w, "%d) %s -> %s\n", i, peerInfo.Name, peerInfo.IP)
+ rsp.Elements = append(rsp.Elements, &diagnose.PeerEntryObj{Index: i, Name: peerInfo.Name, IP: peerInfo.IP})
}
+ log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("network peers done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) {
+ r.ParseForm()
+ diagnose.DebugHTTPForm(r)
+ _, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("cluster peers")
+
nDB, ok := ctx.(*NetworkDB)
if ok {
peers := nDB.ClusterPeers()
- fmt.Fprintf(w, "Total peers: %d\n", len(peers))
+ rsp := &diagnose.TableObj{Length: len(peers)}
for i, peerInfo := range peers {
- fmt.Fprintf(w, "%d) %s -> %s\n", i, peerInfo.Name, peerInfo.IP)
+ rsp.Elements = append(rsp.Elements, &diagnose.PeerEntryObj{Index: i, Name: peerInfo.Name, IP: peerInfo.IP})
}
+ log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("cluster peers done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("create entry")
+
if len(r.Form["tname"]) < 1 ||
len(r.Form["nid"]) < 1 ||
len(r.Form["key"]) < 1 ||
len(r.Form["value"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+ log.Error("create entry failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -93,25 +139,48 @@
nid := r.Form["nid"][0]
key := r.Form["key"][0]
value := r.Form["value"][0]
+ decodedValue := []byte(value)
+ if !unsafe {
+ var err error
+ decodedValue, err = base64.StdEncoding.DecodeString(value)
+ if err != nil {
+ log.WithError(err).Error("create entry failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
+ return
+ }
+ }
nDB, ok := ctx.(*NetworkDB)
if ok {
- if err := nDB.CreateEntry(tname, nid, key, []byte(value)); err != nil {
- diagnose.HTTPReplyError(w, err.Error(), "")
+ if err := nDB.CreateEntry(tname, nid, key, decodedValue); err != nil {
+ rsp := diagnose.FailCommand(err)
+ diagnose.HTTPReply(w, rsp, json)
+ log.WithError(err).Error("create entry failed")
return
}
- fmt.Fprintf(w, "OK\n")
+ log.Info("create entry done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("update entry")
+
if len(r.Form["tname"]) < 1 ||
len(r.Form["nid"]) < 1 ||
len(r.Form["key"]) < 1 ||
len(r.Form["value"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path))
+ log.Error("update entry failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -119,24 +188,46 @@
nid := r.Form["nid"][0]
key := r.Form["key"][0]
value := r.Form["value"][0]
+ decodedValue := []byte(value)
+ if !unsafe {
+ var err error
+ decodedValue, err = base64.StdEncoding.DecodeString(value)
+ if err != nil {
+ log.WithError(err).Error("update entry failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
+ return
+ }
+ }
nDB, ok := ctx.(*NetworkDB)
if ok {
- if err := nDB.UpdateEntry(tname, nid, key, []byte(value)); err != nil {
- diagnose.HTTPReplyError(w, err.Error(), "")
+ if err := nDB.UpdateEntry(tname, nid, key, decodedValue); err != nil {
+ log.WithError(err).Error("update entry failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
return
}
- fmt.Fprintf(w, "OK\n")
+ log.Info("update entry done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ _, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("delete entry")
+
if len(r.Form["tname"]) < 1 ||
len(r.Form["nid"]) < 1 ||
len(r.Form["key"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+ log.Error("delete entry failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -148,20 +239,32 @@
if ok {
err := nDB.DeleteEntry(tname, nid, key)
if err != nil {
- diagnose.HTTPReplyError(w, err.Error(), "")
+ log.WithError(err).Error("delete entry failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
return
}
- fmt.Fprintf(w, "OK\n")
+ log.Info("delete entry done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("get entry")
+
if len(r.Form["tname"]) < 1 ||
len(r.Form["nid"]) < 1 ||
len(r.Form["key"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path))
+ log.Error("get entry failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -173,18 +276,39 @@
if ok {
value, err := nDB.GetEntry(tname, nid, key)
if err != nil {
- diagnose.HTTPReplyError(w, err.Error(), "")
+ log.WithError(err).Error("get entry failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
return
}
- fmt.Fprintf(w, "key:`%s` value:`%s`\n", key, string(value))
+
+ var encodedValue string
+ if unsafe {
+ encodedValue = string(value)
+ } else {
+ encodedValue = base64.StdEncoding.EncodeToString(value)
+ }
+
+ rsp := &diagnose.TableEntryObj{Key: key, Value: encodedValue}
+ log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("update entry done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ _, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("join network")
+
if len(r.Form["nid"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+ log.Error("join network failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -193,18 +317,30 @@
nDB, ok := ctx.(*NetworkDB)
if ok {
if err := nDB.JoinNetwork(nid); err != nil {
- diagnose.HTTPReplyError(w, err.Error(), "")
+ log.WithError(err).Error("join network failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
return
}
- fmt.Fprintf(w, "OK\n")
+ log.Info("join network done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ _, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("leave network")
+
if len(r.Form["nid"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path))
+ log.Error("leave network failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -213,19 +349,31 @@
nDB, ok := ctx.(*NetworkDB)
if ok {
if err := nDB.LeaveNetwork(nid); err != nil {
- diagnose.HTTPReplyError(w, err.Error(), "")
+ log.WithError(err).Error("leave network failed")
+ diagnose.HTTPReply(w, diagnose.FailCommand(err), json)
return
}
- fmt.Fprintf(w, "OK\n")
+ log.Info("leave network done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(nil), json)
+ return
}
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) {
r.ParseForm()
diagnose.DebugHTTPForm(r)
+ unsafe, json := diagnose.ParseHTTPFormOptions(r)
+
+ // audit logs
+ log := logrus.WithFields(logrus.Fields{"component": "diagnose", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()})
+ log.Info("get table")
+
if len(r.Form["tname"]) < 1 ||
len(r.Form["nid"]) < 1 {
- diagnose.HTTPReplyError(w, missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id", r.URL.Path))
+ rsp := diagnose.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id", r.URL.Path))
+ log.Error("get table failed, wrong input")
+ diagnose.HTTPReply(w, rsp, json)
return
}
@@ -235,20 +383,27 @@
nDB, ok := ctx.(*NetworkDB)
if ok {
table := nDB.GetTableByNetwork(tname, nid)
- fmt.Fprintf(w, "total elements: %d\n", len(table))
- i := 0
+ rsp := &diagnose.TableObj{Length: len(table)}
+ var i = 0
for k, v := range table {
- fmt.Fprintf(w, "%d) k:`%s` -> v:`%s`\n", i, k, string(v.([]byte)))
+ var encodedValue string
+ if unsafe {
+ encodedValue = string(v.Value)
+ } else {
+ encodedValue = base64.StdEncoding.EncodeToString(v.Value)
+ }
+ rsp.Elements = append(rsp.Elements,
+ &diagnose.TableEntryObj{
+ Index: i,
+ Key: k,
+ Value: encodedValue,
+ Owner: v.owner,
+ })
i++
}
+ log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("get table done")
+ diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
+ return
}
-}
-
-func dbStackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) {
- path, err := stackdump.DumpStacks("/tmp/")
- if err != nil {
- logrus.WithError(err).Error("failed to write goroutines dump")
- } else {
- fmt.Fprintf(w, "goroutine stacks written to %s", path)
- }
+ diagnose.HTTPReply(w, diagnose.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json)
}
diff --git a/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go b/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go
new file mode 100644
index 0000000..f5a7498
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go
@@ -0,0 +1,120 @@
+package networkdb
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/memberlist"
+ "github.com/sirupsen/logrus"
+)
+
+type nodeState int
+
+const (
+ nodeNotFound nodeState = -1
+ nodeActiveState nodeState = 0
+ nodeLeftState nodeState = 1
+ nodeFailedState nodeState = 2
+)
+
+var nodeStateName = map[nodeState]string{
+ -1: "NodeNotFound",
+ 0: "NodeActive",
+ 1: "NodeLeft",
+ 2: "NodeFailed",
+}
+
+// findNode searches for the node in the 3 node lists and returns the node pointer and the
+// list in which it was found.
+func (nDB *NetworkDB) findNode(nodeName string) (*node, nodeState, map[string]*node) {
+ for i, nodes := range []map[string]*node{
+ nDB.nodes,
+ nDB.leftNodes,
+ nDB.failedNodes,
+ } {
+ if n, ok := nodes[nodeName]; ok {
+ return n, nodeState(i), nodes
+ }
+ }
+ return nil, nodeNotFound, nil
+}
+
+// changeNodeState changes the state of the specified node. It returns true if the node was
+// moved, false if there was no need to change the node state. An error is returned if the
+// node does not exist.
+func (nDB *NetworkDB) changeNodeState(nodeName string, newState nodeState) (bool, error) {
+ n, currState, m := nDB.findNode(nodeName)
+ if n == nil {
+ return false, fmt.Errorf("node %s not found", nodeName)
+ }
+
+ switch newState {
+ case nodeActiveState:
+ if currState == nodeActiveState {
+ return false, nil
+ }
+
+ delete(m, nodeName)
+ // reset the node reap time
+ n.reapTime = 0
+ nDB.nodes[nodeName] = n
+ case nodeLeftState:
+ if currState == nodeLeftState {
+ return false, nil
+ }
+
+ delete(m, nodeName)
+ nDB.leftNodes[nodeName] = n
+ case nodeFailedState:
+ if currState == nodeFailedState {
+ return false, nil
+ }
+
+ delete(m, nodeName)
+ nDB.failedNodes[nodeName] = n
+ }
+
+ logrus.Infof("Node %s change state %s --> %s", nodeName, nodeStateName[currState], nodeStateName[newState])
+
+ if newState == nodeLeftState || newState == nodeFailedState {
+ // set the node reap time, if not already set
+ // It is possible that a node passes from failed to left and the reaptime was already set so keep that value
+ if n.reapTime == 0 {
+ n.reapTime = nodeReapInterval
+ }
+	// The node left or failed; delete all the entries created by it.
+	// If the node was temporarily down, deleting the entries guarantees that the CREATE events will be accepted.
+	// If the node instead left because it was shutting down, then it makes sense to just delete all its state.
+ nDB.deleteNodeFromNetworks(n.Name)
+ nDB.deleteNodeTableEntries(n.Name)
+ }
+
+ return true, nil
+}
+
+func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool {
+ for name, node := range nDB.nodes {
+ if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
+ logrus.Infof("Node %s/%s, is the new incarnation of the active node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+ nDB.changeNodeState(name, nodeLeftState)
+ return true
+ }
+ }
+
+ for name, node := range nDB.failedNodes {
+ if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
+ logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+ nDB.changeNodeState(name, nodeLeftState)
+ return true
+ }
+ }
+
+ for name, node := range nDB.leftNodes {
+ if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
+ logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+ nDB.changeNodeState(name, nodeLeftState)
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/docker/libnetwork/store.go b/vendor/github.com/docker/libnetwork/store.go
index da7ac1d..95943f6 100644
--- a/vendor/github.com/docker/libnetwork/store.go
+++ b/vendor/github.com/docker/libnetwork/store.go
@@ -256,7 +256,7 @@
if err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {
return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err)
}
- logrus.Errorf("Error (%v) deleting object %v, retrying....", err, kvObject.Key())
+ logrus.Warnf("Error (%v) deleting object %v, retrying....", err, kvObject.Key())
goto retry
}
return err
diff --git a/vendor/github.com/docker/libnetwork/vendor.conf b/vendor/github.com/docker/libnetwork/vendor.conf
index f00d8ce..bdcb5a1 100644
--- a/vendor/github.com/docker/libnetwork/vendor.conf
+++ b/vendor/github.com/docker/libnetwork/vendor.conf
@@ -45,7 +45,7 @@
github.com/stretchr/testify dab07ac62d4905d3e48d17dc549c684ac3b7c15a
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
-github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969
+github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
diff --git a/vendor/github.com/docker/swarmkit/agent/exec/controller.go b/vendor/github.com/docker/swarmkit/agent/exec/controller.go
index 9b4fc7b..c9e9343 100644
--- a/vendor/github.com/docker/swarmkit/agent/exec/controller.go
+++ b/vendor/github.com/docker/swarmkit/agent/exec/controller.go
@@ -288,7 +288,9 @@
status.PortStatus = portStatus
}()
- if task.DesiredState == api.TaskStateShutdown {
+ // this branch bounds the largest state achievable in the agent as SHUTDOWN, which
+ // is exactly the correct behavior for the agent.
+ if task.DesiredState >= api.TaskStateShutdown {
if status.State >= api.TaskStateCompleted {
return noop()
}
diff --git a/vendor/github.com/docker/swarmkit/api/ca.pb.go b/vendor/github.com/docker/swarmkit/api/ca.pb.go
index caaa06c..7d28912 100644
--- a/vendor/github.com/docker/swarmkit/api/ca.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/ca.pb.go
@@ -131,6 +131,8 @@
LeaveResponse
ProcessRaftMessageRequest
ProcessRaftMessageResponse
+ StreamRaftMessageRequest
+ StreamRaftMessageResponse
ResolveAddressRequest
ResolveAddressResponse
InternalRaftRequest
@@ -235,6 +237,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -984,12 +987,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
@@ -1126,12 +1129,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/api/control.pb.go b/vendor/github.com/docker/swarmkit/api/control.pb.go
index 13ef482..fb28c5d 100644
--- a/vendor/github.com/docker/swarmkit/api/control.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/control.pb.go
@@ -19,6 +19,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -5859,12 +5860,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go b/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
index 8e9b038..120df88 100644
--- a/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
@@ -24,6 +24,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -1602,12 +1603,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/api/health.pb.go b/vendor/github.com/docker/swarmkit/api/health.pb.go
index 757db6a..ed7df73 100644
--- a/vendor/github.com/docker/swarmkit/api/health.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/health.pb.go
@@ -17,6 +17,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -286,12 +287,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/api/logbroker.pb.go b/vendor/github.com/docker/swarmkit/api/logbroker.pb.go
index 58515aa..1108088 100644
--- a/vendor/github.com/docker/swarmkit/api/logbroker.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/logbroker.pb.go
@@ -20,6 +20,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -1273,12 +1274,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
@@ -1396,12 +1397,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/api/raft.pb.go b/vendor/github.com/docker/swarmkit/api/raft.pb.go
index 4710ee6..ddee3e1 100644
--- a/vendor/github.com/docker/swarmkit/api/raft.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/raft.pb.go
@@ -21,6 +21,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -133,6 +134,23 @@
func (*ProcessRaftMessageResponse) ProtoMessage() {}
func (*ProcessRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} }
+// Raft message streaming request.
+type StreamRaftMessageRequest struct {
+ Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
+}
+
+func (m *StreamRaftMessageRequest) Reset() { *m = StreamRaftMessageRequest{} }
+func (*StreamRaftMessageRequest) ProtoMessage() {}
+func (*StreamRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} }
+
+// Raft message streaming response.
+type StreamRaftMessageResponse struct {
+}
+
+func (m *StreamRaftMessageResponse) Reset() { *m = StreamRaftMessageResponse{} }
+func (*StreamRaftMessageResponse) ProtoMessage() {}
+func (*StreamRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} }
+
type ResolveAddressRequest struct {
// raft_id is the ID to resolve to an address.
RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
@@ -140,7 +158,7 @@
func (m *ResolveAddressRequest) Reset() { *m = ResolveAddressRequest{} }
func (*ResolveAddressRequest) ProtoMessage() {}
-func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} }
+func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} }
type ResolveAddressResponse struct {
// Addr specifies the address of the member
@@ -149,7 +167,7 @@
func (m *ResolveAddressResponse) Reset() { *m = ResolveAddressResponse{} }
func (*ResolveAddressResponse) ProtoMessage() {}
-func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} }
+func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{10} }
// Contains one of many protobuf encoded objects to replicate
// over the raft backend with a request ID to track when the
@@ -161,7 +179,7 @@
func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
func (*InternalRaftRequest) ProtoMessage() {}
-func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} }
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{11} }
// StoreAction defines a target and operation to apply on the storage system.
type StoreAction struct {
@@ -181,7 +199,7 @@
func (m *StoreAction) Reset() { *m = StoreAction{} }
func (*StoreAction) ProtoMessage() {}
-func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{10} }
+func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{12} }
type isStoreAction_Target interface {
isStoreAction_Target()
@@ -512,6 +530,8 @@
proto.RegisterType((*LeaveResponse)(nil), "docker.swarmkit.v1.LeaveResponse")
proto.RegisterType((*ProcessRaftMessageRequest)(nil), "docker.swarmkit.v1.ProcessRaftMessageRequest")
proto.RegisterType((*ProcessRaftMessageResponse)(nil), "docker.swarmkit.v1.ProcessRaftMessageResponse")
+ proto.RegisterType((*StreamRaftMessageRequest)(nil), "docker.swarmkit.v1.StreamRaftMessageRequest")
+ proto.RegisterType((*StreamRaftMessageResponse)(nil), "docker.swarmkit.v1.StreamRaftMessageResponse")
proto.RegisterType((*ResolveAddressRequest)(nil), "docker.swarmkit.v1.ResolveAddressRequest")
proto.RegisterType((*ResolveAddressResponse)(nil), "docker.swarmkit.v1.ResolveAddressResponse")
proto.RegisterType((*InternalRaftRequest)(nil), "docker.swarmkit.v1.InternalRaftRequest")
@@ -539,6 +559,14 @@
return p.local.ProcessRaftMessage(ctx, r)
}
+func (p *authenticatedWrapperRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error {
+
+ if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil {
+ return err
+ }
+ return p.local.StreamRaftMessage(stream)
+}
+
func (p *authenticatedWrapperRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) {
if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
@@ -673,6 +701,16 @@
}
func (m *ProcessRaftMessageResponse) CopyFrom(src interface{}) {}
+func (m *StreamRaftMessageResponse) Copy() *StreamRaftMessageResponse {
+ if m == nil {
+ return nil
+ }
+ o := &StreamRaftMessageResponse{}
+ o.CopyFrom(m)
+ return o
+}
+
+func (m *StreamRaftMessageResponse) CopyFrom(src interface{}) {}
func (m *ResolveAddressRequest) Copy() *ResolveAddressRequest {
if m == nil {
return nil
@@ -813,6 +851,12 @@
// ProcessRaftMessage sends a raft message to be processed on a raft member, it is
// called from the RaftMember willing to send a message to its destination ('To' field)
ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error)
+ // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest
+ // to be processed on a raft member, returning a StreamRaftMessageResponse
+ // when processing of the streamed messages is complete. A single stream corresponds
+ // to a single raft message, which may be disassembled and streamed as individual messages.
+ // It is called from the Raft leader, which uses it to stream messages to a raft member.
+ StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error)
// ResolveAddress returns the address where the node with the given ID can be reached.
ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error)
}
@@ -834,6 +878,40 @@
return out, nil
}
+func (c *raftClient) StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Raft_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Raft/StreamRaftMessage", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &raftStreamRaftMessageClient{stream}
+ return x, nil
+}
+
+type Raft_StreamRaftMessageClient interface {
+ Send(*StreamRaftMessageRequest) error
+ CloseAndRecv() (*StreamRaftMessageResponse, error)
+ grpc.ClientStream
+}
+
+type raftStreamRaftMessageClient struct {
+ grpc.ClientStream
+}
+
+func (x *raftStreamRaftMessageClient) Send(m *StreamRaftMessageRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *raftStreamRaftMessageClient) CloseAndRecv() (*StreamRaftMessageResponse, error) {
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ m := new(StreamRaftMessageResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func (c *raftClient) ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) {
out := new(ResolveAddressResponse)
err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ResolveAddress", in, out, c.cc, opts...)
@@ -849,6 +927,12 @@
// ProcessRaftMessage sends a raft message to be processed on a raft member, it is
// called from the RaftMember willing to send a message to its destination ('To' field)
ProcessRaftMessage(context.Context, *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error)
+ // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest
+ // to be processed on a raft member, returning a StreamRaftMessageResponse
+ // when processing of the streamed messages is complete. A single stream corresponds
+ // to a single raft message, which may be disassembled and streamed as individual messages.
+ // It is called from the Raft leader, which uses it to stream messages to a raft member.
+ StreamRaftMessage(Raft_StreamRaftMessageServer) error
// ResolveAddress returns the address where the node with the given ID can be reached.
ResolveAddress(context.Context, *ResolveAddressRequest) (*ResolveAddressResponse, error)
}
@@ -875,6 +959,32 @@
return interceptor(ctx, in, info, handler)
}
+func _Raft_StreamRaftMessage_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(RaftServer).StreamRaftMessage(&raftStreamRaftMessageServer{stream})
+}
+
+type Raft_StreamRaftMessageServer interface {
+ SendAndClose(*StreamRaftMessageResponse) error
+ Recv() (*StreamRaftMessageRequest, error)
+ grpc.ServerStream
+}
+
+type raftStreamRaftMessageServer struct {
+ grpc.ServerStream
+}
+
+func (x *raftStreamRaftMessageServer) SendAndClose(m *StreamRaftMessageResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *raftStreamRaftMessageServer) Recv() (*StreamRaftMessageRequest, error) {
+ m := new(StreamRaftMessageRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func _Raft_ResolveAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ResolveAddressRequest)
if err := dec(in); err != nil {
@@ -906,7 +1016,13 @@
Handler: _Raft_ResolveAddress_Handler,
},
},
- Streams: []grpc.StreamDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamRaftMessage",
+ Handler: _Raft_StreamRaftMessage_Handler,
+ ClientStreams: true,
+ },
+ },
Metadata: "github.com/docker/swarmkit/api/raft.proto",
}
@@ -1212,6 +1328,52 @@
return i, nil
}
+func (m *StreamRaftMessageRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StreamRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Message != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size()))
+ n4, err := m.Message.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ return i, nil
+}
+
+func (m *StreamRaftMessageResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StreamRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ return i, nil
+}
+
func (m *ResolveAddressRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1315,11 +1477,11 @@
i = encodeVarintRaft(dAtA, i, uint64(m.Action))
}
if m.Target != nil {
- nn4, err := m.Target.MarshalTo(dAtA[i:])
+ nn5, err := m.Target.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += nn4
+ i += nn5
}
return i, nil
}
@@ -1330,11 +1492,11 @@
dAtA[i] = 0x12
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size()))
- n5, err := m.Node.MarshalTo(dAtA[i:])
+ n6, err := m.Node.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n5
+ i += n6
}
return i, nil
}
@@ -1344,11 +1506,11 @@
dAtA[i] = 0x1a
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Service.Size()))
- n6, err := m.Service.MarshalTo(dAtA[i:])
+ n7, err := m.Service.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n6
+ i += n7
}
return i, nil
}
@@ -1358,11 +1520,11 @@
dAtA[i] = 0x22
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Task.Size()))
- n7, err := m.Task.MarshalTo(dAtA[i:])
+ n8, err := m.Task.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n7
+ i += n8
}
return i, nil
}
@@ -1372,11 +1534,11 @@
dAtA[i] = 0x2a
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Network.Size()))
- n8, err := m.Network.MarshalTo(dAtA[i:])
+ n9, err := m.Network.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n8
+ i += n9
}
return i, nil
}
@@ -1386,11 +1548,11 @@
dAtA[i] = 0x32
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Cluster.Size()))
- n9, err := m.Cluster.MarshalTo(dAtA[i:])
+ n10, err := m.Cluster.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n9
+ i += n10
}
return i, nil
}
@@ -1400,11 +1562,11 @@
dAtA[i] = 0x3a
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Secret.Size()))
- n10, err := m.Secret.MarshalTo(dAtA[i:])
+ n11, err := m.Secret.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n10
+ i += n11
}
return i, nil
}
@@ -1414,11 +1576,11 @@
dAtA[i] = 0x42
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Resource.Size()))
- n11, err := m.Resource.MarshalTo(dAtA[i:])
+ n12, err := m.Resource.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n11
+ i += n12
}
return i, nil
}
@@ -1428,11 +1590,11 @@
dAtA[i] = 0x4a
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Extension.Size()))
- n12, err := m.Extension.MarshalTo(dAtA[i:])
+ n13, err := m.Extension.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n12
+ i += n13
}
return i, nil
}
@@ -1442,11 +1604,11 @@
dAtA[i] = 0x52
i++
i = encodeVarintRaft(dAtA, i, uint64(m.Config.Size()))
- n13, err := m.Config.MarshalTo(dAtA[i:])
+ n14, err := m.Config.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
- i += n13
+ i += n14
}
return i, nil
}
@@ -1488,12 +1650,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
@@ -1585,6 +1747,63 @@
return resp, err
}
+type Raft_StreamRaftMessageServerWrapper struct {
+ Raft_StreamRaftMessageServer
+ ctx context.Context
+}
+
+func (s Raft_StreamRaftMessageServerWrapper) Context() context.Context {
+ return s.ctx
+}
+
+func (p *raftProxyRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error {
+ ctx := stream.Context()
+ conn, err := p.connSelector.LeaderConn(ctx)
+ if err != nil {
+ if err == raftselector.ErrIsLeader {
+ ctx, err = p.runCtxMods(ctx, p.localCtxMods)
+ if err != nil {
+ return err
+ }
+ streamWrapper := Raft_StreamRaftMessageServerWrapper{
+ Raft_StreamRaftMessageServer: stream,
+ ctx: ctx,
+ }
+ return p.local.StreamRaftMessage(streamWrapper)
+ }
+ return err
+ }
+ ctx, err = p.runCtxMods(ctx, p.remoteCtxMods)
+ if err != nil {
+ return err
+ }
+ clientStream, err := NewRaftClient(conn).StreamRaftMessage(ctx)
+
+ if err != nil {
+ return err
+ }
+
+ for {
+ msg, err := stream.Recv()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if err := clientStream.Send(msg); err != nil {
+ return err
+ }
+ }
+
+ reply, err := clientStream.CloseAndRecv()
+ if err != nil {
+ return err
+ }
+
+ return stream.SendAndClose(reply)
+}
+
func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) {
conn, err := p.connSelector.LeaderConn(ctx)
@@ -1630,12 +1849,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
@@ -1843,6 +2062,22 @@
return n
}
+func (m *StreamRaftMessageRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Message != nil {
+ l = m.Message.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *StreamRaftMessageResponse) Size() (n int) {
+ var l int
+ _ = l
+ return n
+}
+
func (m *ResolveAddressRequest) Size() (n int) {
var l int
_ = l
@@ -2057,6 +2292,25 @@
}, "")
return s
}
+func (this *StreamRaftMessageRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StreamRaftMessageRequest{`,
+ `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StreamRaftMessageResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StreamRaftMessageResponse{`,
+ `}`,
+ }, "")
+ return s
+}
func (this *ResolveAddressRequest) String() string {
if this == nil {
return "nil"
@@ -2861,6 +3115,139 @@
}
return nil
}
+func (m *StreamRaftMessageRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StreamRaftMessageRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StreamRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Message == nil {
+ m.Message = &raftpb.Message{}
+ }
+ if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StreamRaftMessageResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StreamRaftMessageResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StreamRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *ResolveAddressRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -3574,66 +3961,69 @@
func init() { proto.RegisterFile("github.com/docker/swarmkit/api/raft.proto", fileDescriptorRaft) }
var fileDescriptorRaft = []byte{
- // 974 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x96, 0x4f, 0x6f, 0x1b, 0x45,
- 0x18, 0xc6, 0x77, 0xd7, 0x5b, 0x27, 0x79, 0xd3, 0x26, 0xd1, 0x94, 0x84, 0xed, 0x52, 0x1c, 0x77,
- 0x8b, 0x84, 0x13, 0x9a, 0xb5, 0x30, 0x48, 0x45, 0x85, 0x1e, 0x62, 0xc7, 0x92, 0x4d, 0x5b, 0xa7,
- 0xda, 0x24, 0xd0, 0x5b, 0x58, 0xef, 0x4e, 0xdc, 0xc5, 0xf6, 0x8e, 0x99, 0x19, 0x3b, 0x70, 0x41,
- 0x3d, 0xa2, 0x5c, 0x39, 0x80, 0x90, 0x7a, 0x82, 0x73, 0x3f, 0x00, 0x1f, 0x00, 0x45, 0x9c, 0xb8,
- 0xc1, 0x29, 0xa2, 0xfe, 0x00, 0xf0, 0x15, 0xd0, 0xcc, 0xee, 0x3a, 0xc6, 0x59, 0x3b, 0xb9, 0x24,
- 0xa3, 0x9d, 0xdf, 0xf3, 0x3e, 0xef, 0x3b, 0x7f, 0xde, 0x31, 0x6c, 0xb4, 0x02, 0xfe, 0xbc, 0xdf,
- 0xb4, 0x3d, 0xd2, 0x2d, 0xfa, 0xc4, 0x6b, 0x63, 0x5a, 0x64, 0xc7, 0x2e, 0xed, 0xb6, 0x03, 0x5e,
- 0x74, 0x7b, 0x41, 0x91, 0xba, 0x47, 0xdc, 0xee, 0x51, 0xc2, 0x09, 0x42, 0xd1, 0xbc, 0x9d, 0xcc,
- 0xdb, 0x83, 0xf7, 0xcd, 0x7b, 0x97, 0xc8, 0x49, 0xf3, 0x4b, 0xec, 0x71, 0x16, 0x45, 0x30, 0x37,
- 0x2f, 0xa1, 0xf9, 0x37, 0x3d, 0x9c, 0xb0, 0x5b, 0x63, 0xac, 0x47, 0x28, 0x26, 0xac, 0x88, 0xb9,
- 0xe7, 0xcb, 0x84, 0xe4, 0x9f, 0x5e, 0x73, 0x2c, 0x39, 0xf3, 0x8d, 0x16, 0x69, 0x11, 0x39, 0x2c,
- 0x8a, 0x51, 0xfc, 0xf5, 0xfe, 0x0c, 0x43, 0x49, 0x34, 0xfb, 0x47, 0xc5, 0x5e, 0xa7, 0xdf, 0x0a,
- 0xc2, 0xf8, 0x5f, 0x24, 0xb4, 0x5e, 0xa9, 0x00, 0x8e, 0x7b, 0xc4, 0x9f, 0xe0, 0x6e, 0x13, 0x53,
- 0x74, 0x17, 0xe6, 0x84, 0xd7, 0x61, 0xe0, 0x1b, 0x6a, 0x5e, 0x2d, 0xe8, 0x65, 0x18, 0x9e, 0xad,
- 0x67, 0x05, 0x50, 0xdf, 0x71, 0xb2, 0x62, 0xaa, 0xee, 0x0b, 0x28, 0x24, 0x3e, 0x16, 0x90, 0x96,
- 0x57, 0x0b, 0x0b, 0x11, 0xd4, 0x20, 0x3e, 0x16, 0x90, 0x98, 0xaa, 0xfb, 0x08, 0x81, 0xee, 0xfa,
- 0x3e, 0x35, 0x32, 0x82, 0x70, 0xe4, 0x18, 0x95, 0x21, 0xcb, 0xb8, 0xcb, 0xfb, 0xcc, 0xd0, 0xf3,
- 0x6a, 0x61, 0xb1, 0xf4, 0x8e, 0x7d, 0x71, 0xa5, 0xed, 0xf3, 0x6c, 0xf6, 0x24, 0x5b, 0xd6, 0x4f,
- 0xcf, 0xd6, 0x15, 0x27, 0x56, 0x5a, 0x77, 0x60, 0xf1, 0x53, 0x12, 0x84, 0x0e, 0xfe, 0xaa, 0x8f,
- 0x19, 0x1f, 0xd9, 0xa8, 0xe7, 0x36, 0xd6, 0x4f, 0x2a, 0x5c, 0x8f, 0x18, 0xd6, 0x23, 0x21, 0xc3,
- 0x57, 0xab, 0xea, 0x23, 0x98, 0xeb, 0x4a, 0x5b, 0x66, 0x68, 0xf9, 0x4c, 0x61, 0xb1, 0x94, 0x9b,
- 0x9d, 0x9d, 0x93, 0xe0, 0xe8, 0x3d, 0x58, 0xa6, 0xb8, 0x4b, 0x06, 0xd8, 0x3f, 0x4c, 0x22, 0x64,
- 0xf2, 0x99, 0x82, 0x5e, 0xd6, 0x56, 0x14, 0x67, 0x29, 0x9e, 0x8a, 0x44, 0xcc, 0x2a, 0xc3, 0xf5,
- 0xc7, 0xd8, 0x1d, 0xe0, 0xa4, 0x80, 0x12, 0xe8, 0x62, 0xc5, 0x64, 0x62, 0x97, 0x7b, 0x4a, 0xd6,
- 0x5a, 0x86, 0x1b, 0x71, 0x8c, 0xa8, 0x40, 0xeb, 0x31, 0xdc, 0x7a, 0x4a, 0x89, 0x87, 0x19, 0x8b,
- 0x58, 0xc6, 0xdc, 0xd6, 0xc8, 0x61, 0x43, 0x14, 0x26, 0xbf, 0xc4, 0x26, 0xcb, 0x76, 0x74, 0xac,
- 0xec, 0x04, 0x4c, 0xe6, 0x1f, 0xe8, 0x2f, 0x7e, 0xb0, 0x14, 0xeb, 0x36, 0x98, 0x69, 0xd1, 0x62,
- 0xaf, 0x4f, 0x60, 0xd5, 0xc1, 0x8c, 0x74, 0x06, 0x78, 0xdb, 0xf7, 0xa9, 0x80, 0x62, 0x9f, 0xab,
- 0xac, 0xb2, 0x75, 0x0f, 0xd6, 0x26, 0xd5, 0xf1, 0x26, 0xa5, 0xed, 0x64, 0x07, 0x6e, 0xd6, 0x43,
- 0x8e, 0x69, 0xe8, 0x76, 0x44, 0x9c, 0xc4, 0x69, 0x0d, 0xb4, 0x91, 0x49, 0x76, 0x78, 0xb6, 0xae,
- 0xd5, 0x77, 0x1c, 0x2d, 0xf0, 0xd1, 0x43, 0xc8, 0xba, 0x1e, 0x0f, 0x48, 0x18, 0xef, 0xe0, 0x7a,
- 0xda, 0x6a, 0xee, 0x71, 0x42, 0xf1, 0xb6, 0xc4, 0x92, 0xa3, 0x15, 0x89, 0xac, 0xdf, 0x74, 0x58,
- 0x1c, 0x9b, 0x45, 0x1f, 0x8f, 0xc2, 0x09, 0xab, 0xa5, 0xd2, 0xdd, 0x4b, 0xc2, 0x3d, 0x0a, 0x42,
- 0x3f, 0x09, 0x86, 0xec, 0x78, 0x5f, 0x35, 0xb9, 0xe4, 0x46, 0x9a, 0x54, 0xdc, 0x98, 0x9a, 0x12,
- 0xed, 0x29, 0xba, 0x0f, 0x73, 0x0c, 0xd3, 0x41, 0xe0, 0x61, 0x79, 0x65, 0x16, 0x4b, 0x6f, 0xa5,
- 0xba, 0x45, 0x48, 0x4d, 0x71, 0x12, 0x5a, 0x18, 0x71, 0x97, 0xb5, 0xe3, 0x2b, 0x95, 0x6a, 0xb4,
- 0xef, 0xb2, 0xb6, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x31, 0x3f, 0x26, 0xb4, 0x6d, 0x5c, 0x9b, 0x6e,
- 0xd4, 0x88, 0x10, 0x61, 0x14, 0xd3, 0x42, 0xe8, 0x75, 0xfa, 0x8c, 0x63, 0x6a, 0x64, 0xa7, 0x0b,
- 0x2b, 0x11, 0x22, 0x84, 0x31, 0x8d, 0x3e, 0x84, 0x2c, 0xc3, 0x1e, 0xc5, 0xdc, 0x98, 0x93, 0x3a,
- 0x33, 0xbd, 0x32, 0x41, 0xd4, 0xc4, 0x45, 0x97, 0x23, 0xf4, 0x00, 0xe6, 0x29, 0x66, 0xa4, 0x4f,
- 0x3d, 0x6c, 0xcc, 0x4b, 0xdd, 0xed, 0xd4, 0xcb, 0x11, 0x33, 0x35, 0xc5, 0x19, 0xf1, 0xe8, 0x21,
- 0x2c, 0xe0, 0xaf, 0x39, 0x0e, 0x99, 0xd8, 0xbc, 0x05, 0x29, 0x7e, 0x3b, 0x4d, 0x5c, 0x4d, 0xa0,
- 0x9a, 0xe2, 0x9c, 0x2b, 0x44, 0xc2, 0x1e, 0x09, 0x8f, 0x82, 0x96, 0x01, 0xd3, 0x13, 0xae, 0x48,
- 0x42, 0x24, 0x1c, 0xb1, 0xe5, 0x79, 0xc8, 0x72, 0x97, 0xb6, 0x30, 0xdf, 0xfc, 0x57, 0x85, 0xe5,
- 0x89, 0x73, 0x81, 0xde, 0x85, 0xb9, 0x83, 0xc6, 0xa3, 0xc6, 0xee, 0xe7, 0x8d, 0x15, 0xc5, 0x34,
- 0x4f, 0x5e, 0xe6, 0xd7, 0x26, 0x88, 0x83, 0xb0, 0x1d, 0x92, 0xe3, 0x10, 0x95, 0xe0, 0xe6, 0xde,
- 0xfe, 0xae, 0x53, 0x3d, 0xdc, 0xae, 0xec, 0xd7, 0x77, 0x1b, 0x87, 0x15, 0xa7, 0xba, 0xbd, 0x5f,
- 0x5d, 0x51, 0xcd, 0x5b, 0x27, 0x2f, 0xf3, 0xab, 0x13, 0xa2, 0x0a, 0xc5, 0x2e, 0xc7, 0x17, 0x34,
- 0x07, 0x4f, 0x77, 0x84, 0x46, 0x4b, 0xd5, 0x1c, 0xf4, 0xfc, 0x34, 0x8d, 0x53, 0x7d, 0xb2, 0xfb,
- 0x59, 0x75, 0x25, 0x93, 0xaa, 0x71, 0x64, 0x13, 0x33, 0xdf, 0xfc, 0xee, 0xe7, 0x9c, 0xf2, 0xeb,
- 0x2f, 0xb9, 0xc9, 0xea, 0x4a, 0xdf, 0x6b, 0xa0, 0x8b, 0x1b, 0x8a, 0x4e, 0x54, 0x40, 0x17, 0x9b,
- 0x07, 0xda, 0x4a, 0x5b, 0xc1, 0xa9, 0x2d, 0xcb, 0xb4, 0xaf, 0x8a, 0xc7, 0x3d, 0x69, 0xf5, 0xf7,
- 0x57, 0xff, 0xfc, 0xa8, 0x2d, 0xc3, 0x0d, 0xc9, 0x6f, 0x75, 0xdd, 0xd0, 0x6d, 0x61, 0x8a, 0xbe,
- 0x85, 0xa5, 0xff, 0x37, 0x1b, 0xb4, 0x31, 0xed, 0x08, 0x5d, 0x68, 0x67, 0xe6, 0xe6, 0x55, 0xd0,
- 0x99, 0xfe, 0xa5, 0x3f, 0x55, 0x58, 0x3a, 0x6f, 0xde, 0xec, 0x79, 0xd0, 0x43, 0x5f, 0x80, 0x2e,
- 0x9e, 0x26, 0x94, 0xda, 0x9a, 0xc6, 0x1e, 0x36, 0x33, 0x3f, 0x1d, 0x98, 0x5d, 0xb4, 0x07, 0xd7,
- 0xe4, 0xe3, 0x80, 0x52, 0x23, 0x8c, 0xbf, 0x3d, 0xe6, 0x9d, 0x19, 0xc4, 0x4c, 0x93, 0xb2, 0x71,
- 0xfa, 0x3a, 0xa7, 0xfc, 0xf5, 0x3a, 0xa7, 0xbc, 0x18, 0xe6, 0xd4, 0xd3, 0x61, 0x4e, 0xfd, 0x63,
- 0x98, 0x53, 0xff, 0x1e, 0xe6, 0xd4, 0x67, 0x99, 0x67, 0x7a, 0x33, 0x2b, 0x7f, 0x5b, 0x7c, 0xf0,
- 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x64, 0x01, 0xa3, 0x4f, 0x74, 0x09, 0x00, 0x00,
+ // 1015 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xc1, 0x6e, 0x1b, 0x45,
+ 0x18, 0xc7, 0x77, 0xed, 0xad, 0xd3, 0x7c, 0x69, 0x93, 0x30, 0x25, 0x61, 0xb3, 0x2d, 0x8e, 0xbb,
+ 0x45, 0xc2, 0x09, 0xc9, 0x5a, 0x18, 0xa4, 0xa2, 0x42, 0x0f, 0x71, 0x62, 0x29, 0x26, 0xad, 0x53,
+ 0x6d, 0x12, 0xe8, 0x2d, 0xac, 0x77, 0x27, 0xee, 0x62, 0x7b, 0xc7, 0xcc, 0x8c, 0x1d, 0xb8, 0xa0,
+ 0x1e, 0x21, 0x2f, 0x00, 0x42, 0xaa, 0x38, 0xc0, 0xb9, 0x0f, 0xc0, 0x03, 0xa0, 0x88, 0x13, 0x37,
+ 0x38, 0x45, 0xd4, 0x0f, 0x00, 0xaf, 0x80, 0x66, 0x76, 0xd7, 0x31, 0xf6, 0xda, 0xf1, 0x81, 0x4b,
+ 0x32, 0xda, 0xf9, 0xfd, 0xbf, 0xff, 0x37, 0x33, 0xdf, 0x7c, 0x63, 0x58, 0xab, 0xfb, 0xfc, 0x59,
+ 0xa7, 0x66, 0xb9, 0xa4, 0x55, 0xf0, 0x88, 0xdb, 0xc0, 0xb4, 0xc0, 0x4e, 0x1d, 0xda, 0x6a, 0xf8,
+ 0xbc, 0xe0, 0xb4, 0xfd, 0x02, 0x75, 0x4e, 0xb8, 0xd5, 0xa6, 0x84, 0x13, 0x84, 0xc2, 0x79, 0x2b,
+ 0x9e, 0xb7, 0xba, 0xef, 0x1a, 0x1b, 0x57, 0xc8, 0x49, 0xed, 0x73, 0xec, 0x72, 0x16, 0x46, 0x30,
+ 0xd6, 0xaf, 0xa0, 0xf9, 0x57, 0x6d, 0x1c, 0xb3, 0x9b, 0x03, 0xac, 0x4b, 0x28, 0x26, 0xac, 0x80,
+ 0xb9, 0xeb, 0xc9, 0x84, 0xe4, 0x9f, 0x76, 0x6d, 0x20, 0x39, 0xe3, 0xf5, 0x3a, 0xa9, 0x13, 0x39,
+ 0x2c, 0x88, 0x51, 0xf4, 0xf5, 0xfe, 0x04, 0x43, 0x49, 0xd4, 0x3a, 0x27, 0x85, 0x76, 0xb3, 0x53,
+ 0xf7, 0x83, 0xe8, 0x5f, 0x28, 0x34, 0x5f, 0xaa, 0x00, 0xb6, 0x73, 0xc2, 0x1f, 0xe3, 0x56, 0x0d,
+ 0x53, 0x74, 0x0f, 0x66, 0x84, 0xd7, 0xb1, 0xef, 0xe9, 0x6a, 0x4e, 0xcd, 0x6b, 0x25, 0xe8, 0x5d,
+ 0xac, 0x66, 0x04, 0x50, 0xd9, 0xb1, 0x33, 0x62, 0xaa, 0xe2, 0x09, 0x28, 0x20, 0x1e, 0x16, 0x50,
+ 0x2a, 0xa7, 0xe6, 0x67, 0x43, 0xa8, 0x4a, 0x3c, 0x2c, 0x20, 0x31, 0x55, 0xf1, 0x10, 0x02, 0xcd,
+ 0xf1, 0x3c, 0xaa, 0xa7, 0x05, 0x61, 0xcb, 0x31, 0x2a, 0x41, 0x86, 0x71, 0x87, 0x77, 0x98, 0xae,
+ 0xe5, 0xd4, 0xfc, 0x5c, 0xf1, 0x2d, 0x6b, 0x74, 0xa7, 0xad, 0xcb, 0x6c, 0x0e, 0x24, 0x5b, 0xd2,
+ 0xce, 0x2f, 0x56, 0x15, 0x3b, 0x52, 0x9a, 0x77, 0x61, 0xee, 0x63, 0xe2, 0x07, 0x36, 0xfe, 0xa2,
+ 0x83, 0x19, 0xef, 0xdb, 0xa8, 0x97, 0x36, 0xe6, 0x0f, 0x2a, 0xdc, 0x08, 0x19, 0xd6, 0x26, 0x01,
+ 0xc3, 0xd3, 0xad, 0xea, 0x03, 0x98, 0x69, 0x49, 0x5b, 0xa6, 0xa7, 0x72, 0xe9, 0xfc, 0x5c, 0x31,
+ 0x3b, 0x39, 0x3b, 0x3b, 0xc6, 0xd1, 0x3b, 0xb0, 0x40, 0x71, 0x8b, 0x74, 0xb1, 0x77, 0x1c, 0x47,
+ 0x48, 0xe7, 0xd2, 0x79, 0xad, 0x94, 0x5a, 0x54, 0xec, 0xf9, 0x68, 0x2a, 0x14, 0x31, 0xb3, 0x04,
+ 0x37, 0x1e, 0x61, 0xa7, 0x8b, 0xe3, 0x05, 0x14, 0x41, 0x13, 0x3b, 0x26, 0x13, 0xbb, 0xda, 0x53,
+ 0xb2, 0xe6, 0x02, 0xdc, 0x8c, 0x62, 0x84, 0x0b, 0x34, 0x1f, 0xc1, 0xca, 0x13, 0x4a, 0x5c, 0xcc,
+ 0x58, 0xc8, 0x32, 0xe6, 0xd4, 0xfb, 0x0e, 0x6b, 0x62, 0x61, 0xf2, 0x4b, 0x64, 0xb2, 0x60, 0x85,
+ 0x65, 0x65, 0xc5, 0x60, 0x3c, 0xff, 0x40, 0x7b, 0xfe, 0x9d, 0xa9, 0x98, 0x77, 0xc0, 0x48, 0x8a,
+ 0x16, 0x79, 0xed, 0x81, 0x7e, 0xc0, 0x29, 0x76, 0x5a, 0xff, 0x87, 0xd5, 0x6d, 0x58, 0x49, 0x08,
+ 0x16, 0x39, 0x7d, 0x04, 0x4b, 0x36, 0x66, 0xa4, 0xd9, 0xc5, 0x5b, 0x9e, 0x47, 0x45, 0x3a, 0x91,
+ 0xcd, 0x34, 0xe7, 0x69, 0x6e, 0xc0, 0xf2, 0xb0, 0x3a, 0x2a, 0x87, 0xa4, 0x9a, 0x69, 0xc2, 0xad,
+ 0x4a, 0xc0, 0x31, 0x0d, 0x9c, 0xa6, 0x88, 0x13, 0x3b, 0x2d, 0x43, 0xaa, 0x6f, 0x92, 0xe9, 0x5d,
+ 0xac, 0xa6, 0x2a, 0x3b, 0x76, 0xca, 0xf7, 0xd0, 0x43, 0xc8, 0x38, 0x2e, 0xf7, 0x49, 0x10, 0xd5,
+ 0xca, 0x6a, 0xd2, 0xb9, 0x1d, 0x70, 0x42, 0xf1, 0x96, 0xc4, 0xe2, 0x22, 0x0e, 0x45, 0xe6, 0xaf,
+ 0x1a, 0xcc, 0x0d, 0xcc, 0xa2, 0x0f, 0xfb, 0xe1, 0x84, 0xd5, 0x7c, 0xf1, 0xde, 0x15, 0xe1, 0xf6,
+ 0xfc, 0xc0, 0x8b, 0x83, 0x21, 0x2b, 0xaa, 0xa0, 0x94, 0xdc, 0x71, 0x3d, 0x49, 0x2a, 0xee, 0xe6,
+ 0xae, 0x12, 0x56, 0x0f, 0xba, 0x0f, 0x33, 0x0c, 0xd3, 0xae, 0xef, 0x62, 0x79, 0x39, 0xe7, 0x8a,
+ 0xb7, 0x13, 0xdd, 0x42, 0x64, 0x57, 0xb1, 0x63, 0x5a, 0x18, 0x71, 0x87, 0x35, 0xa2, 0xcb, 0x9b,
+ 0x68, 0x74, 0xe8, 0xb0, 0x86, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30, 0x3f, 0x25, 0xb4, 0xa1, 0x5f,
+ 0x1b, 0x6f, 0x54, 0x0d, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0x36, 0x3b, 0x8c, 0x63, 0xaa, 0x67,
+ 0xc6, 0x0b, 0xb7, 0x43, 0x44, 0x08, 0x23, 0x1a, 0xbd, 0x0f, 0x19, 0x86, 0x5d, 0x8a, 0xb9, 0x3e,
+ 0x23, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2b, 0x5a, 0x8a, 0x1c, 0xa1, 0x07, 0x70, 0x9d, 0x62,
+ 0x46, 0x3a, 0xd4, 0xc5, 0xfa, 0x75, 0xa9, 0xbb, 0x93, 0x78, 0x0d, 0x23, 0x66, 0x57, 0xb1, 0xfb,
+ 0x3c, 0x7a, 0x08, 0xb3, 0xf8, 0x4b, 0x8e, 0x03, 0x26, 0x0e, 0x6f, 0x56, 0x8a, 0xdf, 0x4c, 0x12,
+ 0x97, 0x63, 0x68, 0x57, 0xb1, 0x2f, 0x15, 0x22, 0x61, 0x97, 0x04, 0x27, 0x7e, 0x5d, 0x87, 0xf1,
+ 0x09, 0x6f, 0x4b, 0x42, 0x24, 0x1c, 0xb2, 0xa5, 0xeb, 0x90, 0xe1, 0x0e, 0xad, 0x63, 0xbe, 0xfe,
+ 0x8f, 0x0a, 0x0b, 0x43, 0x75, 0x81, 0xde, 0x86, 0x99, 0xa3, 0xea, 0x5e, 0x75, 0xff, 0xd3, 0xea,
+ 0xa2, 0x62, 0x18, 0x67, 0x2f, 0x72, 0xcb, 0x43, 0xc4, 0x51, 0xd0, 0x08, 0xc8, 0x69, 0x80, 0x8a,
+ 0x70, 0xeb, 0xe0, 0x70, 0xdf, 0x2e, 0x1f, 0x6f, 0x6d, 0x1f, 0x56, 0xf6, 0xab, 0xc7, 0xdb, 0x76,
+ 0x79, 0xeb, 0xb0, 0xbc, 0xa8, 0x1a, 0x2b, 0x67, 0x2f, 0x72, 0x4b, 0x43, 0xa2, 0x6d, 0x8a, 0x1d,
+ 0x8e, 0x47, 0x34, 0x47, 0x4f, 0x76, 0x84, 0x26, 0x95, 0xa8, 0x39, 0x6a, 0x7b, 0x49, 0x1a, 0xbb,
+ 0xfc, 0x78, 0xff, 0x93, 0xf2, 0x62, 0x3a, 0x51, 0x63, 0xcb, 0x76, 0x69, 0xbc, 0xf1, 0xcd, 0x4f,
+ 0x59, 0xe5, 0x97, 0x9f, 0xb3, 0xc3, 0xab, 0x2b, 0xfe, 0x98, 0x06, 0x4d, 0xdc, 0x50, 0x74, 0xa6,
+ 0x02, 0x1a, 0x6d, 0x53, 0x68, 0x33, 0x69, 0x07, 0xc7, 0x36, 0x47, 0xc3, 0x9a, 0x16, 0x8f, 0x7a,
+ 0xd2, 0xd2, 0x6f, 0x2f, 0xff, 0xfe, 0x3e, 0xb5, 0x00, 0x37, 0x25, 0xbf, 0xd9, 0x72, 0x02, 0xa7,
+ 0x8e, 0x29, 0xfa, 0x56, 0x85, 0xd7, 0x46, 0x1a, 0x19, 0xda, 0x48, 0xbe, 0xc6, 0xc9, 0xcd, 0xd3,
+ 0xd8, 0x9c, 0x92, 0x9e, 0x98, 0x49, 0x5e, 0x45, 0x5f, 0xc3, 0xfc, 0x7f, 0x1b, 0x1f, 0x5a, 0x1b,
+ 0x57, 0xce, 0x23, 0xad, 0xd5, 0x58, 0x9f, 0x06, 0x9d, 0x98, 0x41, 0xf1, 0x0f, 0x15, 0xe6, 0x2f,
+ 0x9f, 0x2c, 0xf6, 0xcc, 0x6f, 0xa3, 0xcf, 0x40, 0x13, 0x0f, 0x32, 0x4a, 0x6c, 0x93, 0x03, 0xcf,
+ 0xb9, 0x91, 0x1b, 0x0f, 0x4c, 0x3e, 0x00, 0x17, 0xae, 0xc9, 0x27, 0x11, 0x25, 0x46, 0x18, 0x7c,
+ 0x71, 0x8d, 0xbb, 0x13, 0x88, 0x89, 0x26, 0x25, 0xfd, 0xfc, 0x55, 0x56, 0xf9, 0xf3, 0x55, 0x56,
+ 0x79, 0xde, 0xcb, 0xaa, 0xe7, 0xbd, 0xac, 0xfa, 0x7b, 0x2f, 0xab, 0xfe, 0xd5, 0xcb, 0xaa, 0x4f,
+ 0xd3, 0x4f, 0xb5, 0x5a, 0x46, 0xfe, 0xa2, 0x7a, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e,
+ 0x7a, 0x8b, 0xe7, 0x6a, 0x0a, 0x00, 0x00,
}
diff --git a/vendor/github.com/docker/swarmkit/api/raft.proto b/vendor/github.com/docker/swarmkit/api/raft.proto
index b398315..b351c15 100644
--- a/vendor/github.com/docker/swarmkit/api/raft.proto
+++ b/vendor/github.com/docker/swarmkit/api/raft.proto
@@ -16,6 +16,15 @@
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
+ // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest
+ // to be processed on a raft member, returning a StreamRaftMessageResponse
+ // when processing of the streamed messages is complete. A single stream corresponds
+ // to a single raft message, which may be disassembled and streamed as individual messages.
+ // It is called from the Raft leader, which uses it to stream messages to a raft member.
+ rpc StreamRaftMessage(stream StreamRaftMessageRequest) returns (StreamRaftMessageResponse) {
+ option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+ };
+
// ResolveAddress returns the address where the node with the given ID can be reached.
rpc ResolveAddress(ResolveAddressRequest) returns (ResolveAddressResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
@@ -82,6 +91,15 @@
message ProcessRaftMessageResponse {}
+// Raft message streaming request.
+message StreamRaftMessageRequest {
+ option (docker.protobuf.plugin.deepcopy) = false;
+ raftpb.Message message = 1;
+}
+
+// Raft message streaming response.
+message StreamRaftMessageResponse {}
+
message ResolveAddressRequest {
// raft_id is the ID to resolve to an address.
uint64 raft_id = 1;
diff --git a/vendor/github.com/docker/swarmkit/api/resource.pb.go b/vendor/github.com/docker/swarmkit/api/resource.pb.go
index ead5d27..d4e27f3 100644
--- a/vendor/github.com/docker/swarmkit/api/resource.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/resource.pb.go
@@ -19,6 +19,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -403,12 +404,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/api/types.pb.go b/vendor/github.com/docker/swarmkit/api/types.pb.go
index 33e2281..67243dc 100644
--- a/vendor/github.com/docker/swarmkit/api/types.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/types.pb.go
@@ -72,7 +72,16 @@
TaskStateShutdown TaskState = 640
TaskStateFailed TaskState = 704
TaskStateRejected TaskState = 768
- TaskStateOrphaned TaskState = 832
+ // TaskStateRemove is used to correctly handle service deletions and scale
+ // downs. This allows us to keep track of tasks that have been marked for
+ // deletion, but can't yet be removed because the agent is in the process of
+ // shutting them down. Once the agent has shut down tasks with desired state
+ // REMOVE, the task reaper is responsible for removing them.
+ TaskStateRemove TaskState = 800
+ // TaskStateOrphaned is used to free up resources associated with service
+ // tasks on unresponsive nodes without having to delete those tasks. This
+ // state is directly assigned to the task by the orchestrator.
+ TaskStateOrphaned TaskState = 832
)
var TaskState_name = map[int32]string{
@@ -88,6 +97,7 @@
640: "SHUTDOWN",
704: "FAILED",
768: "REJECTED",
+ 800: "REMOVE",
832: "ORPHANED",
}
var TaskState_value = map[string]int32{
@@ -103,6 +113,7 @@
"SHUTDOWN": 640,
"FAILED": 704,
"REJECTED": 768,
+ "REMOVE": 800,
"ORPHANED": 832,
}
@@ -17029,319 +17040,321 @@
func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) }
var fileDescriptorTypes = []byte{
- // 5020 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x6c, 0x24, 0x49,
- 0x56, 0x76, 0xfd, 0xba, 0xea, 0x55, 0xd9, 0x4e, 0x47, 0x7b, 0x7b, 0xdc, 0xb5, 0xdd, 0x76, 0x4d,
- 0xce, 0xf4, 0xce, 0x6c, 0x6f, 0x53, 0xfd, 0xb7, 0xbb, 0xea, 0x99, 0x61, 0x77, 0xa6, 0xfe, 0x6c,
- 0xd7, 0xb6, 0x5d, 0x55, 0x8a, 0x2a, 0x77, 0xef, 0x22, 0x41, 0x2a, 0x9d, 0x19, 0x2e, 0xe7, 0x38,
- 0x2b, 0xa3, 0xc8, 0xcc, 0xb2, 0xbb, 0x58, 0x10, 0x2d, 0x0e, 0x80, 0x7c, 0x82, 0xdb, 0x22, 0x64,
- 0x2e, 0x70, 0x42, 0x48, 0x1c, 0x40, 0x42, 0x70, 0x1a, 0x24, 0x0e, 0x7b, 0x83, 0x05, 0x09, 0xad,
- 0x40, 0x32, 0xac, 0x0f, 0xdc, 0x56, 0x70, 0x59, 0x71, 0x01, 0x09, 0xc5, 0x4f, 0x66, 0xa5, 0xab,
- 0xd3, 0x76, 0x0f, 0xb3, 0x17, 0xbb, 0xe2, 0xbd, 0xef, 0xbd, 0x78, 0xf1, 0x22, 0xe2, 0xc5, 0x7b,
- 0x11, 0x09, 0xf7, 0x06, 0x96, 0x7f, 0x30, 0xde, 0xab, 0x18, 0x74, 0xf8, 0xc0, 0xa4, 0xc6, 0x21,
- 0x71, 0x1f, 0x78, 0xc7, 0xba, 0x3b, 0x3c, 0xb4, 0xfc, 0x07, 0xfa, 0xc8, 0x7a, 0xe0, 0x4f, 0x46,
- 0xc4, 0xab, 0x8c, 0x5c, 0xea, 0x53, 0x84, 0x04, 0xa0, 0x12, 0x00, 0x2a, 0x47, 0x8f, 0x4a, 0xeb,
- 0x03, 0x4a, 0x07, 0x36, 0x79, 0xc0, 0x11, 0x7b, 0xe3, 0xfd, 0x07, 0xbe, 0x35, 0x24, 0x9e, 0xaf,
- 0x0f, 0x47, 0x42, 0xa8, 0xb4, 0x36, 0x0b, 0x30, 0xc7, 0xae, 0xee, 0x5b, 0xd4, 0x91, 0xfc, 0x95,
- 0x01, 0x1d, 0x50, 0xfe, 0xf3, 0x01, 0xfb, 0x25, 0xa8, 0xea, 0x3a, 0xcc, 0x3f, 0x27, 0xae, 0x67,
- 0x51, 0x07, 0xad, 0x40, 0xc6, 0x72, 0x4c, 0xf2, 0x72, 0x35, 0x51, 0x4e, 0xbc, 0x9f, 0xc6, 0xa2,
- 0xa1, 0x3e, 0x04, 0x68, 0xb1, 0x1f, 0x4d, 0xc7, 0x77, 0x27, 0x48, 0x81, 0xd4, 0x21, 0x99, 0x70,
- 0x44, 0x1e, 0xb3, 0x9f, 0x8c, 0x72, 0xa4, 0xdb, 0xab, 0x49, 0x41, 0x39, 0xd2, 0x6d, 0xf5, 0x27,
- 0x09, 0x28, 0x54, 0x1d, 0x87, 0xfa, 0xbc, 0x77, 0x0f, 0x21, 0x48, 0x3b, 0xfa, 0x90, 0x48, 0x21,
- 0xfe, 0x1b, 0xd5, 0x21, 0x6b, 0xeb, 0x7b, 0xc4, 0xf6, 0x56, 0x93, 0xe5, 0xd4, 0xfb, 0x85, 0xc7,
- 0x5f, 0xab, 0xbc, 0x3e, 0xe4, 0x4a, 0x44, 0x49, 0x65, 0x9b, 0xa3, 0xb9, 0x11, 0x58, 0x8a, 0xa2,
- 0x6f, 0xc3, 0xbc, 0xe5, 0x98, 0x96, 0x41, 0xbc, 0xd5, 0x34, 0xd7, 0xb2, 0x16, 0xa7, 0x65, 0x6a,
- 0x7d, 0x2d, 0xfd, 0xc3, 0xb3, 0xf5, 0x39, 0x1c, 0x08, 0x95, 0x3e, 0x80, 0x42, 0x44, 0x6d, 0xcc,
- 0xd8, 0x56, 0x20, 0x73, 0xa4, 0xdb, 0x63, 0x22, 0x47, 0x27, 0x1a, 0x1f, 0x26, 0x9f, 0x26, 0xd4,
- 0x4f, 0x60, 0xa5, 0xad, 0x0f, 0x89, 0xb9, 0x49, 0x1c, 0xe2, 0x5a, 0x06, 0x26, 0x1e, 0x1d, 0xbb,
- 0x06, 0x61, 0x63, 0x3d, 0xb4, 0x1c, 0x33, 0x18, 0x2b, 0xfb, 0x1d, 0xaf, 0x45, 0xad, 0xc3, 0x5b,
- 0x0d, 0xcb, 0x33, 0x5c, 0xe2, 0x93, 0xcf, 0xad, 0x24, 0x15, 0x28, 0x39, 0x4b, 0xc0, 0xd2, 0xac,
- 0xf4, 0x2f, 0xc1, 0x0d, 0xe6, 0x62, 0x53, 0x73, 0x25, 0x45, 0xf3, 0x46, 0xc4, 0xe0, 0xca, 0x0a,
- 0x8f, 0xdf, 0x8f, 0xf3, 0x50, 0xdc, 0x48, 0xb6, 0xe6, 0xf0, 0x32, 0x57, 0x13, 0x10, 0x7a, 0x23,
- 0x62, 0x20, 0x03, 0x6e, 0x9a, 0xd2, 0xe8, 0x19, 0xf5, 0x49, 0xae, 0x3e, 0x76, 0x1a, 0x2f, 0x19,
- 0xe6, 0xd6, 0x1c, 0x5e, 0x09, 0x94, 0x45, 0x3b, 0xa9, 0x01, 0xe4, 0x02, 0xdd, 0xea, 0x0f, 0x12,
- 0x90, 0x0f, 0x98, 0x1e, 0xfa, 0x2a, 0xe4, 0x1d, 0xdd, 0xa1, 0x9a, 0x31, 0x1a, 0x7b, 0x7c, 0x40,
- 0xa9, 0x5a, 0xf1, 0xfc, 0x6c, 0x3d, 0xd7, 0xd6, 0x1d, 0x5a, 0xef, 0xee, 0x7a, 0x38, 0xc7, 0xd8,
- 0xf5, 0xd1, 0xd8, 0x43, 0x6f, 0x43, 0x71, 0x48, 0x86, 0xd4, 0x9d, 0x68, 0x7b, 0x13, 0x9f, 0x78,
- 0xd2, 0x6d, 0x05, 0x41, 0xab, 0x31, 0x12, 0xfa, 0x16, 0xcc, 0x0f, 0x84, 0x49, 0xab, 0x29, 0xbe,
- 0x7c, 0xde, 0x89, 0xb3, 0x7e, 0xc6, 0x6a, 0x1c, 0xc8, 0xa8, 0xbf, 0x97, 0x80, 0x95, 0x90, 0x4a,
- 0x7e, 0x75, 0x6c, 0xb9, 0x64, 0x48, 0x1c, 0xdf, 0x43, 0xdf, 0x80, 0xac, 0x6d, 0x0d, 0x2d, 0xdf,
- 0x93, 0x3e, 0xbf, 0x13, 0xa7, 0x36, 0x1c, 0x14, 0x96, 0x60, 0x54, 0x85, 0xa2, 0x4b, 0x3c, 0xe2,
- 0x1e, 0x89, 0x15, 0x2f, 0x3d, 0x7a, 0x8d, 0xf0, 0x05, 0x11, 0x75, 0x03, 0x72, 0x5d, 0x5b, 0xf7,
- 0xf7, 0xa9, 0x3b, 0x44, 0x2a, 0x14, 0x75, 0xd7, 0x38, 0xb0, 0x7c, 0x62, 0xf8, 0x63, 0x37, 0xd8,
- 0x7d, 0x17, 0x68, 0xe8, 0x26, 0x24, 0xa9, 0xe8, 0x28, 0x5f, 0xcb, 0x9e, 0x9f, 0xad, 0x27, 0x3b,
- 0x3d, 0x9c, 0xa4, 0x9e, 0xfa, 0x11, 0x2c, 0x77, 0xed, 0xf1, 0xc0, 0x72, 0x1a, 0xc4, 0x33, 0x5c,
- 0x6b, 0xc4, 0xb4, 0xb3, 0x55, 0xc9, 0x62, 0x54, 0xb0, 0x2a, 0xd9, 0xef, 0x70, 0x6b, 0x27, 0xa7,
- 0x5b, 0x5b, 0xfd, 0x9d, 0x24, 0x2c, 0x37, 0x9d, 0x81, 0xe5, 0x90, 0xa8, 0xf4, 0x5d, 0x58, 0x24,
- 0x9c, 0xa8, 0x1d, 0x89, 0x70, 0x23, 0xf5, 0x2c, 0x08, 0x6a, 0x10, 0x83, 0x5a, 0x33, 0x71, 0xe1,
- 0x51, 0xdc, 0xf0, 0x5f, 0xd3, 0x1e, 0x1b, 0x1d, 0x9a, 0x30, 0x3f, 0xe2, 0x83, 0xf0, 0xe4, 0xf4,
- 0xde, 0x8d, 0xd3, 0xf5, 0xda, 0x38, 0x83, 0x20, 0x21, 0x65, 0xbf, 0x48, 0x90, 0xf8, 0xb3, 0x24,
- 0x2c, 0xb5, 0xa9, 0x79, 0xc1, 0x0f, 0x25, 0xc8, 0x1d, 0x50, 0xcf, 0x8f, 0x04, 0xc4, 0xb0, 0x8d,
- 0x9e, 0x42, 0x6e, 0x24, 0xa7, 0x4f, 0xce, 0xfe, 0xed, 0x78, 0x93, 0x05, 0x06, 0x87, 0x68, 0xf4,
- 0x11, 0xe4, 0x83, 0x2d, 0xc3, 0x46, 0xfb, 0x06, 0x0b, 0x67, 0x8a, 0x47, 0xdf, 0x82, 0xac, 0x98,
- 0x84, 0xd5, 0x34, 0x97, 0xbc, 0xfb, 0x46, 0x3e, 0xc7, 0x52, 0x08, 0x6d, 0x42, 0xce, 0xb7, 0x3d,
- 0xcd, 0x72, 0xf6, 0xe9, 0x6a, 0x86, 0x2b, 0x58, 0x8f, 0x0d, 0x32, 0xd4, 0x24, 0xfd, 0xed, 0x5e,
- 0xcb, 0xd9, 0xa7, 0xb5, 0xc2, 0xf9, 0xd9, 0xfa, 0xbc, 0x6c, 0xe0, 0x79, 0xdf, 0xf6, 0xd8, 0x0f,
- 0xf5, 0xf7, 0x13, 0x50, 0x88, 0xa0, 0xd0, 0x1d, 0x00, 0xdf, 0x1d, 0x7b, 0xbe, 0xe6, 0x52, 0xea,
- 0x73, 0x67, 0x15, 0x71, 0x9e, 0x53, 0x30, 0xa5, 0x3e, 0xaa, 0xc0, 0x0d, 0x83, 0xb8, 0xbe, 0x66,
- 0x79, 0xde, 0x98, 0xb8, 0x9a, 0x37, 0xde, 0xfb, 0x94, 0x18, 0x3e, 0x77, 0x5c, 0x11, 0x2f, 0x33,
- 0x56, 0x8b, 0x73, 0x7a, 0x82, 0x81, 0x9e, 0xc0, 0xcd, 0x28, 0x7e, 0x34, 0xde, 0xb3, 0x2d, 0x43,
- 0x63, 0x93, 0x99, 0xe2, 0x22, 0x37, 0xa6, 0x22, 0x5d, 0xce, 0x7b, 0x46, 0x26, 0xea, 0x8f, 0x13,
- 0xa0, 0x60, 0x7d, 0xdf, 0xdf, 0x21, 0xc3, 0x3d, 0xe2, 0xf6, 0x7c, 0xdd, 0x1f, 0x7b, 0xe8, 0x26,
- 0x64, 0x6d, 0xa2, 0x9b, 0xc4, 0xe5, 0x46, 0xe5, 0xb0, 0x6c, 0xa1, 0x5d, 0xb6, 0x83, 0x75, 0xe3,
- 0x40, 0xdf, 0xb3, 0x6c, 0xcb, 0x9f, 0x70, 0x53, 0x16, 0xe3, 0x97, 0xf0, 0xac, 0xce, 0x0a, 0x8e,
- 0x08, 0xe2, 0x0b, 0x6a, 0xd0, 0x2a, 0xcc, 0x0f, 0x89, 0xe7, 0xe9, 0x03, 0xc2, 0x2d, 0xcd, 0xe3,
- 0xa0, 0xa9, 0x7e, 0x04, 0xc5, 0xa8, 0x1c, 0x2a, 0xc0, 0xfc, 0x6e, 0xfb, 0x59, 0xbb, 0xf3, 0xa2,
- 0xad, 0xcc, 0xa1, 0x25, 0x28, 0xec, 0xb6, 0x71, 0xb3, 0x5a, 0xdf, 0xaa, 0xd6, 0xb6, 0x9b, 0x4a,
- 0x02, 0x2d, 0x40, 0x7e, 0xda, 0x4c, 0xaa, 0x7f, 0x91, 0x00, 0x60, 0xee, 0x96, 0x83, 0xfa, 0x10,
- 0x32, 0x9e, 0xaf, 0xfb, 0x62, 0x55, 0x2e, 0x3e, 0x7e, 0xf7, 0xb2, 0x39, 0x94, 0xf6, 0xb2, 0x7f,
- 0x04, 0x0b, 0x91, 0xa8, 0x85, 0xc9, 0x0b, 0x16, 0xb2, 0x00, 0xa1, 0x9b, 0xa6, 0x2b, 0x0d, 0xe7,
- 0xbf, 0xd5, 0x8f, 0x20, 0xc3, 0xa5, 0x2f, 0x9a, 0x9b, 0x83, 0x74, 0x83, 0xfd, 0x4a, 0xa0, 0x3c,
- 0x64, 0x70, 0xb3, 0xda, 0xf8, 0x9e, 0x92, 0x44, 0x0a, 0x14, 0x1b, 0xad, 0x5e, 0xbd, 0xd3, 0x6e,
- 0x37, 0xeb, 0xfd, 0x66, 0x43, 0x49, 0xa9, 0x77, 0x21, 0xd3, 0x1a, 0x32, 0xcd, 0xb7, 0xd9, 0x92,
- 0xdf, 0x27, 0x2e, 0x71, 0x8c, 0x60, 0x27, 0x4d, 0x09, 0xea, 0x4f, 0x0b, 0x90, 0xd9, 0xa1, 0x63,
- 0xc7, 0x47, 0x8f, 0x23, 0x61, 0x6b, 0x31, 0x3e, 0x43, 0xe0, 0xc0, 0x4a, 0x7f, 0x32, 0x22, 0x32,
- 0xac, 0xdd, 0x84, 0xac, 0xd8, 0x1c, 0x72, 0x38, 0xb2, 0xc5, 0xe8, 0xbe, 0xee, 0x0e, 0x88, 0x2f,
- 0xc7, 0x23, 0x5b, 0xe8, 0x7d, 0x76, 0x62, 0xe9, 0x26, 0x75, 0xec, 0x09, 0xdf, 0x43, 0x39, 0x71,
- 0x2c, 0x61, 0xa2, 0x9b, 0x1d, 0xc7, 0x9e, 0xe0, 0x90, 0x8b, 0xb6, 0xa0, 0xb8, 0x67, 0x39, 0xa6,
- 0x46, 0x47, 0x22, 0xc8, 0x67, 0x2e, 0xdf, 0x71, 0xc2, 0xaa, 0x9a, 0xe5, 0x98, 0x1d, 0x01, 0xc6,
- 0x85, 0xbd, 0x69, 0x03, 0xb5, 0x61, 0xf1, 0x88, 0xda, 0xe3, 0x21, 0x09, 0x75, 0x65, 0xb9, 0xae,
- 0xf7, 0x2e, 0xd7, 0xf5, 0x9c, 0xe3, 0x03, 0x6d, 0x0b, 0x47, 0xd1, 0x26, 0x7a, 0x06, 0x0b, 0xfe,
- 0x70, 0xb4, 0xef, 0x85, 0xea, 0xe6, 0xb9, 0xba, 0xaf, 0x5c, 0xe1, 0x30, 0x06, 0x0f, 0xb4, 0x15,
- 0xfd, 0x48, 0x0b, 0x6d, 0x42, 0xc1, 0xa0, 0x8e, 0x67, 0x79, 0x3e, 0x71, 0x8c, 0xc9, 0x6a, 0x8e,
- 0xfb, 0xfe, 0x8a, 0x51, 0xd6, 0xa7, 0x60, 0x1c, 0x95, 0x2c, 0xfd, 0x56, 0x0a, 0x0a, 0x11, 0x17,
- 0xa0, 0x1e, 0x14, 0x46, 0x2e, 0x1d, 0xe9, 0x03, 0x7e, 0xe2, 0xc9, 0x49, 0x7d, 0xf4, 0x46, 0xee,
- 0xab, 0x74, 0xa7, 0x82, 0x38, 0xaa, 0x45, 0x3d, 0x4d, 0x42, 0x21, 0xc2, 0x44, 0xf7, 0x20, 0x87,
- 0xbb, 0xb8, 0xf5, 0xbc, 0xda, 0x6f, 0x2a, 0x73, 0xa5, 0xdb, 0x27, 0xa7, 0xe5, 0x55, 0xae, 0x2d,
- 0xaa, 0xa0, 0xeb, 0x5a, 0x47, 0x6c, 0x0d, 0xbf, 0x0f, 0xf3, 0x01, 0x34, 0x51, 0xfa, 0xf2, 0xc9,
- 0x69, 0xf9, 0xad, 0x59, 0x68, 0x04, 0x89, 0x7b, 0x5b, 0x55, 0xdc, 0x6c, 0x28, 0xc9, 0x78, 0x24,
- 0xee, 0x1d, 0xe8, 0x2e, 0x31, 0xd1, 0x57, 0x20, 0x2b, 0x81, 0xa9, 0x52, 0xe9, 0xe4, 0xb4, 0x7c,
- 0x73, 0x16, 0x38, 0xc5, 0xe1, 0xde, 0x76, 0xf5, 0x79, 0x53, 0x49, 0xc7, 0xe3, 0x70, 0xcf, 0xd6,
- 0x8f, 0x08, 0x7a, 0x17, 0x32, 0x02, 0x96, 0x29, 0xdd, 0x3a, 0x39, 0x2d, 0x7f, 0xe9, 0x35, 0x75,
- 0x0c, 0x55, 0x5a, 0xfd, 0xdd, 0x3f, 0x5e, 0x9b, 0xfb, 0x9b, 0x3f, 0x59, 0x53, 0x66, 0xd9, 0xa5,
- 0xff, 0x49, 0xc0, 0xc2, 0x85, 0xb5, 0x83, 0x54, 0xc8, 0x3a, 0xd4, 0xa0, 0x23, 0x71, 0x10, 0xe6,
- 0x6a, 0x70, 0x7e, 0xb6, 0x9e, 0x6d, 0xd3, 0x3a, 0x1d, 0x4d, 0xb0, 0xe4, 0xa0, 0x67, 0x33, 0x47,
- 0xf9, 0x93, 0x37, 0x5c, 0x98, 0xb1, 0x87, 0xf9, 0xc7, 0xb0, 0x60, 0xba, 0xd6, 0x11, 0x71, 0x35,
- 0x83, 0x3a, 0xfb, 0xd6, 0x40, 0x1e, 0x72, 0xa5, 0xd8, 0x7c, 0x93, 0x03, 0x71, 0x51, 0x08, 0xd4,
- 0x39, 0xfe, 0x0b, 0x1c, 0xe3, 0xa5, 0xe7, 0x50, 0x8c, 0x2e, 0x75, 0x76, 0x2e, 0x79, 0xd6, 0xaf,
- 0x11, 0x99, 0x58, 0xf2, 0x34, 0x14, 0xe7, 0x19, 0x45, 0xa4, 0x95, 0xef, 0x41, 0x7a, 0x48, 0x4d,
- 0xa1, 0x67, 0xa1, 0x76, 0x83, 0x65, 0x13, 0xff, 0x72, 0xb6, 0x5e, 0xa0, 0x5e, 0x65, 0xc3, 0xb2,
- 0xc9, 0x0e, 0x35, 0x09, 0xe6, 0x00, 0xf5, 0x08, 0xd2, 0x2c, 0xe6, 0xa0, 0x2f, 0x43, 0xba, 0xd6,
- 0x6a, 0x37, 0x94, 0xb9, 0xd2, 0xf2, 0xc9, 0x69, 0x79, 0x81, 0xbb, 0x84, 0x31, 0xd8, 0xda, 0x45,
- 0xeb, 0x90, 0x7d, 0xde, 0xd9, 0xde, 0xdd, 0x61, 0xcb, 0xeb, 0xc6, 0xc9, 0x69, 0x79, 0x29, 0x64,
- 0x0b, 0xa7, 0xa1, 0x3b, 0x90, 0xe9, 0xef, 0x74, 0x37, 0x7a, 0x4a, 0xb2, 0x84, 0x4e, 0x4e, 0xcb,
- 0x8b, 0x21, 0x9f, 0xdb, 0x5c, 0x5a, 0x96, 0xb3, 0x9a, 0x0f, 0xe9, 0xea, 0x8f, 0x12, 0x50, 0x88,
- 0x6c, 0x38, 0xb6, 0x30, 0x1b, 0xcd, 0x8d, 0xea, 0xee, 0x76, 0x5f, 0x99, 0x8b, 0x2c, 0xcc, 0x08,
- 0xa4, 0x41, 0xf6, 0xf5, 0xb1, 0xcd, 0xe2, 0x1c, 0xd4, 0x3b, 0xed, 0x5e, 0xab, 0xd7, 0x6f, 0xb6,
- 0xfb, 0x4a, 0xa2, 0xb4, 0x7a, 0x72, 0x5a, 0x5e, 0x99, 0x05, 0x6f, 0x8c, 0x6d, 0x9b, 0x2d, 0xcd,
- 0x7a, 0xb5, 0xbe, 0xc5, 0xd7, 0xfa, 0x74, 0x69, 0x46, 0x50, 0x75, 0xdd, 0x38, 0x20, 0x26, 0xba,
- 0x0f, 0xf9, 0x46, 0x73, 0xbb, 0xb9, 0x59, 0xe5, 0xd1, 0xbd, 0x74, 0xe7, 0xe4, 0xb4, 0x7c, 0xeb,
- 0xf5, 0xde, 0x6d, 0x32, 0xd0, 0x7d, 0x62, 0xce, 0x2c, 0xd1, 0x08, 0x44, 0xfd, 0x59, 0x12, 0x16,
- 0x30, 0x2b, 0x87, 0x5d, 0xbf, 0x4b, 0x6d, 0xcb, 0x98, 0xa0, 0x2e, 0xe4, 0x0d, 0xea, 0x98, 0x56,
- 0x24, 0x4e, 0x3c, 0xbe, 0x24, 0x25, 0x9a, 0x4a, 0x05, 0xad, 0x7a, 0x20, 0x89, 0xa7, 0x4a, 0xd0,
- 0x03, 0xc8, 0x98, 0xc4, 0xd6, 0x27, 0x32, 0x37, 0xbb, 0x55, 0x11, 0x05, 0x77, 0x25, 0x28, 0xb8,
- 0x2b, 0x0d, 0x59, 0x70, 0x63, 0x81, 0xe3, 0x35, 0x88, 0xfe, 0x52, 0xd3, 0x7d, 0x9f, 0x0c, 0x47,
- 0xbe, 0x48, 0xcc, 0xd2, 0xb8, 0x30, 0xd4, 0x5f, 0x56, 0x25, 0x09, 0x3d, 0x82, 0xec, 0xb1, 0xe5,
- 0x98, 0xf4, 0x58, 0xe6, 0x5e, 0x57, 0x28, 0x95, 0x40, 0xf5, 0x84, 0xa5, 0x24, 0x33, 0x66, 0xb2,
- 0x35, 0xd4, 0xee, 0xb4, 0x9b, 0xc1, 0x1a, 0x92, 0xfc, 0x8e, 0xd3, 0xa6, 0x0e, 0xdb, 0xff, 0xd0,
- 0x69, 0x6b, 0x1b, 0xd5, 0xd6, 0xf6, 0x2e, 0x66, 0xeb, 0x68, 0xe5, 0xe4, 0xb4, 0xac, 0x84, 0x90,
- 0x0d, 0xdd, 0xb2, 0x59, 0x31, 0x70, 0x0b, 0x52, 0xd5, 0xf6, 0xf7, 0x94, 0x64, 0x49, 0x39, 0x39,
- 0x2d, 0x17, 0x43, 0x76, 0xd5, 0x99, 0x4c, 0xfd, 0x3e, 0xdb, 0xaf, 0xfa, 0xf7, 0x29, 0x28, 0xee,
- 0x8e, 0x4c, 0xdd, 0x27, 0x62, 0x9f, 0xa1, 0x32, 0x14, 0x46, 0xba, 0xab, 0xdb, 0x36, 0xb1, 0x2d,
- 0x6f, 0x28, 0xaf, 0x12, 0xa2, 0x24, 0xf4, 0xc1, 0x9b, 0xba, 0xb1, 0x96, 0x63, 0x7b, 0xe7, 0x07,
- 0xff, 0xb6, 0x9e, 0x08, 0x1c, 0xba, 0x0b, 0x8b, 0xfb, 0xc2, 0x5a, 0x4d, 0x37, 0xf8, 0xc4, 0xa6,
- 0xf8, 0xc4, 0x56, 0xe2, 0x26, 0x36, 0x6a, 0x56, 0x45, 0x0e, 0xb2, 0xca, 0xa5, 0xf0, 0xc2, 0x7e,
- 0xb4, 0x89, 0x9e, 0xc0, 0xfc, 0x90, 0x3a, 0x96, 0x4f, 0xdd, 0xeb, 0x67, 0x21, 0x40, 0xa2, 0x7b,
- 0xb0, 0xcc, 0x26, 0x37, 0xb0, 0x87, 0xb3, 0xf9, 0x71, 0x9e, 0xc4, 0x4b, 0x43, 0xfd, 0xa5, 0xec,
- 0x10, 0x33, 0x32, 0xaa, 0x41, 0x86, 0xba, 0x2c, 0x5f, 0xcc, 0x72, 0x73, 0xef, 0x5f, 0x6b, 0xae,
- 0x68, 0x74, 0x98, 0x0c, 0x16, 0xa2, 0xea, 0x37, 0x61, 0xe1, 0xc2, 0x20, 0x58, 0x9a, 0xd4, 0xad,
- 0xee, 0xf6, 0x9a, 0xca, 0x1c, 0x2a, 0x42, 0xae, 0xde, 0x69, 0xf7, 0x5b, 0xed, 0x5d, 0x96, 0xe7,
- 0x15, 0x21, 0x87, 0x3b, 0xdb, 0xdb, 0xb5, 0x6a, 0xfd, 0x99, 0x92, 0x54, 0x2b, 0x50, 0x88, 0x68,
- 0x43, 0x8b, 0x00, 0xbd, 0x7e, 0xa7, 0xab, 0x6d, 0xb4, 0x70, 0xaf, 0x2f, 0xb2, 0xc4, 0x5e, 0xbf,
- 0x8a, 0xfb, 0x92, 0x90, 0x50, 0xff, 0x33, 0x19, 0xcc, 0xa8, 0x4c, 0x0c, 0x6b, 0x17, 0x13, 0xc3,
- 0x2b, 0x8c, 0x97, 0xa9, 0xe1, 0xb4, 0x11, 0x26, 0x88, 0x1f, 0x00, 0xf0, 0x85, 0x43, 0x4c, 0x4d,
- 0xf7, 0xe5, 0xc4, 0x97, 0x5e, 0x73, 0x72, 0x3f, 0xb8, 0xd1, 0xc2, 0x79, 0x89, 0xae, 0xfa, 0xe8,
- 0x5b, 0x50, 0x34, 0xe8, 0x70, 0x64, 0x13, 0x29, 0x9c, 0xba, 0x56, 0xb8, 0x10, 0xe2, 0xab, 0x7e,
- 0x34, 0x35, 0x4d, 0x5f, 0x4c, 0x9e, 0x7f, 0x3b, 0x11, 0x78, 0x26, 0x26, 0x1b, 0x2d, 0x42, 0x6e,
- 0xb7, 0xdb, 0xa8, 0xf6, 0x5b, 0xed, 0x4d, 0x25, 0x81, 0x00, 0xb2, 0xdc, 0xd5, 0x0d, 0x25, 0xc9,
- 0xb2, 0xe8, 0x7a, 0x67, 0xa7, 0xbb, 0xdd, 0xe4, 0x11, 0x0b, 0xad, 0x80, 0x12, 0x38, 0x5b, 0xe3,
- 0x8e, 0x6c, 0x36, 0x94, 0x34, 0xba, 0x01, 0x4b, 0x21, 0x55, 0x4a, 0x66, 0xd0, 0x4d, 0x40, 0x21,
- 0x71, 0xaa, 0x22, 0xab, 0xfe, 0x06, 0x2c, 0xd5, 0xa9, 0xe3, 0xeb, 0x96, 0x13, 0x56, 0x18, 0x8f,
- 0xd9, 0xa0, 0x25, 0x49, 0xb3, 0xe4, 0x4d, 0x50, 0x6d, 0xe9, 0xfc, 0x6c, 0xbd, 0x10, 0x42, 0x5b,
- 0x0d, 0x9e, 0x2a, 0xc9, 0x86, 0xc9, 0xf6, 0xef, 0xc8, 0x32, 0xb9, 0x73, 0x33, 0xb5, 0xf9, 0xf3,
- 0xb3, 0xf5, 0x54, 0xb7, 0xd5, 0xc0, 0x8c, 0x86, 0xbe, 0x0c, 0x79, 0xf2, 0xd2, 0xf2, 0x35, 0x83,
- 0x9d, 0x4b, 0xcc, 0x81, 0x19, 0x9c, 0x63, 0x84, 0x3a, 0x3b, 0x86, 0x6a, 0x00, 0x5d, 0xea, 0xfa,
- 0xb2, 0xe7, 0xaf, 0x43, 0x66, 0x44, 0x5d, 0x7e, 0x77, 0x71, 0xe9, 0x8d, 0x1a, 0x83, 0x8b, 0x85,
- 0x8a, 0x05, 0x58, 0xfd, 0x83, 0x14, 0x40, 0x5f, 0xf7, 0x0e, 0xa5, 0x92, 0xa7, 0x90, 0x0f, 0x6f,
- 0x27, 0xe5, 0x25, 0xc8, 0x95, 0xb3, 0x1d, 0x82, 0xd1, 0x93, 0x60, 0xb1, 0x89, 0xda, 0x29, 0xb6,
- 0x88, 0x0d, 0x3a, 0x8a, 0x2b, 0x3f, 0x2e, 0x16, 0x48, 0xec, 0x98, 0x27, 0xae, 0x2b, 0x67, 0x9e,
- 0xfd, 0x44, 0x75, 0x7e, 0x2c, 0x08, 0xa7, 0xc9, 0xec, 0x3b, 0xf6, 0xda, 0x67, 0x66, 0x46, 0xb6,
- 0xe6, 0xf0, 0x54, 0x0e, 0x7d, 0x0c, 0x05, 0x36, 0x6e, 0xcd, 0xe3, 0x3c, 0x99, 0x78, 0x5f, 0xea,
- 0x2a, 0xa1, 0x01, 0xc3, 0x68, 0xea, 0xe5, 0x3b, 0x00, 0xfa, 0x68, 0x64, 0x5b, 0xc4, 0xd4, 0xf6,
- 0x26, 0x3c, 0xd3, 0xce, 0xe3, 0xbc, 0xa4, 0xd4, 0x26, 0x6c, 0xbb, 0x04, 0x6c, 0xdd, 0xe7, 0xd9,
- 0xf3, 0x35, 0x0e, 0x94, 0xe8, 0xaa, 0x5f, 0x53, 0x60, 0xd1, 0x1d, 0x3b, 0xcc, 0xa1, 0xd2, 0x3a,
- 0xf5, 0xcf, 0x93, 0xf0, 0x56, 0x9b, 0xf8, 0xc7, 0xd4, 0x3d, 0xac, 0xfa, 0xbe, 0x6e, 0x1c, 0x0c,
- 0x89, 0x23, 0xa7, 0x2f, 0x52, 0xd0, 0x24, 0x2e, 0x14, 0x34, 0xab, 0x30, 0xaf, 0xdb, 0x96, 0xee,
- 0x11, 0x91, 0xbc, 0xe5, 0x71, 0xd0, 0x64, 0x65, 0x17, 0x2b, 0xe2, 0x88, 0xe7, 0x11, 0x71, 0xaf,
- 0xc2, 0x0c, 0x0f, 0x08, 0xe8, 0xfb, 0x70, 0x53, 0xa6, 0x69, 0x7a, 0xd8, 0x15, 0x2b, 0x28, 0x82,
- 0x0b, 0xda, 0x66, 0x6c, 0x55, 0x19, 0x6f, 0x9c, 0xcc, 0xe3, 0xa6, 0xe4, 0xce, 0xc8, 0x97, 0x59,
- 0xe1, 0x8a, 0x19, 0xc3, 0x2a, 0x6d, 0xc2, 0xad, 0x4b, 0x45, 0x3e, 0xd7, 0xbd, 0xcd, 0x3f, 0x25,
- 0x01, 0x5a, 0xdd, 0xea, 0x8e, 0x74, 0x52, 0x03, 0xb2, 0xfb, 0xfa, 0xd0, 0xb2, 0x27, 0x57, 0x45,
- 0xc0, 0x29, 0xbe, 0x52, 0x15, 0xee, 0xd8, 0xe0, 0x32, 0x58, 0xca, 0xf2, 0x9a, 0x72, 0xbc, 0xe7,
- 0x10, 0x3f, 0xac, 0x29, 0x79, 0x8b, 0x99, 0xe1, 0xea, 0x4e, 0xb8, 0x74, 0x45, 0x83, 0x4d, 0x00,
- 0x4b, 0x79, 0x8e, 0xf5, 0x49, 0x10, 0xb6, 0x64, 0x13, 0x6d, 0xf1, 0xdb, 0x51, 0xe2, 0x1e, 0x11,
- 0x73, 0x35, 0xc3, 0x9d, 0x7a, 0x9d, 0x3d, 0x58, 0xc2, 0x85, 0xef, 0x42, 0xe9, 0xd2, 0x47, 0x3c,
- 0x65, 0x9a, 0xb2, 0x3e, 0x97, 0x8f, 0x1e, 0xc2, 0xc2, 0x85, 0x71, 0xbe, 0x56, 0xcc, 0xb7, 0xba,
- 0xcf, 0xbf, 0xae, 0xa4, 0xe5, 0xaf, 0x6f, 0x2a, 0x59, 0xf5, 0x4f, 0x53, 0x22, 0xd0, 0x48, 0xaf,
- 0xc6, 0xbf, 0x0a, 0xe4, 0xf8, 0xea, 0x36, 0xa8, 0x2d, 0x03, 0xc0, 0x7b, 0x57, 0xc7, 0x1f, 0x56,
- 0xd3, 0x71, 0x38, 0x0e, 0x05, 0xd1, 0x3a, 0x14, 0xc4, 0x2a, 0xd6, 0xd8, 0x86, 0xe3, 0x6e, 0x5d,
- 0xc0, 0x20, 0x48, 0x4c, 0x12, 0xdd, 0x85, 0x45, 0x7e, 0xf9, 0xe3, 0x1d, 0x10, 0x53, 0x60, 0xd2,
- 0x1c, 0xb3, 0x10, 0x52, 0x39, 0x6c, 0x07, 0x8a, 0x92, 0xa0, 0xf1, 0x7c, 0x3e, 0xc3, 0x0d, 0xba,
- 0x77, 0x9d, 0x41, 0x42, 0x84, 0xa7, 0xf9, 0x85, 0xd1, 0xb4, 0xa1, 0x36, 0x20, 0x17, 0x18, 0x8b,
- 0x56, 0x21, 0xd5, 0xaf, 0x77, 0x95, 0xb9, 0xd2, 0xd2, 0xc9, 0x69, 0xb9, 0x10, 0x90, 0xfb, 0xf5,
- 0x2e, 0xe3, 0xec, 0x36, 0xba, 0x4a, 0xe2, 0x22, 0x67, 0xb7, 0xd1, 0x2d, 0xa5, 0x59, 0x0e, 0xa6,
- 0xee, 0x43, 0x21, 0xd2, 0x03, 0x7a, 0x07, 0xe6, 0x5b, 0xed, 0x4d, 0xdc, 0xec, 0xf5, 0x94, 0xb9,
- 0xd2, 0xcd, 0x93, 0xd3, 0x32, 0x8a, 0x70, 0x5b, 0xce, 0x80, 0xcd, 0x0f, 0xba, 0x03, 0xe9, 0xad,
- 0x0e, 0x3b, 0xdb, 0x45, 0x01, 0x11, 0x41, 0x6c, 0x51, 0xcf, 0x2f, 0xdd, 0x90, 0xc9, 0x5d, 0x54,
- 0xb1, 0xfa, 0x87, 0x09, 0xc8, 0x8a, 0xcd, 0x14, 0x3b, 0x51, 0x55, 0x98, 0x0f, 0xae, 0x09, 0x44,
- 0x71, 0xf7, 0xde, 0xe5, 0x85, 0x58, 0x45, 0xd6, 0x4d, 0x62, 0xf9, 0x05, 0x72, 0xa5, 0x0f, 0xa1,
- 0x18, 0x65, 0x7c, 0xae, 0xc5, 0xf7, 0x7d, 0x28, 0xb0, 0xf5, 0x1d, 0x14, 0x64, 0x8f, 0x21, 0x2b,
- 0x02, 0x42, 0x78, 0xd6, 0x5c, 0x5e, 0x15, 0x4a, 0x24, 0x7a, 0x0a, 0xf3, 0xa2, 0x92, 0x0c, 0x6e,
- 0x87, 0xd7, 0xae, 0xde, 0x45, 0x38, 0x80, 0xab, 0x1f, 0x43, 0xba, 0x4b, 0x88, 0xcb, 0x7c, 0xef,
- 0x50, 0x93, 0x4c, 0x8f, 0x67, 0x59, 0x04, 0x9b, 0xa4, 0xd5, 0x60, 0x45, 0xb0, 0x49, 0x5a, 0x66,
- 0x78, 0xff, 0x95, 0x8c, 0xdc, 0x7f, 0xf5, 0xa1, 0xf8, 0x82, 0x58, 0x83, 0x03, 0x9f, 0x98, 0x5c,
- 0xd1, 0x7d, 0x48, 0x8f, 0x48, 0x68, 0xfc, 0x6a, 0xec, 0x02, 0x23, 0xc4, 0xc5, 0x1c, 0xc5, 0xe2,
- 0xc8, 0x31, 0x97, 0x96, 0x4f, 0x1a, 0xb2, 0xa5, 0xfe, 0x63, 0x12, 0x16, 0x5b, 0x9e, 0x37, 0xd6,
- 0x1d, 0x23, 0xc8, 0xdc, 0xbe, 0x7d, 0x31, 0x73, 0x8b, 0x7d, 0xfb, 0xb9, 0x28, 0x72, 0xf1, 0x5a,
- 0x4f, 0x9e, 0x9e, 0xc9, 0xf0, 0xf4, 0x54, 0x7f, 0x9a, 0x08, 0xee, 0xee, 0xee, 0x46, 0xb6, 0xbb,
- 0xa8, 0x03, 0xa3, 0x9a, 0xc8, 0xae, 0x73, 0xe8, 0xd0, 0x63, 0x07, 0xbd, 0x0d, 0x19, 0xdc, 0x6c,
- 0x37, 0x5f, 0x28, 0x09, 0xb1, 0x3c, 0x2f, 0x80, 0x30, 0x71, 0xc8, 0x31, 0xd3, 0xd4, 0x6d, 0xb6,
- 0x1b, 0x2c, 0xd3, 0x4a, 0xc6, 0x68, 0xea, 0x12, 0xc7, 0xb4, 0x9c, 0x01, 0x7a, 0x07, 0xb2, 0xad,
- 0x5e, 0x6f, 0x97, 0x97, 0x89, 0x6f, 0x9d, 0x9c, 0x96, 0x6f, 0x5c, 0x40, 0xf1, 0x7b, 0x5b, 0x93,
- 0x81, 0x58, 0x99, 0xc3, 0x72, 0xb0, 0x18, 0x10, 0xcb, 0x9f, 0x05, 0x08, 0x77, 0xfa, 0xd5, 0x7e,
- 0x53, 0xc9, 0xc4, 0x80, 0x30, 0x65, 0x7f, 0xe5, 0x76, 0xfb, 0xd7, 0x24, 0x28, 0x55, 0xc3, 0x20,
- 0x23, 0x9f, 0xf1, 0x65, 0x65, 0xd9, 0x87, 0xdc, 0x88, 0xfd, 0xb2, 0x48, 0x90, 0x25, 0x3d, 0x8d,
- 0x7d, 0xbd, 0x9c, 0x91, 0xab, 0x60, 0x6a, 0x93, 0xaa, 0x39, 0xb4, 0x3c, 0xcf, 0xa2, 0x8e, 0xa0,
- 0xe1, 0x50, 0x53, 0xe9, 0xbf, 0x12, 0x70, 0x23, 0x06, 0x81, 0x1e, 0x42, 0xda, 0xa5, 0x76, 0x30,
- 0x87, 0xb7, 0x2f, 0xbb, 0x96, 0x65, 0xa2, 0x98, 0x23, 0xd1, 0x1a, 0x80, 0x3e, 0xf6, 0xa9, 0xce,
- 0xfb, 0xe7, 0xb3, 0x97, 0xc3, 0x11, 0x0a, 0x7a, 0x01, 0x59, 0x8f, 0x18, 0x2e, 0x09, 0x72, 0xe9,
- 0x8f, 0xff, 0xbf, 0xd6, 0x57, 0x7a, 0x5c, 0x0d, 0x96, 0xea, 0x4a, 0x15, 0xc8, 0x0a, 0x0a, 0x5b,
- 0xf6, 0xa6, 0xee, 0xeb, 0xf2, 0xd2, 0x9e, 0xff, 0x66, 0xab, 0x49, 0xb7, 0x07, 0xc1, 0x6a, 0xd2,
- 0xed, 0x81, 0xfa, 0x77, 0x49, 0x80, 0xe6, 0x4b, 0x9f, 0xb8, 0x8e, 0x6e, 0xd7, 0xab, 0xa8, 0x19,
- 0x89, 0xfe, 0x62, 0xb4, 0x5f, 0x8d, 0x7d, 0x89, 0x08, 0x25, 0x2a, 0xf5, 0x6a, 0x4c, 0xfc, 0xbf,
- 0x05, 0xa9, 0xb1, 0x2b, 0x1f, 0xa4, 0x45, 0x1e, 0xbc, 0x8b, 0xb7, 0x31, 0xa3, 0xa1, 0xe6, 0x34,
- 0x6c, 0xa5, 0x2e, 0x7f, 0x76, 0x8e, 0x74, 0x10, 0x1b, 0xba, 0xd8, 0xce, 0x37, 0x74, 0xcd, 0x20,
- 0xf2, 0xe4, 0x28, 0x8a, 0x9d, 0x5f, 0xaf, 0xd6, 0x89, 0xeb, 0xe3, 0xac, 0xa1, 0xb3, 0xff, 0x5f,
- 0x28, 0xbe, 0xdd, 0x07, 0x98, 0x0e, 0x0d, 0xad, 0x41, 0xa6, 0xbe, 0xd1, 0xeb, 0x6d, 0x2b, 0x73,
- 0x22, 0x80, 0x4f, 0x59, 0x9c, 0xac, 0xfe, 0x75, 0x12, 0x72, 0xf5, 0xaa, 0x3c, 0x56, 0xeb, 0xa0,
- 0xf0, 0xa8, 0xc4, 0x9f, 0x3a, 0xc8, 0xcb, 0x91, 0xe5, 0x4e, 0x64, 0x60, 0xb9, 0xa2, 0xa8, 0x5d,
- 0x64, 0x22, 0xcc, 0xea, 0x26, 0x17, 0x40, 0x18, 0x8a, 0x44, 0x3a, 0x41, 0x33, 0xf4, 0x20, 0xc6,
- 0xaf, 0x5d, 0xed, 0x2c, 0x51, 0x9e, 0x4c, 0xdb, 0x1e, 0x2e, 0x04, 0x4a, 0xea, 0xba, 0x87, 0x3e,
- 0x80, 0x25, 0xcf, 0x1a, 0x38, 0x96, 0x33, 0xd0, 0x02, 0xe7, 0xf1, 0x77, 0x97, 0xda, 0xf2, 0xf9,
- 0xd9, 0xfa, 0x42, 0x4f, 0xb0, 0xa4, 0x0f, 0x17, 0x24, 0xb2, 0xce, 0x5d, 0x89, 0xbe, 0x09, 0x8b,
- 0x11, 0x51, 0xe6, 0x45, 0xe1, 0x76, 0xe5, 0xfc, 0x6c, 0xbd, 0x18, 0x4a, 0x3e, 0x23, 0x13, 0x5c,
- 0x0c, 0x05, 0x9f, 0x11, 0x7e, 0xff, 0xb2, 0x4f, 0x5d, 0x83, 0x68, 0x2e, 0xdf, 0xd3, 0xfc, 0x04,
- 0x4f, 0xe3, 0x02, 0xa7, 0x89, 0x6d, 0xae, 0x3e, 0x87, 0x1b, 0x1d, 0xd7, 0x38, 0x20, 0x9e, 0x2f,
- 0x5c, 0x21, 0xbd, 0xf8, 0x31, 0xdc, 0xf6, 0x75, 0xef, 0x50, 0x3b, 0xb0, 0x3c, 0x9f, 0xba, 0x13,
- 0xcd, 0x25, 0x3e, 0x71, 0x18, 0x5f, 0xe3, 0x8f, 0xb5, 0xf2, 0xd2, 0xef, 0x16, 0xc3, 0x6c, 0x09,
- 0x08, 0x0e, 0x10, 0xdb, 0x0c, 0xa0, 0xb6, 0xa0, 0xc8, 0xca, 0x14, 0x79, 0x71, 0xc6, 0x46, 0x0f,
- 0x36, 0x1d, 0x68, 0x6f, 0x7c, 0x4c, 0xe5, 0x6d, 0x3a, 0x10, 0x3f, 0xd5, 0xef, 0x82, 0xd2, 0xb0,
- 0xbc, 0x91, 0xee, 0x1b, 0x07, 0xc1, 0x6d, 0x26, 0x6a, 0x80, 0x72, 0x40, 0x74, 0xd7, 0xdf, 0x23,
- 0xba, 0xaf, 0x8d, 0x88, 0x6b, 0x51, 0xf3, 0xfa, 0x59, 0x5e, 0x0a, 0x45, 0xba, 0x5c, 0x42, 0xfd,
- 0xef, 0x04, 0x00, 0xd6, 0xf7, 0x83, 0x8c, 0xec, 0x6b, 0xb0, 0xec, 0x39, 0xfa, 0xc8, 0x3b, 0xa0,
- 0xbe, 0x66, 0x39, 0x3e, 0x71, 0x8f, 0x74, 0x5b, 0x5e, 0xe0, 0x28, 0x01, 0xa3, 0x25, 0xe9, 0xe8,
- 0x3e, 0xa0, 0x43, 0x42, 0x46, 0x1a, 0xb5, 0x4d, 0x2d, 0x60, 0x8a, 0xa7, 0xe4, 0x34, 0x56, 0x18,
- 0xa7, 0x63, 0x9b, 0xbd, 0x80, 0x8e, 0x6a, 0xb0, 0xc6, 0x86, 0x4f, 0x1c, 0xdf, 0xb5, 0x88, 0xa7,
- 0xed, 0x53, 0x57, 0xf3, 0x6c, 0x7a, 0xac, 0xed, 0x53, 0xdb, 0xa6, 0xc7, 0xc4, 0x0d, 0xee, 0xc6,
- 0x4a, 0x36, 0x1d, 0x34, 0x05, 0x68, 0x83, 0xba, 0x3d, 0x9b, 0x1e, 0x6f, 0x04, 0x08, 0x96, 0xb6,
- 0x4d, 0xc7, 0xec, 0x5b, 0xc6, 0x61, 0x90, 0xb6, 0x85, 0xd4, 0xbe, 0x65, 0x1c, 0xa2, 0x77, 0x60,
- 0x81, 0xd8, 0x84, 0x5f, 0x91, 0x08, 0x54, 0x86, 0xa3, 0x8a, 0x01, 0x91, 0x81, 0xd4, 0x4f, 0x40,
- 0x69, 0x3a, 0x86, 0x3b, 0x19, 0x45, 0xe6, 0xfc, 0x3e, 0x20, 0x16, 0x24, 0x35, 0x9b, 0x1a, 0x87,
- 0xda, 0x50, 0x77, 0xf4, 0x01, 0xb3, 0x4b, 0xbc, 0xf0, 0x29, 0x8c, 0xb3, 0x4d, 0x8d, 0xc3, 0x1d,
- 0x49, 0x57, 0x3f, 0x00, 0xe8, 0x8d, 0x5c, 0xa2, 0x9b, 0x1d, 0x96, 0x4d, 0x30, 0xd7, 0xf1, 0x96,
- 0x66, 0xca, 0x17, 0x52, 0xea, 0xca, 0xad, 0xae, 0x08, 0x46, 0x23, 0xa4, 0xab, 0xbf, 0x0c, 0x37,
- 0xba, 0xb6, 0x6e, 0xf0, 0xaf, 0x05, 0xba, 0xe1, 0x93, 0x15, 0x7a, 0x0a, 0x59, 0x01, 0x95, 0x33,
- 0x19, 0xbb, 0xdd, 0xa6, 0x7d, 0x6e, 0xcd, 0x61, 0x89, 0xaf, 0x15, 0x01, 0xa6, 0x7a, 0xd4, 0xbf,
- 0x4c, 0x40, 0x3e, 0xd4, 0x8f, 0xca, 0xe2, 0x25, 0xc6, 0x77, 0x75, 0xcb, 0x91, 0x55, 0x7d, 0x1e,
- 0x47, 0x49, 0xa8, 0x05, 0x85, 0x51, 0x28, 0x7d, 0x65, 0x3e, 0x17, 0x63, 0x35, 0x8e, 0xca, 0xa2,
- 0x0f, 0x21, 0x1f, 0x3c, 0x49, 0x07, 0x11, 0xf6, 0xea, 0x17, 0xec, 0x29, 0x5c, 0xfd, 0x36, 0xc0,
- 0x77, 0xa8, 0xe5, 0xf4, 0xe9, 0x21, 0x71, 0xf8, 0x13, 0x2b, 0xab, 0x09, 0x49, 0xe0, 0x45, 0xd9,
- 0xe2, 0xa5, 0xbe, 0x98, 0x82, 0xf0, 0xa5, 0x51, 0x34, 0xd5, 0xbf, 0x4d, 0x42, 0x16, 0x53, 0xea,
- 0xd7, 0xab, 0xa8, 0x0c, 0x59, 0x19, 0x27, 0xf8, 0xf9, 0x53, 0xcb, 0x9f, 0x9f, 0xad, 0x67, 0x44,
- 0x80, 0xc8, 0x18, 0x3c, 0x32, 0x44, 0x22, 0x78, 0xf2, 0xb2, 0x08, 0x8e, 0x1e, 0x42, 0x51, 0x82,
- 0xb4, 0x03, 0xdd, 0x3b, 0x10, 0x05, 0x5a, 0x6d, 0xf1, 0xfc, 0x6c, 0x1d, 0x04, 0x72, 0x4b, 0xf7,
- 0x0e, 0x30, 0x08, 0x34, 0xfb, 0x8d, 0x9a, 0x50, 0xf8, 0x94, 0x5a, 0x8e, 0xe6, 0xf3, 0x41, 0xc8,
- 0xcb, 0xc4, 0xd8, 0x79, 0x9c, 0x0e, 0x55, 0x7e, 0x6f, 0x00, 0x9f, 0x4e, 0x07, 0xdf, 0x84, 0x05,
- 0x97, 0x52, 0x5f, 0x84, 0x2d, 0x8b, 0x3a, 0xf2, 0x9e, 0xa2, 0x1c, 0x7b, 0x7d, 0x4d, 0xa9, 0x8f,
- 0x25, 0x0e, 0x17, 0xdd, 0x48, 0x0b, 0x3d, 0x84, 0x15, 0x5b, 0xf7, 0x7c, 0x8d, 0xc7, 0x3b, 0x73,
- 0xaa, 0x2d, 0xcb, 0xb7, 0x1a, 0x62, 0xbc, 0x0d, 0xce, 0x0a, 0x24, 0xd4, 0x7f, 0x4e, 0x40, 0x81,
- 0x0d, 0xc6, 0xda, 0xb7, 0x0c, 0x96, 0xe4, 0x7d, 0xfe, 0xdc, 0xe3, 0x16, 0xa4, 0x0c, 0xcf, 0x95,
- 0x4e, 0xe5, 0x87, 0x6f, 0xbd, 0x87, 0x31, 0xa3, 0xa1, 0x4f, 0x20, 0x2b, 0xef, 0x4b, 0x44, 0xda,
- 0xa1, 0x5e, 0x9f, 0x8e, 0x4a, 0xdf, 0x48, 0x39, 0xbe, 0x96, 0xa7, 0xd6, 0x89, 0x43, 0x00, 0x47,
- 0x49, 0xe8, 0x26, 0x24, 0x0d, 0xe1, 0x2e, 0xf9, 0x41, 0x4b, 0xbd, 0x8d, 0x93, 0x86, 0xa3, 0xfe,
- 0x28, 0x01, 0x0b, 0xd3, 0x0d, 0xcf, 0x56, 0xc0, 0x6d, 0xc8, 0x7b, 0xe3, 0x3d, 0x6f, 0xe2, 0xf9,
- 0x64, 0x18, 0x3c, 0x1f, 0x87, 0x04, 0xd4, 0x82, 0xbc, 0x6e, 0x0f, 0xa8, 0x6b, 0xf9, 0x07, 0x43,
- 0x59, 0x89, 0xc6, 0xa7, 0x0a, 0x51, 0x9d, 0x95, 0x6a, 0x20, 0x82, 0xa7, 0xd2, 0xc1, 0xb9, 0x2f,
- 0xbe, 0x31, 0xe0, 0xe7, 0xfe, 0xdb, 0x50, 0xb4, 0xf5, 0x21, 0xbf, 0x40, 0xf2, 0xad, 0xa1, 0x18,
- 0x47, 0x1a, 0x17, 0x24, 0xad, 0x6f, 0x0d, 0x89, 0xaa, 0x42, 0x3e, 0x54, 0x86, 0x96, 0xa0, 0x50,
- 0x6d, 0xf6, 0xb4, 0x47, 0x8f, 0x9f, 0x6a, 0x9b, 0xf5, 0x1d, 0x65, 0x4e, 0xe6, 0xa6, 0x7f, 0x95,
- 0x80, 0x05, 0x19, 0x8e, 0x64, 0xbe, 0xff, 0x0e, 0xcc, 0xbb, 0xfa, 0xbe, 0x1f, 0x54, 0x24, 0x69,
- 0xb1, 0xaa, 0x59, 0x84, 0x67, 0x15, 0x09, 0x63, 0xc5, 0x57, 0x24, 0x91, 0x0f, 0x1a, 0x52, 0x57,
- 0x7e, 0xd0, 0x90, 0xfe, 0xb9, 0x7c, 0xd0, 0xa0, 0xfe, 0x26, 0xc0, 0x86, 0x65, 0x93, 0xbe, 0xb8,
- 0x6b, 0x8a, 0xab, 0x2f, 0x59, 0x0e, 0x27, 0xef, 0x32, 0x83, 0x1c, 0xae, 0xd5, 0xc0, 0x8c, 0xc6,
- 0x58, 0x03, 0xcb, 0x94, 0x9b, 0x91, 0xb3, 0x36, 0x19, 0x6b, 0x60, 0x99, 0xe1, 0xcb, 0x5b, 0xfa,
- 0xba, 0x97, 0xb7, 0xd3, 0x04, 0x2c, 0xc9, 0xdc, 0x35, 0x0c, 0xbf, 0x5f, 0x85, 0xbc, 0x48, 0x63,
- 0xa7, 0x05, 0x1d, 0x7f, 0xc4, 0x17, 0xb8, 0x56, 0x03, 0xe7, 0x04, 0xbb, 0x65, 0xa2, 0x75, 0x28,
- 0x48, 0x68, 0xe4, 0xe3, 0x27, 0x10, 0xa4, 0x36, 0x33, 0xff, 0xeb, 0x90, 0xde, 0xb7, 0x6c, 0x22,
- 0x17, 0x7a, 0x6c, 0x00, 0x98, 0x3a, 0x60, 0x6b, 0x0e, 0x73, 0x74, 0x2d, 0x17, 0x5c, 0xc6, 0x71,
- 0xfb, 0x64, 0xd9, 0x19, 0xb5, 0x4f, 0x54, 0xa0, 0x33, 0xf6, 0x09, 0x1c, 0xb3, 0x4f, 0xb0, 0x85,
- 0x7d, 0x12, 0x1a, 0xb5, 0x4f, 0x90, 0x7e, 0x2e, 0xf6, 0x6d, 0xc3, 0xcd, 0x9a, 0xad, 0x1b, 0x87,
- 0xb6, 0xe5, 0xf9, 0xc4, 0x8c, 0x46, 0x8c, 0xc7, 0x90, 0xbd, 0x90, 0x74, 0x5e, 0x75, 0x6b, 0x29,
- 0x91, 0xea, 0x7f, 0x24, 0xa0, 0xb8, 0x45, 0x74, 0xdb, 0x3f, 0x98, 0x5e, 0x0d, 0xf9, 0xc4, 0xf3,
- 0xe5, 0x61, 0xc5, 0x7f, 0xa3, 0x6f, 0x40, 0x2e, 0xcc, 0x49, 0xae, 0x7d, 0x7f, 0x0b, 0xa1, 0xe8,
- 0x09, 0xcc, 0xb3, 0x3d, 0x46, 0xc7, 0x41, 0xb1, 0x73, 0xd5, 0xd3, 0x8e, 0x44, 0xb2, 0x43, 0xc6,
- 0x25, 0x3c, 0x09, 0xe1, 0x4b, 0x29, 0x83, 0x83, 0x26, 0xfa, 0x45, 0x28, 0xf2, 0x97, 0x89, 0x20,
- 0xe7, 0xca, 0x5c, 0xa7, 0xb3, 0x20, 0x1e, 0x17, 0x45, 0xbe, 0xf5, 0xbf, 0x09, 0x58, 0xd9, 0xd1,
- 0x27, 0x7b, 0x44, 0x86, 0x0d, 0x62, 0x62, 0x62, 0x50, 0xd7, 0x44, 0xdd, 0x68, 0xb8, 0xb9, 0xe2,
- 0xad, 0x32, 0x4e, 0x38, 0x3e, 0xea, 0x04, 0x05, 0x58, 0x32, 0x52, 0x80, 0xad, 0x40, 0xc6, 0xa1,
- 0x8e, 0x41, 0x64, 0x2c, 0x12, 0x0d, 0xd5, 0x8a, 0x86, 0x9a, 0x52, 0xf8, 0x8c, 0xc8, 0x1f, 0x01,
- 0xdb, 0xd4, 0x0f, 0x7b, 0x43, 0x9f, 0x40, 0xa9, 0xd7, 0xac, 0xe3, 0x66, 0xbf, 0xd6, 0xf9, 0xae,
- 0xd6, 0xab, 0x6e, 0xf7, 0xaa, 0x8f, 0x1f, 0x6a, 0xdd, 0xce, 0xf6, 0xf7, 0x1e, 0x3d, 0x79, 0xf8,
- 0x0d, 0x25, 0x51, 0x2a, 0x9f, 0x9c, 0x96, 0x6f, 0xb7, 0xab, 0xf5, 0x6d, 0xb1, 0x63, 0xf6, 0xe8,
- 0xcb, 0x9e, 0x6e, 0x7b, 0xfa, 0xe3, 0x87, 0x5d, 0x6a, 0x4f, 0x18, 0x86, 0x2d, 0xeb, 0x62, 0xf4,
- 0xbc, 0x8a, 0x1e, 0xc3, 0x89, 0x4b, 0x8f, 0xe1, 0xe9, 0x69, 0x9e, 0xbc, 0xe4, 0x34, 0xdf, 0x80,
- 0x15, 0xc3, 0xa5, 0x9e, 0xa7, 0xb1, 0xec, 0x9f, 0x98, 0x33, 0xf5, 0xc5, 0x97, 0xce, 0xcf, 0xd6,
- 0x97, 0xeb, 0x8c, 0xdf, 0xe3, 0x6c, 0xa9, 0x7e, 0xd9, 0x88, 0x90, 0x78, 0x4f, 0xea, 0x1f, 0xa5,
- 0x58, 0x22, 0x65, 0x1d, 0x59, 0x36, 0x19, 0x10, 0x0f, 0x3d, 0x87, 0x25, 0xc3, 0x25, 0x26, 0x4b,
- 0xeb, 0x75, 0x3b, 0xfa, 0x11, 0xed, 0x2f, 0xc4, 0xe6, 0x34, 0xa1, 0x60, 0xa5, 0x1e, 0x4a, 0xf5,
- 0x46, 0xc4, 0xc0, 0x8b, 0xc6, 0x85, 0x36, 0xfa, 0x14, 0x96, 0x3c, 0x62, 0x5b, 0xce, 0xf8, 0xa5,
- 0x66, 0x50, 0xc7, 0x27, 0x2f, 0x83, 0x17, 0xb1, 0xeb, 0xf4, 0xf6, 0x9a, 0xdb, 0x4c, 0xaa, 0x2e,
- 0x84, 0x6a, 0xe8, 0xfc, 0x6c, 0x7d, 0xf1, 0x22, 0x0d, 0x2f, 0x4a, 0xcd, 0xb2, 0x5d, 0x6a, 0xc3,
- 0xe2, 0x45, 0x6b, 0xd0, 0x8a, 0xdc, 0xfb, 0x3c, 0x84, 0x04, 0x7b, 0x1b, 0xdd, 0x86, 0x9c, 0x4b,
- 0x06, 0x96, 0xe7, 0xbb, 0xc2, 0xcd, 0x8c, 0x13, 0x52, 0xd8, 0xce, 0x17, 0x5f, 0x40, 0x95, 0x7e,
- 0x1d, 0x66, 0x7a, 0x64, 0x9b, 0xc5, 0xb4, 0x3c, 0x7d, 0x4f, 0xaa, 0xcc, 0xe1, 0xa0, 0xc9, 0xd6,
- 0xe0, 0xd8, 0x0b, 0x13, 0x35, 0xfe, 0x9b, 0xd1, 0x78, 0x46, 0x21, 0xbf, 0x07, 0xe3, 0x39, 0x43,
- 0xf0, 0x61, 0x69, 0x3a, 0xf2, 0x61, 0xe9, 0x0a, 0x64, 0x6c, 0x72, 0x44, 0x6c, 0x71, 0x96, 0x63,
- 0xd1, 0xb8, 0xf7, 0x10, 0x8a, 0xc1, 0x17, 0x8c, 0xfc, 0xcb, 0x89, 0x1c, 0xa4, 0xfb, 0xd5, 0xde,
- 0x33, 0x65, 0x0e, 0x01, 0x64, 0xc5, 0xe2, 0x14, 0xaf, 0x75, 0xf5, 0x4e, 0x7b, 0xa3, 0xb5, 0xa9,
- 0x24, 0xef, 0xfd, 0x2c, 0x05, 0xf9, 0xf0, 0xbd, 0x88, 0x9d, 0x1d, 0xed, 0xe6, 0x8b, 0x60, 0x75,
- 0x87, 0xf4, 0x36, 0x39, 0x46, 0x6f, 0x4f, 0x6f, 0xa1, 0x3e, 0x11, 0x0f, 0xe4, 0x21, 0x3b, 0xb8,
- 0x81, 0x7a, 0x17, 0x72, 0xd5, 0x5e, 0xaf, 0xb5, 0xd9, 0x6e, 0x36, 0x94, 0xcf, 0x12, 0xa5, 0x2f,
- 0x9d, 0x9c, 0x96, 0x97, 0x43, 0x50, 0xd5, 0x13, 0x8b, 0x8f, 0xa3, 0xea, 0xf5, 0x66, 0xb7, 0xdf,
- 0x6c, 0x28, 0xaf, 0x92, 0xb3, 0x28, 0x7e, 0xab, 0xc2, 0x3f, 0xdd, 0xc9, 0x77, 0x71, 0xb3, 0x5b,
- 0xc5, 0xac, 0xc3, 0xcf, 0x92, 0xe2, 0x72, 0x6c, 0xda, 0xa3, 0x4b, 0x46, 0xba, 0xcb, 0xfa, 0x5c,
- 0x0b, 0xbe, 0x85, 0x7b, 0x95, 0x12, 0x9f, 0x77, 0x4c, 0x1f, 0xbf, 0x88, 0x6e, 0x4e, 0x58, 0x6f,
- 0xfc, 0xd5, 0x91, 0xab, 0x49, 0xcd, 0xf4, 0xd6, 0x63, 0xb1, 0x87, 0x69, 0x51, 0x61, 0x1e, 0xef,
- 0xb6, 0xdb, 0x0c, 0xf4, 0x2a, 0x3d, 0x33, 0x3a, 0x3c, 0x76, 0x58, 0xc5, 0x8c, 0xee, 0x42, 0x2e,
- 0x78, 0x94, 0x54, 0x3e, 0x4b, 0xcf, 0x18, 0x54, 0x0f, 0x5e, 0x54, 0x79, 0x87, 0x5b, 0xbb, 0x7d,
- 0xfe, 0xa9, 0xde, 0xab, 0xcc, 0x6c, 0x87, 0x07, 0x63, 0xdf, 0xa4, 0xc7, 0x0e, 0xdb, 0xb3, 0xf2,
- 0x1e, 0xee, 0xb3, 0x8c, 0xb8, 0xb4, 0x08, 0x31, 0xf2, 0x12, 0xee, 0x5d, 0xc8, 0xe1, 0xe6, 0x77,
- 0xc4, 0x57, 0x7d, 0xaf, 0xb2, 0x33, 0x7a, 0x30, 0xf9, 0x94, 0x18, 0xb2, 0xb7, 0x0e, 0xee, 0x6e,
- 0x55, 0xb9, 0xcb, 0x67, 0x51, 0x1d, 0x77, 0x74, 0xa0, 0x3b, 0xc4, 0x9c, 0x7e, 0xe3, 0x12, 0xb2,
- 0xee, 0xfd, 0x0a, 0xe4, 0x82, 0xcc, 0x14, 0xad, 0x41, 0xf6, 0x45, 0x07, 0x3f, 0x6b, 0x62, 0x65,
- 0x4e, 0xf8, 0x30, 0xe0, 0xbc, 0x10, 0x35, 0x45, 0x19, 0xe6, 0x77, 0xaa, 0xed, 0xea, 0x66, 0x13,
- 0x07, 0x57, 0xe4, 0x01, 0x40, 0xa6, 0x57, 0x25, 0x45, 0x76, 0x10, 0xea, 0xac, 0xad, 0xfe, 0xf0,
- 0x27, 0x6b, 0x73, 0x3f, 0xfe, 0xc9, 0xda, 0xdc, 0xab, 0xf3, 0xb5, 0xc4, 0x0f, 0xcf, 0xd7, 0x12,
- 0xff, 0x70, 0xbe, 0x96, 0xf8, 0xf7, 0xf3, 0xb5, 0xc4, 0x5e, 0x96, 0x1f, 0x02, 0x4f, 0xfe, 0x2f,
- 0x00, 0x00, 0xff, 0xff, 0x4b, 0xdb, 0xdc, 0xec, 0xf0, 0x31, 0x00, 0x00,
+ // 5043 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x4d, 0x6c, 0x24, 0x49,
+ 0x56, 0xbf, 0xeb, 0xd3, 0x55, 0xaf, 0xca, 0x76, 0x3a, 0xda, 0xdb, 0xe3, 0xae, 0xed, 0xb6, 0x6b,
+ 0x72, 0xa6, 0x77, 0x66, 0x7b, 0xfb, 0x5f, 0xfd, 0xb5, 0xbb, 0xea, 0x99, 0xf9, 0xef, 0xce, 0xd4,
+ 0x97, 0xed, 0xda, 0xb6, 0xab, 0x4a, 0x51, 0xe5, 0xee, 0x5d, 0x24, 0x48, 0xa5, 0x33, 0xc3, 0xe5,
+ 0x1c, 0x67, 0x65, 0x14, 0x99, 0x59, 0x76, 0x17, 0x0b, 0xa2, 0xc5, 0x01, 0x90, 0x4f, 0x70, 0x62,
+ 0x11, 0x32, 0x42, 0x82, 0x13, 0x42, 0xe2, 0x00, 0x12, 0x82, 0xd3, 0x20, 0x71, 0xd8, 0x1b, 0x2c,
+ 0x48, 0x68, 0x05, 0x92, 0x61, 0x7d, 0xe0, 0xb6, 0x82, 0x0b, 0xe2, 0x02, 0x12, 0x8a, 0x8f, 0xcc,
+ 0x4a, 0x57, 0xa7, 0xed, 0x1e, 0x66, 0x2f, 0x76, 0xc5, 0x7b, 0xbf, 0xf7, 0xe2, 0xc5, 0x8b, 0x88,
+ 0x17, 0xef, 0x45, 0x24, 0xdc, 0x1b, 0x58, 0xfe, 0xc1, 0x78, 0xaf, 0x62, 0xd0, 0xe1, 0x03, 0x93,
+ 0x1a, 0x87, 0xc4, 0x7d, 0xe0, 0x1d, 0xeb, 0xee, 0xf0, 0xd0, 0xf2, 0x1f, 0xe8, 0x23, 0xeb, 0x81,
+ 0x3f, 0x19, 0x11, 0xaf, 0x32, 0x72, 0xa9, 0x4f, 0x11, 0x12, 0x80, 0x4a, 0x00, 0xa8, 0x1c, 0x3d,
+ 0x2a, 0xad, 0x0f, 0x28, 0x1d, 0xd8, 0xe4, 0x01, 0x47, 0xec, 0x8d, 0xf7, 0x1f, 0xf8, 0xd6, 0x90,
+ 0x78, 0xbe, 0x3e, 0x1c, 0x09, 0xa1, 0xd2, 0xda, 0x2c, 0xc0, 0x1c, 0xbb, 0xba, 0x6f, 0x51, 0x47,
+ 0xf2, 0x57, 0x06, 0x74, 0x40, 0xf9, 0xcf, 0x07, 0xec, 0x97, 0xa0, 0xaa, 0xeb, 0x30, 0xff, 0x9c,
+ 0xb8, 0x9e, 0x45, 0x1d, 0xb4, 0x02, 0x19, 0xcb, 0x31, 0xc9, 0xcb, 0xd5, 0x44, 0x39, 0xf1, 0x7e,
+ 0x1a, 0x8b, 0x86, 0xfa, 0x10, 0xa0, 0xc5, 0x7e, 0x34, 0x1d, 0xdf, 0x9d, 0x20, 0x05, 0x52, 0x87,
+ 0x64, 0xc2, 0x11, 0x79, 0xcc, 0x7e, 0x32, 0xca, 0x91, 0x6e, 0xaf, 0x26, 0x05, 0xe5, 0x48, 0xb7,
+ 0xd5, 0x9f, 0x24, 0xa0, 0x50, 0x75, 0x1c, 0xea, 0xf3, 0xde, 0x3d, 0x84, 0x20, 0xed, 0xe8, 0x43,
+ 0x22, 0x85, 0xf8, 0x6f, 0x54, 0x87, 0xac, 0xad, 0xef, 0x11, 0xdb, 0x5b, 0x4d, 0x96, 0x53, 0xef,
+ 0x17, 0x1e, 0x7f, 0xad, 0xf2, 0xfa, 0x90, 0x2b, 0x11, 0x25, 0x95, 0x6d, 0x8e, 0xe6, 0x46, 0x60,
+ 0x29, 0x8a, 0xbe, 0x0d, 0xf3, 0x96, 0x63, 0x5a, 0x06, 0xf1, 0x56, 0xd3, 0x5c, 0xcb, 0x5a, 0x9c,
+ 0x96, 0xa9, 0xf5, 0xb5, 0xf4, 0x0f, 0xcf, 0xd6, 0xe7, 0x70, 0x20, 0x54, 0xfa, 0x00, 0x0a, 0x11,
+ 0xb5, 0x31, 0x63, 0x5b, 0x81, 0xcc, 0x91, 0x6e, 0x8f, 0x89, 0x1c, 0x9d, 0x68, 0x7c, 0x98, 0x7c,
+ 0x9a, 0x50, 0x3f, 0x81, 0x95, 0xb6, 0x3e, 0x24, 0xe6, 0x26, 0x71, 0x88, 0x6b, 0x19, 0x98, 0x78,
+ 0x74, 0xec, 0x1a, 0x84, 0x8d, 0xf5, 0xd0, 0x72, 0xcc, 0x60, 0xac, 0xec, 0x77, 0xbc, 0x16, 0xb5,
+ 0x0e, 0x6f, 0x35, 0x2c, 0xcf, 0x70, 0x89, 0x4f, 0x3e, 0xb7, 0x92, 0x54, 0xa0, 0xe4, 0x2c, 0x01,
+ 0x4b, 0xb3, 0xd2, 0x3f, 0x07, 0x37, 0x98, 0x8b, 0x4d, 0xcd, 0x95, 0x14, 0xcd, 0x1b, 0x11, 0x83,
+ 0x2b, 0x2b, 0x3c, 0x7e, 0x3f, 0xce, 0x43, 0x71, 0x23, 0xd9, 0x9a, 0xc3, 0xcb, 0x5c, 0x4d, 0x40,
+ 0xe8, 0x8d, 0x88, 0x81, 0x0c, 0xb8, 0x69, 0x4a, 0xa3, 0x67, 0xd4, 0x27, 0xb9, 0xfa, 0xd8, 0x69,
+ 0xbc, 0x64, 0x98, 0x5b, 0x73, 0x78, 0x25, 0x50, 0x16, 0xed, 0xa4, 0x06, 0x90, 0x0b, 0x74, 0xab,
+ 0x3f, 0x48, 0x40, 0x3e, 0x60, 0x7a, 0xe8, 0xab, 0x90, 0x77, 0x74, 0x87, 0x6a, 0xc6, 0x68, 0xec,
+ 0xf1, 0x01, 0xa5, 0x6a, 0xc5, 0xf3, 0xb3, 0xf5, 0x5c, 0x5b, 0x77, 0x68, 0xbd, 0xbb, 0xeb, 0xe1,
+ 0x1c, 0x63, 0xd7, 0x47, 0x63, 0x0f, 0xbd, 0x0d, 0xc5, 0x21, 0x19, 0x52, 0x77, 0xa2, 0xed, 0x4d,
+ 0x7c, 0xe2, 0x49, 0xb7, 0x15, 0x04, 0xad, 0xc6, 0x48, 0xe8, 0x5b, 0x30, 0x3f, 0x10, 0x26, 0xad,
+ 0xa6, 0xf8, 0xf2, 0x79, 0x27, 0xce, 0xfa, 0x19, 0xab, 0x71, 0x20, 0xa3, 0xfe, 0x56, 0x02, 0x56,
+ 0x42, 0x2a, 0xf9, 0xc5, 0xb1, 0xe5, 0x92, 0x21, 0x71, 0x7c, 0x0f, 0x7d, 0x03, 0xb2, 0xb6, 0x35,
+ 0xb4, 0x7c, 0x4f, 0xfa, 0xfc, 0x4e, 0x9c, 0xda, 0x70, 0x50, 0x58, 0x82, 0x51, 0x15, 0x8a, 0x2e,
+ 0xf1, 0x88, 0x7b, 0x24, 0x56, 0xbc, 0xf4, 0xe8, 0x35, 0xc2, 0x17, 0x44, 0xd4, 0x0d, 0xc8, 0x75,
+ 0x6d, 0xdd, 0xdf, 0xa7, 0xee, 0x10, 0xa9, 0x50, 0xd4, 0x5d, 0xe3, 0xc0, 0xf2, 0x89, 0xe1, 0x8f,
+ 0xdd, 0x60, 0xf7, 0x5d, 0xa0, 0xa1, 0x9b, 0x90, 0xa4, 0xa2, 0xa3, 0x7c, 0x2d, 0x7b, 0x7e, 0xb6,
+ 0x9e, 0xec, 0xf4, 0x70, 0x92, 0x7a, 0xea, 0x47, 0xb0, 0xdc, 0xb5, 0xc7, 0x03, 0xcb, 0x69, 0x10,
+ 0xcf, 0x70, 0xad, 0x11, 0xd3, 0xce, 0x56, 0x25, 0x8b, 0x51, 0xc1, 0xaa, 0x64, 0xbf, 0xc3, 0xad,
+ 0x9d, 0x9c, 0x6e, 0x6d, 0xf5, 0x37, 0x92, 0xb0, 0xdc, 0x74, 0x06, 0x96, 0x43, 0xa2, 0xd2, 0x77,
+ 0x61, 0x91, 0x70, 0xa2, 0x76, 0x24, 0xc2, 0x8d, 0xd4, 0xb3, 0x20, 0xa8, 0x41, 0x0c, 0x6a, 0xcd,
+ 0xc4, 0x85, 0x47, 0x71, 0xc3, 0x7f, 0x4d, 0x7b, 0x6c, 0x74, 0x68, 0xc2, 0xfc, 0x88, 0x0f, 0xc2,
+ 0x93, 0xd3, 0x7b, 0x37, 0x4e, 0xd7, 0x6b, 0xe3, 0x0c, 0x82, 0x84, 0x94, 0xfd, 0x22, 0x41, 0xe2,
+ 0x4f, 0x92, 0xb0, 0xd4, 0xa6, 0xe6, 0x05, 0x3f, 0x94, 0x20, 0x77, 0x40, 0x3d, 0x3f, 0x12, 0x10,
+ 0xc3, 0x36, 0x7a, 0x0a, 0xb9, 0x91, 0x9c, 0x3e, 0x39, 0xfb, 0xb7, 0xe3, 0x4d, 0x16, 0x18, 0x1c,
+ 0xa2, 0xd1, 0x47, 0x90, 0x0f, 0xb6, 0x0c, 0x1b, 0xed, 0x1b, 0x2c, 0x9c, 0x29, 0x1e, 0x7d, 0x0b,
+ 0xb2, 0x62, 0x12, 0x56, 0xd3, 0x5c, 0xf2, 0xee, 0x1b, 0xf9, 0x1c, 0x4b, 0x21, 0xb4, 0x09, 0x39,
+ 0xdf, 0xf6, 0x34, 0xcb, 0xd9, 0xa7, 0xab, 0x19, 0xae, 0x60, 0x3d, 0x36, 0xc8, 0x50, 0x93, 0xf4,
+ 0xb7, 0x7b, 0x2d, 0x67, 0x9f, 0xd6, 0x0a, 0xe7, 0x67, 0xeb, 0xf3, 0xb2, 0x81, 0xe7, 0x7d, 0xdb,
+ 0x63, 0x3f, 0xd4, 0xdf, 0x4e, 0x40, 0x21, 0x82, 0x42, 0x77, 0x00, 0x7c, 0x77, 0xec, 0xf9, 0x9a,
+ 0x4b, 0xa9, 0xcf, 0x9d, 0x55, 0xc4, 0x79, 0x4e, 0xc1, 0x94, 0xfa, 0xa8, 0x02, 0x37, 0x0c, 0xe2,
+ 0xfa, 0x9a, 0xe5, 0x79, 0x63, 0xe2, 0x6a, 0xde, 0x78, 0xef, 0x53, 0x62, 0xf8, 0xdc, 0x71, 0x45,
+ 0xbc, 0xcc, 0x58, 0x2d, 0xce, 0xe9, 0x09, 0x06, 0x7a, 0x02, 0x37, 0xa3, 0xf8, 0xd1, 0x78, 0xcf,
+ 0xb6, 0x0c, 0x8d, 0x4d, 0x66, 0x8a, 0x8b, 0xdc, 0x98, 0x8a, 0x74, 0x39, 0xef, 0x19, 0x99, 0xa8,
+ 0x3f, 0x4e, 0x80, 0x82, 0xf5, 0x7d, 0x7f, 0x87, 0x0c, 0xf7, 0x88, 0xdb, 0xf3, 0x75, 0x7f, 0xec,
+ 0xa1, 0x9b, 0x90, 0xb5, 0x89, 0x6e, 0x12, 0x97, 0x1b, 0x95, 0xc3, 0xb2, 0x85, 0x76, 0xd9, 0x0e,
+ 0xd6, 0x8d, 0x03, 0x7d, 0xcf, 0xb2, 0x2d, 0x7f, 0xc2, 0x4d, 0x59, 0x8c, 0x5f, 0xc2, 0xb3, 0x3a,
+ 0x2b, 0x38, 0x22, 0x88, 0x2f, 0xa8, 0x41, 0xab, 0x30, 0x3f, 0x24, 0x9e, 0xa7, 0x0f, 0x08, 0xb7,
+ 0x34, 0x8f, 0x83, 0xa6, 0xfa, 0x11, 0x14, 0xa3, 0x72, 0xa8, 0x00, 0xf3, 0xbb, 0xed, 0x67, 0xed,
+ 0xce, 0x8b, 0xb6, 0x32, 0x87, 0x96, 0xa0, 0xb0, 0xdb, 0xc6, 0xcd, 0x6a, 0x7d, 0xab, 0x5a, 0xdb,
+ 0x6e, 0x2a, 0x09, 0xb4, 0x00, 0xf9, 0x69, 0x33, 0xa9, 0xfe, 0x59, 0x02, 0x80, 0xb9, 0x5b, 0x0e,
+ 0xea, 0x43, 0xc8, 0x78, 0xbe, 0xee, 0x8b, 0x55, 0xb9, 0xf8, 0xf8, 0xdd, 0xcb, 0xe6, 0x50, 0xda,
+ 0xcb, 0xfe, 0x11, 0x2c, 0x44, 0xa2, 0x16, 0x26, 0x2f, 0x58, 0xc8, 0x02, 0x84, 0x6e, 0x9a, 0xae,
+ 0x34, 0x9c, 0xff, 0x56, 0x3f, 0x82, 0x0c, 0x97, 0xbe, 0x68, 0x6e, 0x0e, 0xd2, 0x0d, 0xf6, 0x2b,
+ 0x81, 0xf2, 0x90, 0xc1, 0xcd, 0x6a, 0xe3, 0x7b, 0x4a, 0x12, 0x29, 0x50, 0x6c, 0xb4, 0x7a, 0xf5,
+ 0x4e, 0xbb, 0xdd, 0xac, 0xf7, 0x9b, 0x0d, 0x25, 0xa5, 0xde, 0x85, 0x4c, 0x6b, 0xc8, 0x34, 0xdf,
+ 0x66, 0x4b, 0x7e, 0x9f, 0xb8, 0xc4, 0x31, 0x82, 0x9d, 0x34, 0x25, 0xa8, 0x3f, 0x2d, 0x40, 0x66,
+ 0x87, 0x8e, 0x1d, 0x1f, 0x3d, 0x8e, 0x84, 0xad, 0xc5, 0xf8, 0x0c, 0x81, 0x03, 0x2b, 0xfd, 0xc9,
+ 0x88, 0xc8, 0xb0, 0x76, 0x13, 0xb2, 0x62, 0x73, 0xc8, 0xe1, 0xc8, 0x16, 0xa3, 0xfb, 0xba, 0x3b,
+ 0x20, 0xbe, 0x1c, 0x8f, 0x6c, 0xa1, 0xf7, 0xd9, 0x89, 0xa5, 0x9b, 0xd4, 0xb1, 0x27, 0x7c, 0x0f,
+ 0xe5, 0xc4, 0xb1, 0x84, 0x89, 0x6e, 0x76, 0x1c, 0x7b, 0x82, 0x43, 0x2e, 0xda, 0x82, 0xe2, 0x9e,
+ 0xe5, 0x98, 0x1a, 0x1d, 0x89, 0x20, 0x9f, 0xb9, 0x7c, 0xc7, 0x09, 0xab, 0x6a, 0x96, 0x63, 0x76,
+ 0x04, 0x18, 0x17, 0xf6, 0xa6, 0x0d, 0xd4, 0x86, 0xc5, 0x23, 0x6a, 0x8f, 0x87, 0x24, 0xd4, 0x95,
+ 0xe5, 0xba, 0xde, 0xbb, 0x5c, 0xd7, 0x73, 0x8e, 0x0f, 0xb4, 0x2d, 0x1c, 0x45, 0x9b, 0xe8, 0x19,
+ 0x2c, 0xf8, 0xc3, 0xd1, 0xbe, 0x17, 0xaa, 0x9b, 0xe7, 0xea, 0xbe, 0x72, 0x85, 0xc3, 0x18, 0x3c,
+ 0xd0, 0x56, 0xf4, 0x23, 0x2d, 0xb4, 0x09, 0x05, 0x83, 0x3a, 0x9e, 0xe5, 0xf9, 0xc4, 0x31, 0x26,
+ 0xab, 0x39, 0xee, 0xfb, 0x2b, 0x46, 0x59, 0x9f, 0x82, 0x71, 0x54, 0xb2, 0xf4, 0x6b, 0x29, 0x28,
+ 0x44, 0x5c, 0x80, 0x7a, 0x50, 0x18, 0xb9, 0x74, 0xa4, 0x0f, 0xf8, 0x89, 0x27, 0x27, 0xf5, 0xd1,
+ 0x1b, 0xb9, 0xaf, 0xd2, 0x9d, 0x0a, 0xe2, 0xa8, 0x16, 0xf5, 0x34, 0x09, 0x85, 0x08, 0x13, 0xdd,
+ 0x83, 0x1c, 0xee, 0xe2, 0xd6, 0xf3, 0x6a, 0xbf, 0xa9, 0xcc, 0x95, 0x6e, 0x9f, 0x9c, 0x96, 0x57,
+ 0xb9, 0xb6, 0xa8, 0x82, 0xae, 0x6b, 0x1d, 0xb1, 0x35, 0xfc, 0x3e, 0xcc, 0x07, 0xd0, 0x44, 0xe9,
+ 0xcb, 0x27, 0xa7, 0xe5, 0xb7, 0x66, 0xa1, 0x11, 0x24, 0xee, 0x6d, 0x55, 0x71, 0xb3, 0xa1, 0x24,
+ 0xe3, 0x91, 0xb8, 0x77, 0xa0, 0xbb, 0xc4, 0x44, 0x5f, 0x81, 0xac, 0x04, 0xa6, 0x4a, 0xa5, 0x93,
+ 0xd3, 0xf2, 0xcd, 0x59, 0xe0, 0x14, 0x87, 0x7b, 0xdb, 0xd5, 0xe7, 0x4d, 0x25, 0x1d, 0x8f, 0xc3,
+ 0x3d, 0x5b, 0x3f, 0x22, 0xe8, 0x5d, 0xc8, 0x08, 0x58, 0xa6, 0x74, 0xeb, 0xe4, 0xb4, 0xfc, 0xa5,
+ 0xd7, 0xd4, 0x31, 0x54, 0x69, 0xf5, 0x37, 0xff, 0x70, 0x6d, 0xee, 0xaf, 0xfe, 0x68, 0x4d, 0x99,
+ 0x65, 0x97, 0xfe, 0x3b, 0x01, 0x0b, 0x17, 0xd6, 0x0e, 0x52, 0x21, 0xeb, 0x50, 0x83, 0x8e, 0xc4,
+ 0x41, 0x98, 0xab, 0xc1, 0xf9, 0xd9, 0x7a, 0xb6, 0x4d, 0xeb, 0x74, 0x34, 0xc1, 0x92, 0x83, 0x9e,
+ 0xcd, 0x1c, 0xe5, 0x4f, 0xde, 0x70, 0x61, 0xc6, 0x1e, 0xe6, 0x1f, 0xc3, 0x82, 0xe9, 0x5a, 0x47,
+ 0xc4, 0xd5, 0x0c, 0xea, 0xec, 0x5b, 0x03, 0x79, 0xc8, 0x95, 0x62, 0xf3, 0x4d, 0x0e, 0xc4, 0x45,
+ 0x21, 0x50, 0xe7, 0xf8, 0x2f, 0x70, 0x8c, 0x97, 0x9e, 0x43, 0x31, 0xba, 0xd4, 0xd9, 0xb9, 0xe4,
+ 0x59, 0xbf, 0x44, 0x64, 0x62, 0xc9, 0xd3, 0x50, 0x9c, 0x67, 0x14, 0x91, 0x56, 0xbe, 0x07, 0xe9,
+ 0x21, 0x35, 0x85, 0x9e, 0x85, 0xda, 0x0d, 0x96, 0x4d, 0xfc, 0xd3, 0xd9, 0x7a, 0x81, 0x7a, 0x95,
+ 0x0d, 0xcb, 0x26, 0x3b, 0xd4, 0x24, 0x98, 0x03, 0xd4, 0x23, 0x48, 0xb3, 0x98, 0x83, 0xbe, 0x0c,
+ 0xe9, 0x5a, 0xab, 0xdd, 0x50, 0xe6, 0x4a, 0xcb, 0x27, 0xa7, 0xe5, 0x05, 0xee, 0x12, 0xc6, 0x60,
+ 0x6b, 0x17, 0xad, 0x43, 0xf6, 0x79, 0x67, 0x7b, 0x77, 0x87, 0x2d, 0xaf, 0x1b, 0x27, 0xa7, 0xe5,
+ 0xa5, 0x90, 0x2d, 0x9c, 0x86, 0xee, 0x40, 0xa6, 0xbf, 0xd3, 0xdd, 0xe8, 0x29, 0xc9, 0x12, 0x3a,
+ 0x39, 0x2d, 0x2f, 0x86, 0x7c, 0x6e, 0x73, 0x69, 0x59, 0xce, 0x6a, 0x3e, 0xa4, 0xab, 0x3f, 0x4a,
+ 0x40, 0x21, 0xb2, 0xe1, 0xd8, 0xc2, 0x6c, 0x34, 0x37, 0xaa, 0xbb, 0xdb, 0x7d, 0x65, 0x2e, 0xb2,
+ 0x30, 0x23, 0x90, 0x06, 0xd9, 0xd7, 0xc7, 0x36, 0x8b, 0x73, 0x50, 0xef, 0xb4, 0x7b, 0xad, 0x5e,
+ 0xbf, 0xd9, 0xee, 0x2b, 0x89, 0xd2, 0xea, 0xc9, 0x69, 0x79, 0x65, 0x16, 0xbc, 0x31, 0xb6, 0x6d,
+ 0xb6, 0x34, 0xeb, 0xd5, 0xfa, 0x16, 0x5f, 0xeb, 0xd3, 0xa5, 0x19, 0x41, 0xd5, 0x75, 0xe3, 0x80,
+ 0x98, 0xe8, 0x3e, 0xe4, 0x1b, 0xcd, 0xed, 0xe6, 0x66, 0x95, 0x47, 0xf7, 0xd2, 0x9d, 0x93, 0xd3,
+ 0xf2, 0xad, 0xd7, 0x7b, 0xb7, 0xc9, 0x40, 0xf7, 0x89, 0x39, 0xb3, 0x44, 0x23, 0x10, 0xf5, 0x3f,
+ 0x93, 0xb0, 0x80, 0x59, 0x39, 0xec, 0xfa, 0x5d, 0x6a, 0x5b, 0xc6, 0x04, 0x75, 0x21, 0x6f, 0x50,
+ 0xc7, 0xb4, 0x22, 0x71, 0xe2, 0xf1, 0x25, 0x29, 0xd1, 0x54, 0x2a, 0x68, 0xd5, 0x03, 0x49, 0x3c,
+ 0x55, 0x82, 0x1e, 0x40, 0xc6, 0x24, 0xb6, 0x3e, 0x91, 0xb9, 0xd9, 0xad, 0x8a, 0x28, 0xb8, 0x2b,
+ 0x41, 0xc1, 0x5d, 0x69, 0xc8, 0x82, 0x1b, 0x0b, 0x1c, 0xaf, 0x41, 0xf4, 0x97, 0x9a, 0xee, 0xfb,
+ 0x64, 0x38, 0xf2, 0x45, 0x62, 0x96, 0xc6, 0x85, 0xa1, 0xfe, 0xb2, 0x2a, 0x49, 0xe8, 0x11, 0x64,
+ 0x8f, 0x2d, 0xc7, 0xa4, 0xc7, 0x32, 0xf7, 0xba, 0x42, 0xa9, 0x04, 0xaa, 0x27, 0x2c, 0x25, 0x99,
+ 0x31, 0x93, 0xad, 0xa1, 0x76, 0xa7, 0xdd, 0x0c, 0xd6, 0x90, 0xe4, 0x77, 0x9c, 0x36, 0x75, 0xd8,
+ 0xfe, 0x87, 0x4e, 0x5b, 0xdb, 0xa8, 0xb6, 0xb6, 0x77, 0x31, 0x5b, 0x47, 0x2b, 0x27, 0xa7, 0x65,
+ 0x25, 0x84, 0x6c, 0xe8, 0x96, 0xcd, 0x8a, 0x81, 0x5b, 0x90, 0xaa, 0xb6, 0xbf, 0xa7, 0x24, 0x4b,
+ 0xca, 0xc9, 0x69, 0xb9, 0x18, 0xb2, 0xab, 0xce, 0x64, 0xea, 0xf7, 0xd9, 0x7e, 0xd5, 0xbf, 0x4d,
+ 0x41, 0x71, 0x77, 0x64, 0xea, 0x3e, 0x11, 0xfb, 0x0c, 0x95, 0xa1, 0x30, 0xd2, 0x5d, 0xdd, 0xb6,
+ 0x89, 0x6d, 0x79, 0x43, 0x79, 0x95, 0x10, 0x25, 0xa1, 0x0f, 0xde, 0xd4, 0x8d, 0xb5, 0x1c, 0xdb,
+ 0x3b, 0x3f, 0xf8, 0x97, 0xf5, 0x44, 0xe0, 0xd0, 0x5d, 0x58, 0xdc, 0x17, 0xd6, 0x6a, 0xba, 0xc1,
+ 0x27, 0x36, 0xc5, 0x27, 0xb6, 0x12, 0x37, 0xb1, 0x51, 0xb3, 0x2a, 0x72, 0x90, 0x55, 0x2e, 0x85,
+ 0x17, 0xf6, 0xa3, 0x4d, 0xf4, 0x04, 0xe6, 0x87, 0xd4, 0xb1, 0x7c, 0xea, 0x5e, 0x3f, 0x0b, 0x01,
+ 0x12, 0xdd, 0x83, 0x65, 0x36, 0xb9, 0x81, 0x3d, 0x9c, 0xcd, 0x8f, 0xf3, 0x24, 0x5e, 0x1a, 0xea,
+ 0x2f, 0x65, 0x87, 0x98, 0x91, 0x51, 0x0d, 0x32, 0xd4, 0x65, 0xf9, 0x62, 0x96, 0x9b, 0x7b, 0xff,
+ 0x5a, 0x73, 0x45, 0xa3, 0xc3, 0x64, 0xb0, 0x10, 0x55, 0xbf, 0x09, 0x0b, 0x17, 0x06, 0xc1, 0xd2,
+ 0xa4, 0x6e, 0x75, 0xb7, 0xd7, 0x54, 0xe6, 0x50, 0x11, 0x72, 0xf5, 0x4e, 0xbb, 0xdf, 0x6a, 0xef,
+ 0xb2, 0x3c, 0xaf, 0x08, 0x39, 0xdc, 0xd9, 0xde, 0xae, 0x55, 0xeb, 0xcf, 0x94, 0xa4, 0x5a, 0x81,
+ 0x42, 0x44, 0x1b, 0x5a, 0x04, 0xe8, 0xf5, 0x3b, 0x5d, 0x6d, 0xa3, 0x85, 0x7b, 0x7d, 0x91, 0x25,
+ 0xf6, 0xfa, 0x55, 0xdc, 0x97, 0x84, 0x84, 0xfa, 0xef, 0xc9, 0x60, 0x46, 0x65, 0x62, 0x58, 0xbb,
+ 0x98, 0x18, 0x5e, 0x61, 0xbc, 0x4c, 0x0d, 0xa7, 0x8d, 0x30, 0x41, 0xfc, 0x00, 0x80, 0x2f, 0x1c,
+ 0x62, 0x6a, 0xba, 0x2f, 0x27, 0xbe, 0xf4, 0x9a, 0x93, 0xfb, 0xc1, 0x8d, 0x16, 0xce, 0x4b, 0x74,
+ 0xd5, 0x47, 0xdf, 0x82, 0xa2, 0x41, 0x87, 0x23, 0x9b, 0x48, 0xe1, 0xd4, 0xb5, 0xc2, 0x85, 0x10,
+ 0x5f, 0xf5, 0xa3, 0xa9, 0x69, 0xfa, 0x62, 0xf2, 0xfc, 0xeb, 0x89, 0xc0, 0x33, 0x31, 0xd9, 0x68,
+ 0x11, 0x72, 0xbb, 0xdd, 0x46, 0xb5, 0xdf, 0x6a, 0x6f, 0x2a, 0x09, 0x04, 0x90, 0xe5, 0xae, 0x6e,
+ 0x28, 0x49, 0x96, 0x45, 0xd7, 0x3b, 0x3b, 0xdd, 0xed, 0x26, 0x8f, 0x58, 0x68, 0x05, 0x94, 0xc0,
+ 0xd9, 0x1a, 0x77, 0x64, 0xb3, 0xa1, 0xa4, 0xd1, 0x0d, 0x58, 0x0a, 0xa9, 0x52, 0x32, 0x83, 0x6e,
+ 0x02, 0x0a, 0x89, 0x53, 0x15, 0x59, 0xf5, 0x57, 0x60, 0xa9, 0x4e, 0x1d, 0x5f, 0xb7, 0x9c, 0xb0,
+ 0xc2, 0x78, 0xcc, 0x06, 0x2d, 0x49, 0x9a, 0x25, 0x6f, 0x82, 0x6a, 0x4b, 0xe7, 0x67, 0xeb, 0x85,
+ 0x10, 0xda, 0x6a, 0xf0, 0x54, 0x49, 0x36, 0x4c, 0xb6, 0x7f, 0x47, 0x96, 0xc9, 0x9d, 0x9b, 0xa9,
+ 0xcd, 0x9f, 0x9f, 0xad, 0xa7, 0xba, 0xad, 0x06, 0x66, 0x34, 0xf4, 0x65, 0xc8, 0x93, 0x97, 0x96,
+ 0xaf, 0x19, 0xec, 0x5c, 0x62, 0x0e, 0xcc, 0xe0, 0x1c, 0x23, 0xd4, 0xd9, 0x31, 0x54, 0x03, 0xe8,
+ 0x52, 0xd7, 0x97, 0x3d, 0x7f, 0x1d, 0x32, 0x23, 0xea, 0xf2, 0xbb, 0x8b, 0x4b, 0x6f, 0xd4, 0x18,
+ 0x5c, 0x2c, 0x54, 0x2c, 0xc0, 0xea, 0xef, 0xa6, 0x00, 0xfa, 0xba, 0x77, 0x28, 0x95, 0x3c, 0x85,
+ 0x7c, 0x78, 0x3b, 0x29, 0x2f, 0x41, 0xae, 0x9c, 0xed, 0x10, 0x8c, 0x9e, 0x04, 0x8b, 0x4d, 0xd4,
+ 0x4e, 0xb1, 0x45, 0x6c, 0xd0, 0x51, 0x5c, 0xf9, 0x71, 0xb1, 0x40, 0x62, 0xc7, 0x3c, 0x71, 0x5d,
+ 0x39, 0xf3, 0xec, 0x27, 0xaa, 0xf3, 0x63, 0x41, 0x38, 0x4d, 0x66, 0xdf, 0xb1, 0xd7, 0x3e, 0x33,
+ 0x33, 0xb2, 0x35, 0x87, 0xa7, 0x72, 0xe8, 0x63, 0x28, 0xb0, 0x71, 0x6b, 0x1e, 0xe7, 0xc9, 0xc4,
+ 0xfb, 0x52, 0x57, 0x09, 0x0d, 0x18, 0x46, 0x53, 0x2f, 0xdf, 0x01, 0xd0, 0x47, 0x23, 0xdb, 0x22,
+ 0xa6, 0xb6, 0x37, 0xe1, 0x99, 0x76, 0x1e, 0xe7, 0x25, 0xa5, 0x36, 0x61, 0xdb, 0x25, 0x60, 0xeb,
+ 0x3e, 0xcf, 0x9e, 0xaf, 0x71, 0xa0, 0x44, 0x57, 0xfd, 0x9a, 0x02, 0x8b, 0xee, 0xd8, 0x61, 0x0e,
+ 0x95, 0xd6, 0xa9, 0x7f, 0x9a, 0x84, 0xb7, 0xda, 0xc4, 0x3f, 0xa6, 0xee, 0x61, 0xd5, 0xf7, 0x75,
+ 0xe3, 0x60, 0x48, 0x1c, 0x39, 0x7d, 0x91, 0x82, 0x26, 0x71, 0xa1, 0xa0, 0x59, 0x85, 0x79, 0xdd,
+ 0xb6, 0x74, 0x8f, 0x88, 0xe4, 0x2d, 0x8f, 0x83, 0x26, 0x2b, 0xbb, 0x58, 0x11, 0x47, 0x3c, 0x8f,
+ 0x88, 0x7b, 0x15, 0x66, 0x78, 0x40, 0x40, 0xdf, 0x87, 0x9b, 0x32, 0x4d, 0xd3, 0xc3, 0xae, 0x58,
+ 0x41, 0x11, 0x5c, 0xd0, 0x36, 0x63, 0xab, 0xca, 0x78, 0xe3, 0x64, 0x1e, 0x37, 0x25, 0x77, 0x46,
+ 0xbe, 0xcc, 0x0a, 0x57, 0xcc, 0x18, 0x56, 0x69, 0x13, 0x6e, 0x5d, 0x2a, 0xf2, 0xb9, 0xee, 0x6d,
+ 0xfe, 0x21, 0x09, 0xd0, 0xea, 0x56, 0x77, 0xa4, 0x93, 0x1a, 0x90, 0xdd, 0xd7, 0x87, 0x96, 0x3d,
+ 0xb9, 0x2a, 0x02, 0x4e, 0xf1, 0x95, 0xaa, 0x70, 0xc7, 0x06, 0x97, 0xc1, 0x52, 0x96, 0xd7, 0x94,
+ 0xe3, 0x3d, 0x87, 0xf8, 0x61, 0x4d, 0xc9, 0x5b, 0xcc, 0x0c, 0x57, 0x77, 0xc2, 0xa5, 0x2b, 0x1a,
+ 0x6c, 0x02, 0x58, 0xca, 0x73, 0xac, 0x4f, 0x82, 0xb0, 0x25, 0x9b, 0x68, 0x8b, 0xdf, 0x8e, 0x12,
+ 0xf7, 0x88, 0x98, 0xab, 0x19, 0xee, 0xd4, 0xeb, 0xec, 0xc1, 0x12, 0x2e, 0x7c, 0x17, 0x4a, 0x97,
+ 0x3e, 0xe2, 0x29, 0xd3, 0x94, 0xf5, 0xb9, 0x7c, 0xf4, 0x10, 0x16, 0x2e, 0x8c, 0xf3, 0xb5, 0x62,
+ 0xbe, 0xd5, 0x7d, 0xfe, 0x75, 0x25, 0x2d, 0x7f, 0x7d, 0x53, 0xc9, 0xaa, 0x7f, 0x9c, 0x12, 0x81,
+ 0x46, 0x7a, 0x35, 0xfe, 0x55, 0x20, 0xc7, 0x57, 0xb7, 0x41, 0x6d, 0x19, 0x00, 0xde, 0xbb, 0x3a,
+ 0xfe, 0xb0, 0x9a, 0x8e, 0xc3, 0x71, 0x28, 0x88, 0xd6, 0xa1, 0x20, 0x56, 0xb1, 0xc6, 0x36, 0x1c,
+ 0x77, 0xeb, 0x02, 0x06, 0x41, 0x62, 0x92, 0xe8, 0x2e, 0x2c, 0xf2, 0xcb, 0x1f, 0xef, 0x80, 0x98,
+ 0x02, 0x93, 0xe6, 0x98, 0x85, 0x90, 0xca, 0x61, 0x3b, 0x50, 0x94, 0x04, 0x8d, 0xe7, 0xf3, 0x19,
+ 0x6e, 0xd0, 0xbd, 0xeb, 0x0c, 0x12, 0x22, 0x3c, 0xcd, 0x2f, 0x8c, 0xa6, 0x0d, 0xb5, 0x01, 0xb9,
+ 0xc0, 0x58, 0xb4, 0x0a, 0xa9, 0x7e, 0xbd, 0xab, 0xcc, 0x95, 0x96, 0x4e, 0x4e, 0xcb, 0x85, 0x80,
+ 0xdc, 0xaf, 0x77, 0x19, 0x67, 0xb7, 0xd1, 0x55, 0x12, 0x17, 0x39, 0xbb, 0x8d, 0x6e, 0x29, 0xcd,
+ 0x72, 0x30, 0x75, 0x1f, 0x0a, 0x91, 0x1e, 0xd0, 0x3b, 0x30, 0xdf, 0x6a, 0x6f, 0xe2, 0x66, 0xaf,
+ 0xa7, 0xcc, 0x95, 0x6e, 0x9e, 0x9c, 0x96, 0x51, 0x84, 0xdb, 0x72, 0x06, 0x6c, 0x7e, 0xd0, 0x1d,
+ 0x48, 0x6f, 0x75, 0xd8, 0xd9, 0x2e, 0x0a, 0x88, 0x08, 0x62, 0x8b, 0x7a, 0x7e, 0xe9, 0x86, 0x4c,
+ 0xee, 0xa2, 0x8a, 0xd5, 0xdf, 0x4b, 0x40, 0x56, 0x6c, 0xa6, 0xd8, 0x89, 0xaa, 0xc2, 0x7c, 0x70,
+ 0x4d, 0x20, 0x8a, 0xbb, 0xf7, 0x2e, 0x2f, 0xc4, 0x2a, 0xb2, 0x6e, 0x12, 0xcb, 0x2f, 0x90, 0x2b,
+ 0x7d, 0x08, 0xc5, 0x28, 0xe3, 0x73, 0x2d, 0xbe, 0xef, 0x43, 0x81, 0xad, 0xef, 0xa0, 0x20, 0x7b,
+ 0x0c, 0x59, 0x11, 0x10, 0xc2, 0xb3, 0xe6, 0xf2, 0xaa, 0x50, 0x22, 0xd1, 0x53, 0x98, 0x17, 0x95,
+ 0x64, 0x70, 0x3b, 0xbc, 0x76, 0xf5, 0x2e, 0xc2, 0x01, 0x5c, 0xfd, 0x18, 0xd2, 0x5d, 0x42, 0x5c,
+ 0xe6, 0x7b, 0x87, 0x9a, 0x64, 0x7a, 0x3c, 0xcb, 0x22, 0xd8, 0x24, 0xad, 0x06, 0x2b, 0x82, 0x4d,
+ 0xd2, 0x32, 0xc3, 0xfb, 0xaf, 0x64, 0xe4, 0xfe, 0xab, 0x0f, 0xc5, 0x17, 0xc4, 0x1a, 0x1c, 0xf8,
+ 0xc4, 0xe4, 0x8a, 0xee, 0x43, 0x7a, 0x44, 0x42, 0xe3, 0x57, 0x63, 0x17, 0x18, 0x21, 0x2e, 0xe6,
+ 0x28, 0x16, 0x47, 0x8e, 0xb9, 0xb4, 0x7c, 0xd2, 0x90, 0x2d, 0xf5, 0xef, 0x93, 0xb0, 0xd8, 0xf2,
+ 0xbc, 0xb1, 0xee, 0x18, 0x41, 0xe6, 0xf6, 0xed, 0x8b, 0x99, 0x5b, 0xec, 0xdb, 0xcf, 0x45, 0x91,
+ 0x8b, 0xd7, 0x7a, 0xf2, 0xf4, 0x4c, 0x86, 0xa7, 0xa7, 0xfa, 0xd3, 0x44, 0x70, 0x77, 0x77, 0x37,
+ 0xb2, 0xdd, 0x45, 0x1d, 0x18, 0xd5, 0x44, 0x76, 0x9d, 0x43, 0x87, 0x1e, 0x3b, 0xe8, 0x6d, 0xc8,
+ 0xe0, 0x66, 0xbb, 0xf9, 0x42, 0x49, 0x88, 0xe5, 0x79, 0x01, 0x84, 0x89, 0x43, 0x8e, 0x99, 0xa6,
+ 0x6e, 0xb3, 0xdd, 0x60, 0x99, 0x56, 0x32, 0x46, 0x53, 0x97, 0x38, 0xa6, 0xe5, 0x0c, 0xd0, 0x3b,
+ 0x90, 0x6d, 0xf5, 0x7a, 0xbb, 0xbc, 0x4c, 0x7c, 0xeb, 0xe4, 0xb4, 0x7c, 0xe3, 0x02, 0x8a, 0xdf,
+ 0xdb, 0x9a, 0x0c, 0xc4, 0xca, 0x1c, 0x96, 0x83, 0xc5, 0x80, 0x58, 0xfe, 0x2c, 0x40, 0xb8, 0xd3,
+ 0xaf, 0xf6, 0x9b, 0x4a, 0x26, 0x06, 0x84, 0x29, 0xfb, 0x2b, 0xb7, 0xdb, 0x3f, 0x27, 0x41, 0xa9,
+ 0x1a, 0x06, 0x19, 0xf9, 0x8c, 0x2f, 0x2b, 0xcb, 0x3e, 0xe4, 0x46, 0xec, 0x97, 0x45, 0x82, 0x2c,
+ 0xe9, 0x69, 0xec, 0xeb, 0xe5, 0x8c, 0x5c, 0x05, 0x53, 0x9b, 0x54, 0xcd, 0xa1, 0xe5, 0x79, 0x16,
+ 0x75, 0x04, 0x0d, 0x87, 0x9a, 0x4a, 0xff, 0x91, 0x80, 0x1b, 0x31, 0x08, 0xf4, 0x10, 0xd2, 0x2e,
+ 0xb5, 0x83, 0x39, 0xbc, 0x7d, 0xd9, 0xb5, 0x2c, 0x13, 0xc5, 0x1c, 0x89, 0xd6, 0x00, 0xf4, 0xb1,
+ 0x4f, 0x75, 0xde, 0x3f, 0x9f, 0xbd, 0x1c, 0x8e, 0x50, 0xd0, 0x0b, 0xc8, 0x7a, 0xc4, 0x70, 0x49,
+ 0x90, 0x4b, 0x7f, 0xfc, 0x7f, 0xb5, 0xbe, 0xd2, 0xe3, 0x6a, 0xb0, 0x54, 0x57, 0xaa, 0x40, 0x56,
+ 0x50, 0xd8, 0xb2, 0x37, 0x75, 0x5f, 0x97, 0x97, 0xf6, 0xfc, 0x37, 0x5b, 0x4d, 0xba, 0x3d, 0x08,
+ 0x56, 0x93, 0x6e, 0x0f, 0xd4, 0xbf, 0x49, 0x02, 0x34, 0x5f, 0xfa, 0xc4, 0x75, 0x74, 0xbb, 0x5e,
+ 0x45, 0xcd, 0x48, 0xf4, 0x17, 0xa3, 0xfd, 0x6a, 0xec, 0x4b, 0x44, 0x28, 0x51, 0xa9, 0x57, 0x63,
+ 0xe2, 0xff, 0x2d, 0x48, 0x8d, 0x5d, 0xf9, 0x20, 0x2d, 0xf2, 0xe0, 0x5d, 0xbc, 0x8d, 0x19, 0x0d,
+ 0x35, 0xa7, 0x61, 0x2b, 0x75, 0xf9, 0xb3, 0x73, 0xa4, 0x83, 0xd8, 0xd0, 0xc5, 0x76, 0xbe, 0xa1,
+ 0x6b, 0x06, 0x91, 0x27, 0x47, 0x51, 0xec, 0xfc, 0x7a, 0xb5, 0x4e, 0x5c, 0x1f, 0x67, 0x0d, 0x9d,
+ 0xfd, 0xff, 0x42, 0xf1, 0xed, 0x3e, 0xc0, 0x74, 0x68, 0x68, 0x0d, 0x32, 0xf5, 0x8d, 0x5e, 0x6f,
+ 0x5b, 0x99, 0x13, 0x01, 0x7c, 0xca, 0xe2, 0x64, 0xf5, 0x2f, 0x93, 0x90, 0xab, 0x57, 0xe5, 0xb1,
+ 0x5a, 0x07, 0x85, 0x47, 0x25, 0xfe, 0xd4, 0x41, 0x5e, 0x8e, 0x2c, 0x77, 0x22, 0x03, 0xcb, 0x15,
+ 0x45, 0xed, 0x22, 0x13, 0x61, 0x56, 0x37, 0xb9, 0x00, 0xc2, 0x50, 0x24, 0xd2, 0x09, 0x9a, 0xa1,
+ 0x07, 0x31, 0x7e, 0xed, 0x6a, 0x67, 0x89, 0xf2, 0x64, 0xda, 0xf6, 0x70, 0x21, 0x50, 0x52, 0xd7,
+ 0x3d, 0xf4, 0x01, 0x2c, 0x79, 0xd6, 0xc0, 0xb1, 0x9c, 0x81, 0x16, 0x38, 0x8f, 0xbf, 0xbb, 0xd4,
+ 0x96, 0xcf, 0xcf, 0xd6, 0x17, 0x7a, 0x82, 0x25, 0x7d, 0xb8, 0x20, 0x91, 0x75, 0xee, 0x4a, 0xf4,
+ 0x4d, 0x58, 0x8c, 0x88, 0x32, 0x2f, 0x0a, 0xb7, 0x2b, 0xe7, 0x67, 0xeb, 0xc5, 0x50, 0xf2, 0x19,
+ 0x99, 0xe0, 0x62, 0x28, 0xf8, 0x8c, 0xf0, 0xfb, 0x97, 0x7d, 0xea, 0x1a, 0x44, 0x73, 0xf9, 0x9e,
+ 0xe6, 0x27, 0x78, 0x1a, 0x17, 0x38, 0x4d, 0x6c, 0x73, 0xf5, 0x39, 0xdc, 0xe8, 0xb8, 0xc6, 0x01,
+ 0xf1, 0x7c, 0xe1, 0x0a, 0xe9, 0xc5, 0x8f, 0xe1, 0xb6, 0xaf, 0x7b, 0x87, 0xda, 0x81, 0xe5, 0xf9,
+ 0xd4, 0x9d, 0x68, 0x2e, 0xf1, 0x89, 0xc3, 0xf8, 0x1a, 0x7f, 0xac, 0x95, 0x97, 0x7e, 0xb7, 0x18,
+ 0x66, 0x4b, 0x40, 0x70, 0x80, 0xd8, 0x66, 0x00, 0xb5, 0x05, 0x45, 0x56, 0xa6, 0xc8, 0x8b, 0x33,
+ 0x36, 0x7a, 0xb0, 0xe9, 0x40, 0x7b, 0xe3, 0x63, 0x2a, 0x6f, 0xd3, 0x81, 0xf8, 0xa9, 0x7e, 0x17,
+ 0x94, 0x86, 0xe5, 0x8d, 0x74, 0xdf, 0x38, 0x08, 0x6e, 0x33, 0x51, 0x03, 0x94, 0x03, 0xa2, 0xbb,
+ 0xfe, 0x1e, 0xd1, 0x7d, 0x6d, 0x44, 0x5c, 0x8b, 0x9a, 0xd7, 0xcf, 0xf2, 0x52, 0x28, 0xd2, 0xe5,
+ 0x12, 0xea, 0x7f, 0x25, 0x00, 0xb0, 0xbe, 0x1f, 0x64, 0x64, 0x5f, 0x83, 0x65, 0xcf, 0xd1, 0x47,
+ 0xde, 0x01, 0xf5, 0x35, 0xcb, 0xf1, 0x89, 0x7b, 0xa4, 0xdb, 0xf2, 0x02, 0x47, 0x09, 0x18, 0x2d,
+ 0x49, 0x47, 0xf7, 0x01, 0x1d, 0x12, 0x32, 0xd2, 0xa8, 0x6d, 0x6a, 0x01, 0x53, 0x3c, 0x25, 0xa7,
+ 0xb1, 0xc2, 0x38, 0x1d, 0xdb, 0xec, 0x05, 0x74, 0x54, 0x83, 0x35, 0x36, 0x7c, 0xe2, 0xf8, 0xae,
+ 0x45, 0x3c, 0x6d, 0x9f, 0xba, 0x9a, 0x67, 0xd3, 0x63, 0x6d, 0x9f, 0xda, 0x36, 0x3d, 0x26, 0x6e,
+ 0x70, 0x37, 0x56, 0xb2, 0xe9, 0xa0, 0x29, 0x40, 0x1b, 0xd4, 0xed, 0xd9, 0xf4, 0x78, 0x23, 0x40,
+ 0xb0, 0xb4, 0x6d, 0x3a, 0x66, 0xdf, 0x32, 0x0e, 0x83, 0xb4, 0x2d, 0xa4, 0xf6, 0x2d, 0xe3, 0x10,
+ 0xbd, 0x03, 0x0b, 0xc4, 0x26, 0xfc, 0x8a, 0x44, 0xa0, 0x32, 0x1c, 0x55, 0x0c, 0x88, 0x0c, 0xa4,
+ 0x7e, 0x02, 0x4a, 0xd3, 0x31, 0xdc, 0xc9, 0x28, 0x32, 0xe7, 0xf7, 0x01, 0xb1, 0x20, 0xa9, 0xd9,
+ 0xd4, 0x38, 0xd4, 0x86, 0xba, 0xa3, 0x0f, 0x98, 0x5d, 0xe2, 0x85, 0x4f, 0x61, 0x9c, 0x6d, 0x6a,
+ 0x1c, 0xee, 0x48, 0xba, 0xfa, 0x01, 0x40, 0x6f, 0xe4, 0x12, 0xdd, 0xec, 0xb0, 0x6c, 0x82, 0xb9,
+ 0x8e, 0xb7, 0x34, 0x53, 0xbe, 0x90, 0x52, 0x57, 0x6e, 0x75, 0x45, 0x30, 0x1a, 0x21, 0x5d, 0xfd,
+ 0x79, 0xb8, 0xd1, 0xb5, 0x75, 0x83, 0x7f, 0x2d, 0xd0, 0x0d, 0x9f, 0xac, 0xd0, 0x53, 0xc8, 0x0a,
+ 0xa8, 0x9c, 0xc9, 0xd8, 0xed, 0x36, 0xed, 0x73, 0x6b, 0x0e, 0x4b, 0x7c, 0xad, 0x08, 0x30, 0xd5,
+ 0xa3, 0xfe, 0x79, 0x02, 0xf2, 0xa1, 0x7e, 0x54, 0x16, 0x2f, 0x31, 0xbe, 0xab, 0x5b, 0x8e, 0xac,
+ 0xea, 0xf3, 0x38, 0x4a, 0x42, 0x2d, 0x28, 0x8c, 0x42, 0xe9, 0x2b, 0xf3, 0xb9, 0x18, 0xab, 0x71,
+ 0x54, 0x16, 0x7d, 0x08, 0xf9, 0xe0, 0x49, 0x3a, 0x88, 0xb0, 0x57, 0xbf, 0x60, 0x4f, 0xe1, 0xea,
+ 0xb7, 0x01, 0xbe, 0x43, 0x2d, 0xa7, 0x4f, 0x0f, 0x89, 0xc3, 0x9f, 0x58, 0x59, 0x4d, 0x48, 0x02,
+ 0x2f, 0xca, 0x16, 0x2f, 0xf5, 0xc5, 0x14, 0x84, 0x2f, 0x8d, 0xa2, 0xa9, 0xfe, 0x75, 0x12, 0xb2,
+ 0x98, 0x52, 0xbf, 0x5e, 0x45, 0x65, 0xc8, 0xca, 0x38, 0xc1, 0xcf, 0x9f, 0x5a, 0xfe, 0xfc, 0x6c,
+ 0x3d, 0x23, 0x02, 0x44, 0xc6, 0xe0, 0x91, 0x21, 0x12, 0xc1, 0x93, 0x97, 0x45, 0x70, 0xf4, 0x10,
+ 0x8a, 0x12, 0xa4, 0x1d, 0xe8, 0xde, 0x81, 0x28, 0xd0, 0x6a, 0x8b, 0xe7, 0x67, 0xeb, 0x20, 0x90,
+ 0x5b, 0xba, 0x77, 0x80, 0x41, 0xa0, 0xd9, 0x6f, 0xd4, 0x84, 0xc2, 0xa7, 0xd4, 0x72, 0x34, 0x9f,
+ 0x0f, 0x42, 0x5e, 0x26, 0xc6, 0xce, 0xe3, 0x74, 0xa8, 0xf2, 0x7b, 0x03, 0xf8, 0x74, 0x3a, 0xf8,
+ 0x26, 0x2c, 0xb8, 0x94, 0xfa, 0x22, 0x6c, 0x59, 0xd4, 0x91, 0xf7, 0x14, 0xe5, 0xd8, 0xeb, 0x6b,
+ 0x4a, 0x7d, 0x2c, 0x71, 0xb8, 0xe8, 0x46, 0x5a, 0xe8, 0x21, 0xac, 0xd8, 0xba, 0xe7, 0x6b, 0x3c,
+ 0xde, 0x99, 0x53, 0x6d, 0x59, 0xbe, 0xd5, 0x10, 0xe3, 0x6d, 0x70, 0x56, 0x20, 0xa1, 0xfe, 0x63,
+ 0x02, 0x0a, 0x6c, 0x30, 0xd6, 0xbe, 0x65, 0xb0, 0x24, 0xef, 0xf3, 0xe7, 0x1e, 0xb7, 0x20, 0x65,
+ 0x78, 0xae, 0x74, 0x2a, 0x3f, 0x7c, 0xeb, 0x3d, 0x8c, 0x19, 0x0d, 0x7d, 0x02, 0x59, 0x79, 0x5f,
+ 0x22, 0xd2, 0x0e, 0xf5, 0xfa, 0x74, 0x54, 0xfa, 0x46, 0xca, 0xf1, 0xb5, 0x3c, 0xb5, 0x4e, 0x1c,
+ 0x02, 0x38, 0x4a, 0x42, 0x37, 0x21, 0x69, 0x08, 0x77, 0xc9, 0x0f, 0x5a, 0xea, 0x6d, 0x9c, 0x34,
+ 0x1c, 0xf5, 0x47, 0x09, 0x58, 0x98, 0x6e, 0x78, 0xb6, 0x02, 0x6e, 0x43, 0xde, 0x1b, 0xef, 0x79,
+ 0x13, 0xcf, 0x27, 0xc3, 0xe0, 0xf9, 0x38, 0x24, 0xa0, 0x16, 0xe4, 0x75, 0x7b, 0x40, 0x5d, 0xcb,
+ 0x3f, 0x18, 0xca, 0x4a, 0x34, 0x3e, 0x55, 0x88, 0xea, 0xac, 0x54, 0x03, 0x11, 0x3c, 0x95, 0x0e,
+ 0xce, 0x7d, 0xf1, 0x8d, 0x01, 0x3f, 0xf7, 0xdf, 0x86, 0xa2, 0xad, 0x0f, 0xf9, 0x05, 0x92, 0x6f,
+ 0x0d, 0xc5, 0x38, 0xd2, 0xb8, 0x20, 0x69, 0x7d, 0x6b, 0x48, 0x54, 0x15, 0xf2, 0xa1, 0x32, 0xb4,
+ 0x04, 0x85, 0x6a, 0xb3, 0xa7, 0x3d, 0x7a, 0xfc, 0x54, 0xdb, 0xac, 0xef, 0x28, 0x73, 0x32, 0x37,
+ 0xfd, 0x8b, 0x04, 0x2c, 0xc8, 0x70, 0x24, 0xf3, 0xfd, 0x77, 0x60, 0xde, 0xd5, 0xf7, 0xfd, 0xa0,
+ 0x22, 0x49, 0x8b, 0x55, 0xcd, 0x22, 0x3c, 0xab, 0x48, 0x18, 0x2b, 0xbe, 0x22, 0x89, 0x7c, 0xd0,
+ 0x90, 0xba, 0xf2, 0x83, 0x86, 0xf4, 0xcf, 0xe4, 0x83, 0x06, 0xf5, 0x57, 0x01, 0x36, 0x2c, 0x9b,
+ 0xf4, 0xc5, 0x5d, 0x53, 0x5c, 0x7d, 0xc9, 0x72, 0x38, 0x79, 0x97, 0x19, 0xe4, 0x70, 0xad, 0x06,
+ 0x66, 0x34, 0xc6, 0x1a, 0x58, 0xa6, 0xdc, 0x8c, 0x9c, 0xb5, 0xc9, 0x58, 0x03, 0xcb, 0x0c, 0x5f,
+ 0xde, 0xd2, 0xd7, 0xbd, 0xbc, 0x9d, 0x26, 0x60, 0x49, 0xe6, 0xae, 0x61, 0xf8, 0xfd, 0x2a, 0xe4,
+ 0x45, 0x1a, 0x3b, 0x2d, 0xe8, 0xf8, 0x23, 0xbe, 0xc0, 0xb5, 0x1a, 0x38, 0x27, 0xd8, 0x2d, 0x13,
+ 0xad, 0x43, 0x41, 0x42, 0x23, 0x1f, 0x3f, 0x81, 0x20, 0xb5, 0x99, 0xf9, 0x5f, 0x87, 0xf4, 0xbe,
+ 0x65, 0x13, 0xb9, 0xd0, 0x63, 0x03, 0xc0, 0xd4, 0x01, 0x5b, 0x73, 0x98, 0xa3, 0x6b, 0xb9, 0xe0,
+ 0x32, 0x8e, 0xdb, 0x27, 0xcb, 0xce, 0xa8, 0x7d, 0xa2, 0x02, 0x9d, 0xb1, 0x4f, 0xe0, 0x98, 0x7d,
+ 0x82, 0x2d, 0xec, 0x93, 0xd0, 0xa8, 0x7d, 0x82, 0xf4, 0x33, 0xb1, 0x6f, 0x1b, 0x6e, 0xd6, 0x6c,
+ 0xdd, 0x38, 0xb4, 0x2d, 0xcf, 0x27, 0x66, 0x34, 0x62, 0x3c, 0x86, 0xec, 0x85, 0xa4, 0xf3, 0xaa,
+ 0x5b, 0x4b, 0x89, 0x54, 0xff, 0x2d, 0x01, 0xc5, 0x2d, 0xa2, 0xdb, 0xfe, 0xc1, 0xf4, 0x6a, 0xc8,
+ 0x27, 0x9e, 0x2f, 0x0f, 0x2b, 0xfe, 0x1b, 0x7d, 0x03, 0x72, 0x61, 0x4e, 0x72, 0xed, 0xfb, 0x5b,
+ 0x08, 0x45, 0x4f, 0x60, 0x9e, 0xed, 0x31, 0x3a, 0x0e, 0x8a, 0x9d, 0xab, 0x9e, 0x76, 0x24, 0x92,
+ 0x1d, 0x32, 0x2e, 0xe1, 0x49, 0x08, 0x5f, 0x4a, 0x19, 0x1c, 0x34, 0xd1, 0xff, 0x87, 0x22, 0x7f,
+ 0x99, 0x08, 0x72, 0xae, 0xcc, 0x75, 0x3a, 0x0b, 0xe2, 0x71, 0x51, 0xe4, 0x5b, 0xff, 0x93, 0x80,
+ 0x95, 0x1d, 0x7d, 0xb2, 0x47, 0x64, 0xd8, 0x20, 0x26, 0x26, 0x06, 0x75, 0x4d, 0xd4, 0x8d, 0x86,
+ 0x9b, 0x2b, 0xde, 0x2a, 0xe3, 0x84, 0xe3, 0xa3, 0x4e, 0x50, 0x80, 0x25, 0x23, 0x05, 0xd8, 0x0a,
+ 0x64, 0x1c, 0xea, 0x18, 0x44, 0xc6, 0x22, 0xd1, 0x50, 0xad, 0x68, 0xa8, 0x29, 0x85, 0xcf, 0x88,
+ 0xfc, 0x11, 0xb0, 0x4d, 0xfd, 0xb0, 0x37, 0xf4, 0x09, 0x94, 0x7a, 0xcd, 0x3a, 0x6e, 0xf6, 0x6b,
+ 0x9d, 0xef, 0x6a, 0xbd, 0xea, 0x76, 0xaf, 0xfa, 0xf8, 0xa1, 0xd6, 0xed, 0x6c, 0x7f, 0xef, 0xd1,
+ 0x93, 0x87, 0xdf, 0x50, 0x12, 0xa5, 0xf2, 0xc9, 0x69, 0xf9, 0x76, 0xbb, 0x5a, 0xdf, 0x16, 0x3b,
+ 0x66, 0x8f, 0xbe, 0xec, 0xe9, 0xb6, 0xa7, 0x3f, 0x7e, 0xd8, 0xa5, 0xf6, 0x84, 0x61, 0xd8, 0xb2,
+ 0x2e, 0x46, 0xcf, 0xab, 0xe8, 0x31, 0x9c, 0xb8, 0xf4, 0x18, 0x9e, 0x9e, 0xe6, 0xc9, 0x4b, 0x4e,
+ 0xf3, 0x0d, 0x58, 0x31, 0x5c, 0xea, 0x79, 0x1a, 0xcb, 0xfe, 0x89, 0x39, 0x53, 0x5f, 0x7c, 0xe9,
+ 0xfc, 0x6c, 0x7d, 0xb9, 0xce, 0xf8, 0x3d, 0xce, 0x96, 0xea, 0x97, 0x8d, 0x08, 0x89, 0xf7, 0xa4,
+ 0xfe, 0x7e, 0x8a, 0x25, 0x52, 0xd6, 0x91, 0x65, 0x93, 0x01, 0xf1, 0xd0, 0x73, 0x58, 0x32, 0x5c,
+ 0x62, 0xb2, 0xb4, 0x5e, 0xb7, 0xa3, 0x1f, 0xd1, 0xfe, 0xbf, 0xd8, 0x9c, 0x26, 0x14, 0xac, 0xd4,
+ 0x43, 0xa9, 0xde, 0x88, 0x18, 0x78, 0xd1, 0xb8, 0xd0, 0x46, 0x9f, 0xc2, 0x92, 0x47, 0x6c, 0xcb,
+ 0x19, 0xbf, 0xd4, 0x0c, 0xea, 0xf8, 0xe4, 0x65, 0xf0, 0x22, 0x76, 0x9d, 0xde, 0x5e, 0x73, 0x9b,
+ 0x49, 0xd5, 0x85, 0x50, 0x0d, 0x9d, 0x9f, 0xad, 0x2f, 0x5e, 0xa4, 0xe1, 0x45, 0xa9, 0x59, 0xb6,
+ 0x4b, 0x6d, 0x58, 0xbc, 0x68, 0x0d, 0x5a, 0x91, 0x7b, 0x9f, 0x87, 0x90, 0x60, 0x6f, 0xa3, 0xdb,
+ 0x90, 0x73, 0xc9, 0xc0, 0xf2, 0x7c, 0x57, 0xb8, 0x99, 0x71, 0x42, 0x0a, 0xdb, 0xf9, 0xe2, 0x0b,
+ 0xa8, 0xd2, 0x2f, 0xc3, 0x4c, 0x8f, 0x6c, 0xb3, 0x98, 0x96, 0xa7, 0xef, 0x49, 0x95, 0x39, 0x1c,
+ 0x34, 0xd9, 0x1a, 0x1c, 0x7b, 0x61, 0xa2, 0xc6, 0x7f, 0x33, 0x1a, 0xcf, 0x28, 0xe4, 0xf7, 0x60,
+ 0x3c, 0x67, 0x08, 0x3e, 0x2c, 0x4d, 0x47, 0x3e, 0x2c, 0x5d, 0x81, 0x8c, 0x4d, 0x8e, 0x88, 0x2d,
+ 0xce, 0x72, 0x2c, 0x1a, 0xf7, 0x1e, 0x42, 0x31, 0xf8, 0x82, 0x91, 0x7f, 0x39, 0x91, 0x83, 0x74,
+ 0xbf, 0xda, 0x7b, 0xa6, 0xcc, 0x21, 0x80, 0xac, 0x58, 0x9c, 0xe2, 0xb5, 0xae, 0xde, 0x69, 0x6f,
+ 0xb4, 0x36, 0x95, 0xe4, 0xbd, 0xdf, 0x49, 0x43, 0x3e, 0x7c, 0x2f, 0x62, 0x67, 0x47, 0xbb, 0xf9,
+ 0x22, 0x58, 0xdd, 0x21, 0xbd, 0x4d, 0x8e, 0xd1, 0xdb, 0xd3, 0x5b, 0xa8, 0x4f, 0xc4, 0x03, 0x79,
+ 0xc8, 0x0e, 0x6e, 0xa0, 0xde, 0x85, 0x5c, 0xb5, 0xd7, 0x6b, 0x6d, 0xb6, 0x9b, 0x0d, 0xe5, 0xb3,
+ 0x44, 0xe9, 0x4b, 0x27, 0xa7, 0xe5, 0xe5, 0x10, 0x54, 0xf5, 0xc4, 0xe2, 0xe3, 0xa8, 0x7a, 0xbd,
+ 0xd9, 0xed, 0x37, 0x1b, 0xca, 0xab, 0xe4, 0x2c, 0x8a, 0xdf, 0xaa, 0xf0, 0x4f, 0x77, 0xf2, 0x5d,
+ 0xdc, 0xec, 0x56, 0x31, 0xeb, 0xf0, 0xb3, 0xa4, 0xb8, 0x1c, 0x9b, 0xf6, 0xe8, 0x92, 0x91, 0xee,
+ 0xb2, 0x3e, 0xd7, 0x82, 0x6f, 0xe1, 0x5e, 0xa5, 0xc4, 0xe7, 0x1d, 0xd3, 0xc7, 0x2f, 0xa2, 0x9b,
+ 0x13, 0xd6, 0x1b, 0x7f, 0x75, 0xe4, 0x6a, 0x52, 0x33, 0xbd, 0xf5, 0x58, 0xec, 0x61, 0x5a, 0x54,
+ 0x98, 0xc7, 0xbb, 0xed, 0x36, 0x03, 0xbd, 0x4a, 0xcf, 0x8c, 0x0e, 0x8f, 0x1d, 0x56, 0x31, 0xa3,
+ 0xbb, 0x90, 0x0b, 0x1e, 0x25, 0x95, 0xcf, 0xd2, 0x33, 0x06, 0xd5, 0x83, 0x17, 0x55, 0xde, 0xe1,
+ 0xd6, 0x6e, 0x9f, 0x7f, 0xaa, 0xf7, 0x2a, 0x33, 0xdb, 0xe1, 0xc1, 0xd8, 0x37, 0xe9, 0xb1, 0xc3,
+ 0xf6, 0xac, 0xbc, 0x87, 0xfb, 0x2c, 0x23, 0x2e, 0x2d, 0x42, 0x8c, 0xbc, 0x84, 0x7b, 0x17, 0x72,
+ 0xb8, 0xf9, 0x1d, 0xf1, 0x55, 0xdf, 0xab, 0xec, 0x8c, 0x1e, 0x4c, 0x3e, 0x25, 0x06, 0xeb, 0xad,
+ 0x0c, 0x59, 0xdc, 0xdc, 0xe9, 0x3c, 0x6f, 0x2a, 0x7f, 0x90, 0x9d, 0xd1, 0x83, 0xc9, 0x90, 0xf2,
+ 0x6f, 0x9b, 0x72, 0x1d, 0xdc, 0xdd, 0xaa, 0xf2, 0x49, 0x99, 0xd5, 0xd3, 0x71, 0x47, 0x07, 0xba,
+ 0x43, 0xcc, 0xe9, 0x57, 0x30, 0x21, 0xeb, 0xde, 0x2f, 0x40, 0x2e, 0xc8, 0x5d, 0xd1, 0x1a, 0x64,
+ 0x5f, 0x74, 0xf0, 0xb3, 0x26, 0x56, 0xe6, 0x84, 0x97, 0x03, 0xce, 0x0b, 0x51, 0x75, 0x94, 0x61,
+ 0x7e, 0xa7, 0xda, 0xae, 0x6e, 0x36, 0x71, 0x70, 0x89, 0x1e, 0x00, 0x64, 0x02, 0x56, 0x52, 0x64,
+ 0x07, 0xa1, 0xce, 0xda, 0xea, 0x0f, 0x7f, 0xb2, 0x36, 0xf7, 0xe3, 0x9f, 0xac, 0xcd, 0xbd, 0x3a,
+ 0x5f, 0x4b, 0xfc, 0xf0, 0x7c, 0x2d, 0xf1, 0x77, 0xe7, 0x6b, 0x89, 0x7f, 0x3d, 0x5f, 0x4b, 0xec,
+ 0x65, 0xf9, 0x31, 0xf1, 0xe4, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x08, 0x6c, 0xc9, 0x88, 0x12,
+ 0x32, 0x00, 0x00,
}
diff --git a/vendor/github.com/docker/swarmkit/api/types.proto b/vendor/github.com/docker/swarmkit/api/types.proto
index 635d12b..3b60c54 100644
--- a/vendor/github.com/docker/swarmkit/api/types.proto
+++ b/vendor/github.com/docker/swarmkit/api/types.proto
@@ -458,7 +458,16 @@
SHUTDOWN = 640 [(gogoproto.enumvalue_customname)="TaskStateShutdown"]; // orchestrator requested shutdown
FAILED = 704 [(gogoproto.enumvalue_customname)="TaskStateFailed"]; // task execution failed with error
REJECTED = 768 [(gogoproto.enumvalue_customname)="TaskStateRejected"]; // task could not be executed here.
- ORPHANED = 832 [(gogoproto.enumvalue_customname)="TaskStateOrphaned"]; // The node on which this task is scheduled is Down for too long
+ // TaskStateRemove is used to correctly handle service deletions and scale
+ // downs. This allows us to keep track of tasks that have been marked for
+ // deletion, but can't yet be removed because the agent is in the process of
+ // shutting them down. Once the agent has shut down tasks with desired state
+ // REMOVE, the task reaper is responsible for removing them.
+ REMOVE = 800 [(gogoproto.enumvalue_customname)="TaskStateRemove"];
+ // TaskStateOrphaned is used to free up resources associated with service
+ // tasks on unresponsive nodes without having to delete those tasks. This
+ // state is directly assigned to the task by the orchestrator.
+ ORPHANED = 832 [(gogoproto.enumvalue_customname)="TaskStateOrphaned"];
// NOTE(stevvooe): The state of a task is actually a lamport clock, in that
// given two observations, the greater of the two can be considered
diff --git a/vendor/github.com/docker/swarmkit/api/watch.pb.go b/vendor/github.com/docker/swarmkit/api/watch.pb.go
index 58fa891..78f0014 100644
--- a/vendor/github.com/docker/swarmkit/api/watch.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/watch.pb.go
@@ -19,6 +19,7 @@
import raftselector "github.com/docker/swarmkit/manager/raftselector"
import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import rafttime "time"
@@ -2043,12 +2044,12 @@
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
- return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
+ return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
- return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
+ return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
diff --git a/vendor/github.com/docker/swarmkit/ca/auth.go b/vendor/github.com/docker/swarmkit/ca/auth.go
index 488d34d..df4547f 100644
--- a/vendor/github.com/docker/swarmkit/ca/auth.go
+++ b/vendor/github.com/docker/swarmkit/ca/auth.go
@@ -10,10 +10,10 @@
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/status"
)
type localRequestKeyType struct{}
@@ -52,13 +52,13 @@
// getCertificateSubject extracts the subject from a verified client certificate
func getCertificateSubject(tlsState *tls.ConnectionState) (pkix.Name, error) {
if tlsState == nil {
- return pkix.Name{}, grpc.Errorf(codes.PermissionDenied, "request is not using TLS")
+ return pkix.Name{}, status.Errorf(codes.PermissionDenied, "request is not using TLS")
}
if len(tlsState.PeerCertificates) == 0 {
- return pkix.Name{}, grpc.Errorf(codes.PermissionDenied, "no client certificates in request")
+ return pkix.Name{}, status.Errorf(codes.PermissionDenied, "no client certificates in request")
}
if len(tlsState.VerifiedChains) == 0 {
- return pkix.Name{}, grpc.Errorf(codes.PermissionDenied, "no verified chains for remote certificate")
+ return pkix.Name{}, status.Errorf(codes.PermissionDenied, "no verified chains for remote certificate")
}
return tlsState.VerifiedChains[0][0].Subject, nil
@@ -67,11 +67,11 @@
func tlsConnStateFromContext(ctx context.Context) (*tls.ConnectionState, error) {
peer, ok := peer.FromContext(ctx)
if !ok {
- return nil, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
+ return nil, status.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
}
tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo)
if !ok {
- return nil, grpc.Errorf(codes.PermissionDenied, "Permission denied: peer didn't not present valid peer certificate")
+ return nil, status.Errorf(codes.PermissionDenied, "Permission denied: peer didn't not present valid peer certificate")
}
return &tlsInfo.State, nil
}
@@ -98,21 +98,21 @@
return authorizeOrg(certSubj, org, blacklistedCerts)
}
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of OUs: %v", ou)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of OUs: %v", ou)
}
// authorizeOrg takes in a certificate subject and an organization, and returns
// the Node ID of the node.
func authorizeOrg(certSubj pkix.Name, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) {
if _, ok := blacklistedCerts[certSubj.CommonName]; ok {
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: node %s was removed from swarm", certSubj.CommonName)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: node %s was removed from swarm", certSubj.CommonName)
}
if len(certSubj.Organization) > 0 && certSubj.Organization[0] == org {
return certSubj.CommonName, nil
}
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of organization: %s", org)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of organization: %s", org)
}
// AuthorizeForwardedRoleAndOrg checks for proper roles and organization of caller. The RPC may have
@@ -123,7 +123,7 @@
if isForwardedRequest(ctx) {
_, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, forwarderRoles...)
if err != nil {
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarder role: %v", err)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarder role: %v", err)
}
// This was a forwarded request. Authorize the forwarder, and
@@ -132,15 +132,15 @@
_, forwardedID, forwardedOrg, forwardedOUs := forwardedTLSInfoFromContext(ctx)
if len(forwardedOUs) == 0 || forwardedID == "" || forwardedOrg == "" {
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
}
if !intersectArrays(forwardedOUs, authorizedRoles) {
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarded role, expecting: %v", authorizedRoles)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarded role, expecting: %v", authorizedRoles)
}
if forwardedOrg != org {
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: organization mismatch, expecting: %s", org)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: organization mismatch, expecting: %s", org)
}
return forwardedID, nil
@@ -152,7 +152,7 @@
return nodeID, nil
}
- return "", grpc.Errorf(codes.PermissionDenied, "Permission denied: unauthorized peer role: %v", err)
+ return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized peer role: %v", err)
}
// intersectArrays returns true when there is at least one element in common
@@ -219,7 +219,7 @@
peer, ok := peer.FromContext(ctx)
if !ok {
- return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
+ return RemoteNodeInfo{}, status.Errorf(codes.PermissionDenied, "Permission denied: no peer info")
}
directInfo := RemoteNodeInfo{
@@ -232,7 +232,7 @@
if isForwardedRequest(ctx) {
remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(ctx)
if len(ous) == 0 || cn == "" || org == "" {
- return RemoteNodeInfo{}, grpc.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
+ return RemoteNodeInfo{}, status.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request")
}
return RemoteNodeInfo{
Roles: ous,
diff --git a/vendor/github.com/docker/swarmkit/ca/server.go b/vendor/github.com/docker/swarmkit/ca/server.go
index 16dbedc..a456df7 100644
--- a/vendor/github.com/docker/swarmkit/ca/server.go
+++ b/vendor/github.com/docker/swarmkit/ca/server.go
@@ -16,8 +16,8 @@
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
const (
@@ -149,7 +149,7 @@
// NodeCertificateStatus returns the current issuance status of an issuance request identified by the nodeID
func (s *Server) NodeCertificateStatus(ctx context.Context, request *api.NodeCertificateStatusRequest) (*api.NodeCertificateStatusResponse, error) {
if request.NodeID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
+ return nil, status.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
}
serverCtx, err := s.isRunningLocked()
@@ -180,7 +180,7 @@
// This node ID doesn't exist
if node == nil {
- return nil, grpc.Errorf(codes.NotFound, codes.NotFound.String())
+ return nil, status.Errorf(codes.NotFound, codes.NotFound.String())
}
log.G(ctx).WithFields(logrus.Fields{
@@ -236,7 +236,7 @@
func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNodeCertificateRequest) (*api.IssueNodeCertificateResponse, error) {
// First, let's see if the remote node is presenting a non-empty CSR
if len(request.CSR) == 0 {
- return nil, grpc.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
+ return nil, status.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
}
if err := s.isReadyLocked(); err != nil {
@@ -295,7 +295,7 @@
s.mu.Unlock()
if role < 0 {
- return nil, grpc.Errorf(codes.InvalidArgument, "A valid join token is necessary to join this cluster")
+ return nil, status.Errorf(codes.InvalidArgument, "A valid join token is necessary to join this cluster")
}
// Max number of collisions of ID or CN to tolerate before giving up
@@ -369,7 +369,7 @@
"method": "issueRenewCertificate",
}).Warnf("node does not exist")
// If this node doesn't exist, we shouldn't be renewing a certificate for it
- return grpc.Errorf(codes.NotFound, "node %s not found when attempting to renew certificate", nodeID)
+ return status.Errorf(codes.NotFound, "node %s not found when attempting to renew certificate", nodeID)
}
// Create a new Certificate entry for this node with the new CSR and a RENEW state
@@ -594,7 +594,7 @@
s.mu.Lock()
if !s.isRunning() {
s.mu.Unlock()
- return nil, grpc.Errorf(codes.Aborted, "CA signer is stopped")
+ return nil, status.Errorf(codes.Aborted, "CA signer is stopped")
}
ctx := s.ctx
s.mu.Unlock()
@@ -605,10 +605,10 @@
s.mu.Lock()
defer s.mu.Unlock()
if !s.isRunning() {
- return grpc.Errorf(codes.Aborted, "CA signer is stopped")
+ return status.Errorf(codes.Aborted, "CA signer is stopped")
}
if s.joinTokens == nil {
- return grpc.Errorf(codes.Aborted, "CA signer is still starting")
+ return status.Errorf(codes.Aborted, "CA signer is still starting")
}
return nil
}
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
index 53f9ffb..b89e72e 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
@@ -404,6 +404,11 @@
vipLoop:
for _, vip := range s.Endpoint.VirtualIPs {
if na.IsVIPOnIngressNetwork(vip) && networkallocator.IsIngressNetworkNeeded(s) {
+ // This checks the condition when ingress network is needed
+ // but allocation has not been done.
+ if _, ok := na.services[s.ID]; !ok {
+ return false
+ }
continue vipLoop
}
for _, net := range specNetworks {
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
index 19dcbec..7f3f1c1 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
@@ -324,9 +324,18 @@
}
portStates := allocatedPorts{}
+ hostTargetPorts := map[uint32]struct{}{}
for _, portState := range s.Endpoint.Ports {
- if portState.PublishMode == api.PublishModeIngress {
+ switch portState.PublishMode {
+ case api.PublishModeIngress:
portStates.addState(portState)
+ case api.PublishModeHost:
+ // build a map of host mode ports we've seen. if in the spec we get
+ // a host port that's not in the service, then we need to do
+ // allocation. if we get the same target port but something else
+ // has changed, then HostPublishPortsNeedUpdate will cover that
+ // case. see docker/swarmkit#2376
+ hostTargetPorts[portState.TargetPort] = struct{}{}
}
}
@@ -344,18 +353,28 @@
// Iterate portConfigs with PublishedPort == 0 (low priority)
for _, portConfig := range s.Spec.Endpoint.Ports {
// Ignore ports which are not PublishModeIngress
- if portConfig.PublishMode != api.PublishModeIngress {
- continue
- }
- if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
- return false
- }
+ switch portConfig.PublishMode {
+ case api.PublishModeIngress:
+ if portConfig.PublishedPort == 0 && portStates.delState(portConfig) == nil {
+ return false
+ }
- // If SwarmPort was not defined by user and the func
- // is called during allocator initialization state then
- // we are not allocated.
- if portConfig.PublishedPort == 0 && onInit {
- return false
+ // If SwarmPort was not defined by user and the func
+ // is called during allocator initialization state then
+ // we are not allocated.
+ if portConfig.PublishedPort == 0 && onInit {
+ return false
+ }
+ case api.PublishModeHost:
+ // check if the target port is already in the port config. if it
+ // isn't, then it's our problem.
+ if _, ok := hostTargetPorts[portConfig.TargetPort]; !ok {
+ return false
+ }
+ // NOTE(dperny) there could be a further case where we check if
+ // there are host ports in the config that aren't in the spec, but
+ // that's only possible if there's a mismatch in the number of
+ // ports, which is handled by a length check earlier in the code
}
}
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/ca_rotation.go b/vendor/github.com/docker/swarmkit/manager/controlapi/ca_rotation.go
index 5e8fa43..d39c7d2 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/ca_rotation.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/ca_rotation.go
@@ -10,13 +10,12 @@
"net/url"
"time"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
-
"github.com/cloudflare/cfssl/helpers"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/ca"
"github.com/docker/swarmkit/log"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
var minRootExpiration = 1 * helpers.OneYear
@@ -60,7 +59,7 @@
crossSignedCert, err = oldRootCA.CrossSignCACertificate(rootCert)
}
case !newRootHasSigner: // the original CA and the new CA both require external CAs
- return nil, grpc.Errorf(codes.InvalidArgument, "rotating from one external CA to a different external CA is not supported")
+ return nil, status.Errorf(codes.InvalidArgument, "rotating from one external CA to a different external CA is not supported")
default:
// We need the same credentials but to connect to the original URLs (in case we are in the middle of a root rotation already)
var urls []string
@@ -70,7 +69,7 @@
}
}
if len(urls) == 0 {
- return nil, grpc.Errorf(codes.InvalidArgument,
+ return nil, status.Errorf(codes.InvalidArgument,
"must provide an external CA for the current external root CA to generate a cross-signed certificate")
}
rootPool := x509.NewCertPool()
@@ -83,7 +82,7 @@
if err != nil {
log.G(ctx).WithError(err).Error("unable to generate a cross-signed certificate for root rotation")
- return nil, grpc.Errorf(codes.Internal, "unable to generate a cross-signed certificate for root rotation")
+ return nil, status.Errorf(codes.Internal, "unable to generate a cross-signed certificate for root rotation")
}
copied := apiRootCA.Copy()
@@ -146,7 +145,7 @@
}
}
}
- return nil, grpc.Errorf(codes.InvalidArgument, "there must be at least one valid, reachable external CA corresponding to the %s CA certificate", desc)
+ return nil, status.Errorf(codes.InvalidArgument, "there must be at least one valid, reachable external CA corresponding to the %s CA certificate", desc)
}
// validates that the list of external CAs have valid certs associated with them, and produce a mapping of subject/pubkey:external
@@ -193,7 +192,7 @@
newConfig.SigningCACert = ca.NormalizePEMs(newConfig.SigningCACert) // ensure this is normalized before we use it
if len(newConfig.SigningCAKey) > 0 && len(newConfig.SigningCACert) == 0 {
- return nil, grpc.Errorf(codes.InvalidArgument, "if a signing CA key is provided, the signing CA cert must also be provided")
+ return nil, status.Errorf(codes.InvalidArgument, "if a signing CA key is provided, the signing CA cert must also be provided")
}
normalizedRootCA := ca.NormalizePEMs(cluster.RootCA.CACert)
@@ -216,7 +215,7 @@
if cluster.RootCA.LastForcedRotation != newConfig.ForceRotate {
newRootCA, err := ca.CreateRootCA(ca.DefaultRootCN)
if err != nil {
- return nil, grpc.Errorf(codes.Internal, err.Error())
+ return nil, status.Errorf(codes.Internal, err.Error())
}
return newRootRotationObject(ctx, securityConfig, &cluster.RootCA, newRootCA, oldCertExtCAs, newConfig.ForceRotate)
}
@@ -240,21 +239,21 @@
}
newRootCA, err := ca.NewRootCA(newConfig.SigningCACert, signingCert, newConfig.SigningCAKey, ca.DefaultNodeCertExpiration, nil)
if err != nil {
- return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
+ return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
if len(newRootCA.Pool.Subjects()) != 1 {
- return nil, grpc.Errorf(codes.InvalidArgument, "the desired CA certificate cannot contain multiple certificates")
+ return nil, status.Errorf(codes.InvalidArgument, "the desired CA certificate cannot contain multiple certificates")
}
parsedCert, err := helpers.ParseCertificatePEM(newConfig.SigningCACert)
if err != nil {
- return nil, grpc.Errorf(codes.InvalidArgument, "could not parse the desired CA certificate")
+ return nil, status.Errorf(codes.InvalidArgument, "could not parse the desired CA certificate")
}
// The new certificate's expiry must be at least one year away
if parsedCert.NotAfter.Before(time.Now().Add(minRootExpiration)) {
- return nil, grpc.Errorf(codes.InvalidArgument, "CA certificate expires too soon")
+ return nil, status.Errorf(codes.InvalidArgument, "CA certificate expires too soon")
}
if !hasSigningKey(newConfig) {
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go b/vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go
index 329313a..0876113 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/cluster.go
@@ -11,8 +11,8 @@
"github.com/docker/swarmkit/manager/state/store"
gogotypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
const (
@@ -23,17 +23,17 @@
func validateClusterSpec(spec *api.ClusterSpec) error {
if spec == nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
// Validate that expiry time being provided is valid, and over our minimum
if spec.CAConfig.NodeCertExpiry != nil {
expiry, err := gogotypes.DurationFromProto(spec.CAConfig.NodeCertExpiry)
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if expiry < ca.MinNodeCertExpiration {
- return grpc.Errorf(codes.InvalidArgument, "minimum certificate expiry time is: %s", ca.MinNodeCertExpiration)
+ return status.Errorf(codes.InvalidArgument, "minimum certificate expiry time is: %s", ca.MinNodeCertExpiration)
}
}
@@ -42,7 +42,7 @@
if len(spec.AcceptancePolicy.Policies) > 0 {
for _, policy := range spec.AcceptancePolicy.Policies {
if policy.Secret != nil && strings.ToLower(policy.Secret.Alg) != "bcrypt" {
- return grpc.Errorf(codes.InvalidArgument, "hashing algorithm is not supported: %s", policy.Secret.Alg)
+ return status.Errorf(codes.InvalidArgument, "hashing algorithm is not supported: %s", policy.Secret.Alg)
}
}
}
@@ -51,13 +51,17 @@
if spec.Dispatcher.HeartbeatPeriod != nil {
heartbeatPeriod, err := gogotypes.DurationFromProto(spec.Dispatcher.HeartbeatPeriod)
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if heartbeatPeriod < 0 {
- return grpc.Errorf(codes.InvalidArgument, "heartbeat time period cannot be a negative duration")
+ return status.Errorf(codes.InvalidArgument, "heartbeat time period cannot be a negative duration")
}
}
+ if spec.Annotations.Name != store.DefaultClusterName {
+ return status.Errorf(codes.InvalidArgument, "modification of cluster name is not allowed")
+ }
+
return nil
}
@@ -66,7 +70,7 @@
// - Returns `NotFound` if the Cluster is not found.
func (s *Server) GetCluster(ctx context.Context, request *api.GetClusterRequest) (*api.GetClusterResponse, error) {
if request.ClusterID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var cluster *api.Cluster
@@ -74,7 +78,7 @@
cluster = store.GetCluster(tx, request.ClusterID)
})
if cluster == nil {
- return nil, grpc.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
+ return nil, status.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
}
redactedClusters := redactClusters([]*api.Cluster{cluster})
@@ -92,7 +96,7 @@
// - Returns an error if the update fails.
func (s *Server) UpdateCluster(ctx context.Context, request *api.UpdateClusterRequest) (*api.UpdateClusterResponse, error) {
if request.ClusterID == "" || request.ClusterVersion == nil {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateClusterSpec(request.Spec); err != nil {
return nil, err
@@ -102,7 +106,7 @@
err := s.store.Update(func(tx store.Tx) error {
cluster = store.GetCluster(tx, request.ClusterID)
if cluster == nil {
- return grpc.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
+ return status.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID)
}
// This ensures that we have the current rootCA with which to generate tokens (expiration doesn't matter
// for generating the tokens)
@@ -110,7 +114,7 @@
if err != nil {
log.G(ctx).WithField(
"method", "(*controlapi.Server).UpdateCluster").WithError(err).Error("invalid cluster root CA")
- return grpc.Errorf(codes.Internal, "error loading cluster rootCA for update")
+ return status.Errorf(codes.Internal, "error loading cluster rootCA for update")
}
cluster.Meta.Version = *request.ClusterVersion
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/common.go b/vendor/github.com/docker/swarmkit/manager/controlapi/common.go
index c016623..9e52179 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/common.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/common.go
@@ -10,8 +10,8 @@
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/allocator"
"github.com/docker/swarmkit/manager/state/store"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
var isValidDNSName = regexp.MustCompile(`^[a-zA-Z0-9](?:[-_]*[A-Za-z0-9]+)*$`)
@@ -70,25 +70,25 @@
func validateAnnotations(m api.Annotations) error {
if m.Name == "" {
- return grpc.Errorf(codes.InvalidArgument, "meta: name must be provided")
+ return status.Errorf(codes.InvalidArgument, "meta: name must be provided")
}
if !isValidDNSName.MatchString(m.Name) {
// if the name doesn't match the regex
- return grpc.Errorf(codes.InvalidArgument, "name must be valid as a DNS name component")
+ return status.Errorf(codes.InvalidArgument, "name must be valid as a DNS name component")
}
if len(m.Name) > 63 {
// DNS labels are limited to 63 characters
- return grpc.Errorf(codes.InvalidArgument, "name must be 63 characters or fewer")
+ return status.Errorf(codes.InvalidArgument, "name must be 63 characters or fewer")
}
return nil
}
func validateConfigOrSecretAnnotations(m api.Annotations) error {
if m.Name == "" {
- return grpc.Errorf(codes.InvalidArgument, "name must be provided")
+ return status.Errorf(codes.InvalidArgument, "name must be provided")
} else if len(m.Name) > 64 || !isValidConfigOrSecretName.MatchString(m.Name) {
// if the name doesn't match the regex
- return grpc.Errorf(codes.InvalidArgument,
+ return status.Errorf(codes.InvalidArgument,
"invalid name, only 64 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]")
}
return nil
@@ -102,7 +102,7 @@
}
if driver.Name == "" {
- return grpc.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
+ return status.Errorf(codes.InvalidArgument, "driver name: if driver is specified name is required")
}
// First check against the known drivers
@@ -119,16 +119,16 @@
}
if pg == nil {
- return grpc.Errorf(codes.InvalidArgument, "plugin %s not supported", driver.Name)
+ return status.Errorf(codes.InvalidArgument, "plugin %s not supported", driver.Name)
}
p, err := pg.Get(driver.Name, pluginType, plugingetter.Lookup)
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, "error during lookup of plugin %s", driver.Name)
+ return status.Errorf(codes.InvalidArgument, "error during lookup of plugin %s", driver.Name)
}
if p.IsV1() {
- return grpc.Errorf(codes.InvalidArgument, "legacy plugin %s of type %s is not supported in swarm mode", driver.Name, pluginType)
+ return status.Errorf(codes.InvalidArgument, "legacy plugin %s of type %s is not supported in swarm mode", driver.Name, pluginType)
}
return nil
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/config.go b/vendor/github.com/docker/swarmkit/manager/controlapi/config.go
index d0fe8a5..ae08885 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/config.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/config.go
@@ -10,8 +10,8 @@
"github.com/docker/swarmkit/manager/state/store"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// MaxConfigSize is the maximum byte length of the `Config.Spec.Data` field.
@@ -32,7 +32,7 @@
// - Returns an error if getting fails.
func (s *Server) GetConfig(ctx context.Context, request *api.GetConfigRequest) (*api.GetConfigResponse, error) {
if request.ConfigID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, "config ID must be provided")
+ return nil, status.Errorf(codes.InvalidArgument, "config ID must be provided")
}
var config *api.Config
@@ -41,7 +41,7 @@
})
if config == nil {
- return nil, grpc.Errorf(codes.NotFound, "config %s not found", request.ConfigID)
+ return nil, status.Errorf(codes.NotFound, "config %s not found", request.ConfigID)
}
return &api.GetConfigResponse{Config: config}, nil
@@ -53,21 +53,21 @@
// - Returns an error if the update fails.
func (s *Server) UpdateConfig(ctx context.Context, request *api.UpdateConfigRequest) (*api.UpdateConfigResponse, error) {
if request.ConfigID == "" || request.ConfigVersion == nil {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var config *api.Config
err := s.store.Update(func(tx store.Tx) error {
config = store.GetConfig(tx, request.ConfigID)
if config == nil {
- return grpc.Errorf(codes.NotFound, "config %s not found", request.ConfigID)
+ return status.Errorf(codes.NotFound, "config %s not found", request.ConfigID)
}
// Check if the Name is different than the current name, or the config is non-nil and different
// than the current config
if config.Spec.Annotations.Name != request.Spec.Annotations.Name ||
(request.Spec.Data != nil && !bytes.Equal(request.Spec.Data, config.Spec.Data)) {
- return grpc.Errorf(codes.InvalidArgument, "only updates to Labels are allowed")
+ return status.Errorf(codes.InvalidArgument, "only updates to Labels are allowed")
}
// We only allow updating Labels
@@ -164,7 +164,7 @@
switch err {
case store.ErrNameConflict:
- return nil, grpc.Errorf(codes.AlreadyExists, "config %s already exists", request.Spec.Annotations.Name)
+ return nil, status.Errorf(codes.AlreadyExists, "config %s already exists", request.Spec.Annotations.Name)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"config.Name": request.Spec.Annotations.Name,
@@ -184,20 +184,20 @@
// - Returns an error if the deletion fails.
func (s *Server) RemoveConfig(ctx context.Context, request *api.RemoveConfigRequest) (*api.RemoveConfigResponse, error) {
if request.ConfigID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, "config ID must be provided")
+ return nil, status.Errorf(codes.InvalidArgument, "config ID must be provided")
}
err := s.store.Update(func(tx store.Tx) error {
// Check if the config exists
config := store.GetConfig(tx, request.ConfigID)
if config == nil {
- return grpc.Errorf(codes.NotFound, "could not find config %s", request.ConfigID)
+ return status.Errorf(codes.NotFound, "could not find config %s", request.ConfigID)
}
// Check if any services currently reference this config, return error if so
services, err := store.FindServices(tx, store.ByReferencedConfigID(request.ConfigID))
if err != nil {
- return grpc.Errorf(codes.Internal, "could not find services using config %s: %v", request.ConfigID, err)
+ return status.Errorf(codes.Internal, "could not find services using config %s: %v", request.ConfigID, err)
}
if len(services) != 0 {
@@ -213,14 +213,14 @@
serviceStr = "service"
}
- return grpc.Errorf(codes.InvalidArgument, "config '%s' is in use by the following %s: %v", configName, serviceStr, serviceNameStr)
+ return status.Errorf(codes.InvalidArgument, "config '%s' is in use by the following %s: %v", configName, serviceStr, serviceNameStr)
}
return store.DeleteConfig(tx, request.ConfigID)
})
switch err {
case store.ErrNotExist:
- return nil, grpc.Errorf(codes.NotFound, "config %s not found", request.ConfigID)
+ return nil, status.Errorf(codes.NotFound, "config %s not found", request.ConfigID)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"config.ID": request.ConfigID,
@@ -235,14 +235,14 @@
func validateConfigSpec(spec *api.ConfigSpec) error {
if spec == nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateConfigOrSecretAnnotations(spec.Annotations); err != nil {
return err
}
if len(spec.Data) >= MaxConfigSize || len(spec.Data) < 1 {
- return grpc.Errorf(codes.InvalidArgument, "config data must be larger than 0 and less than %d bytes", MaxConfigSize)
+ return status.Errorf(codes.InvalidArgument, "config data must be larger than 0 and less than %d bytes", MaxConfigSize)
}
return nil
}
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/network.go b/vendor/github.com/docker/swarmkit/manager/controlapi/network.go
index b150de0..481b0cf 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/network.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/network.go
@@ -12,39 +12,39 @@
"github.com/docker/swarmkit/manager/allocator/networkallocator"
"github.com/docker/swarmkit/manager/state/store"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
func validateIPAMConfiguration(ipamConf *api.IPAMConfig) error {
if ipamConf == nil {
- return grpc.Errorf(codes.InvalidArgument, "ipam configuration: cannot be empty")
+ return status.Errorf(codes.InvalidArgument, "ipam configuration: cannot be empty")
}
_, subnet, err := net.ParseCIDR(ipamConf.Subnet)
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, "ipam configuration: invalid subnet %s", ipamConf.Subnet)
+ return status.Errorf(codes.InvalidArgument, "ipam configuration: invalid subnet %s", ipamConf.Subnet)
}
if ipamConf.Range != "" {
ip, _, err := net.ParseCIDR(ipamConf.Range)
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, "ipam configuration: invalid range %s", ipamConf.Range)
+ return status.Errorf(codes.InvalidArgument, "ipam configuration: invalid range %s", ipamConf.Range)
}
if !subnet.Contains(ip) {
- return grpc.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain range %s", ipamConf.Subnet, ipamConf.Range)
+ return status.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain range %s", ipamConf.Subnet, ipamConf.Range)
}
}
if ipamConf.Gateway != "" {
ip := net.ParseIP(ipamConf.Gateway)
if ip == nil {
- return grpc.Errorf(codes.InvalidArgument, "ipam configuration: invalid gateway %s", ipamConf.Gateway)
+ return status.Errorf(codes.InvalidArgument, "ipam configuration: invalid gateway %s", ipamConf.Gateway)
}
if !subnet.Contains(ip) {
- return grpc.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain gateway %s", ipamConf.Subnet, ipamConf.Gateway)
+ return status.Errorf(codes.InvalidArgument, "ipam configuration: subnet %s does not contain gateway %s", ipamConf.Subnet, ipamConf.Gateway)
}
}
@@ -73,15 +73,15 @@
func validateNetworkSpec(spec *api.NetworkSpec, pg plugingetter.PluginGetter) error {
if spec == nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if spec.Ingress && spec.DriverConfig != nil && spec.DriverConfig.Name != "overlay" {
- return grpc.Errorf(codes.Unimplemented, "only overlay driver is currently supported for ingress network")
+ return status.Errorf(codes.Unimplemented, "only overlay driver is currently supported for ingress network")
}
if spec.Attachable && spec.Ingress {
- return grpc.Errorf(codes.InvalidArgument, "ingress network cannot be attachable")
+ return status.Errorf(codes.InvalidArgument, "ingress network cannot be attachable")
}
if err := validateAnnotations(spec.Annotations); err != nil {
@@ -89,7 +89,7 @@
}
if _, ok := spec.Annotations.Labels[networkallocator.PredefinedLabel]; ok {
- return grpc.Errorf(codes.PermissionDenied, "label %s is for internally created predefined networks and cannot be applied by users",
+ return status.Errorf(codes.PermissionDenied, "label %s is for internally created predefined networks and cannot be applied by users",
networkallocator.PredefinedLabel)
}
if err := validateDriver(spec.DriverConfig, pg, driverapi.NetworkPluginEndpointType); err != nil {
@@ -117,9 +117,9 @@
err := s.store.Update(func(tx store.Tx) error {
if request.Spec.Ingress {
if n, err := allocator.GetIngressNetwork(s.store); err == nil {
- return grpc.Errorf(codes.AlreadyExists, "ingress network (%s) is already present", n.ID)
+ return status.Errorf(codes.AlreadyExists, "ingress network (%s) is already present", n.ID)
} else if err != allocator.ErrNoIngress {
- return grpc.Errorf(codes.Internal, "failed ingress network presence check: %v", err)
+ return status.Errorf(codes.Internal, "failed ingress network presence check: %v", err)
}
}
return store.CreateNetwork(tx, n)
@@ -138,7 +138,7 @@
// - Returns `NotFound` if the Network is not found.
func (s *Server) GetNetwork(ctx context.Context, request *api.GetNetworkRequest) (*api.GetNetworkResponse, error) {
if request.NetworkID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var n *api.Network
@@ -146,7 +146,7 @@
n = store.GetNetwork(tx, request.NetworkID)
})
if n == nil {
- return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
+ return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
}
return &api.GetNetworkResponse{
Network: n,
@@ -159,7 +159,7 @@
// - Returns an error if the deletion fails.
func (s *Server) RemoveNetwork(ctx context.Context, request *api.RemoveNetworkRequest) (*api.RemoveNetworkResponse, error) {
if request.NetworkID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var (
@@ -171,7 +171,7 @@
n = store.GetNetwork(tx, request.NetworkID)
})
if n == nil {
- return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
+ return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
}
if allocator.IsIngressNetwork(n) {
@@ -179,13 +179,13 @@
}
if v, ok := n.Spec.Annotations.Labels[networkallocator.PredefinedLabel]; ok && v == "true" {
- return nil, grpc.Errorf(codes.FailedPrecondition, "network %s (%s) is a swarm predefined network and cannot be removed",
+ return nil, status.Errorf(codes.FailedPrecondition, "network %s (%s) is a swarm predefined network and cannot be removed",
request.NetworkID, n.Spec.Annotations.Name)
}
if err := rm(n.ID); err != nil {
if err == store.ErrNotExist {
- return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
+ return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
}
return nil, err
}
@@ -196,21 +196,21 @@
return s.store.Update(func(tx store.Tx) error {
services, err := store.FindServices(tx, store.ByReferencedNetworkID(id))
if err != nil {
- return grpc.Errorf(codes.Internal, "could not find services using network %s: %v", id, err)
+ return status.Errorf(codes.Internal, "could not find services using network %s: %v", id, err)
}
if len(services) != 0 {
- return grpc.Errorf(codes.FailedPrecondition, "network %s is in use by service %s", id, services[0].ID)
+ return status.Errorf(codes.FailedPrecondition, "network %s is in use by service %s", id, services[0].ID)
}
tasks, err := store.FindTasks(tx, store.ByReferencedNetworkID(id))
if err != nil {
- return grpc.Errorf(codes.Internal, "could not find tasks using network %s: %v", id, err)
+ return status.Errorf(codes.Internal, "could not find tasks using network %s: %v", id, err)
}
for _, t := range tasks {
if t.DesiredState <= api.TaskStateRunning && t.Status.State <= api.TaskStateRunning {
- return grpc.Errorf(codes.FailedPrecondition, "network %s is in use by task %s", id, t.ID)
+ return status.Errorf(codes.FailedPrecondition, "network %s is in use by task %s", id, t.ID)
}
}
@@ -222,11 +222,11 @@
return s.store.Update(func(tx store.Tx) error {
services, err := store.FindServices(tx, store.All)
if err != nil {
- return grpc.Errorf(codes.Internal, "could not find services using network %s: %v", id, err)
+ return status.Errorf(codes.Internal, "could not find services using network %s: %v", id, err)
}
for _, srv := range services {
if allocator.IsIngressNetworkNeeded(srv) {
- return grpc.Errorf(codes.FailedPrecondition, "ingress network cannot be removed because service %s depends on it", srv.ID)
+ return status.Errorf(codes.FailedPrecondition, "ingress network cannot be removed because service %s depends on it", srv.ID)
}
}
return store.DeleteNetwork(tx, id)
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/node.go b/vendor/github.com/docker/swarmkit/manager/controlapi/node.go
index bac6b80..e1fe3de 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/node.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/node.go
@@ -9,13 +9,13 @@
"github.com/docker/swarmkit/manager/state/store"
gogotypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
func validateNodeSpec(spec *api.NodeSpec) error {
if spec == nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
return nil
}
@@ -25,7 +25,7 @@
// - Returns `NotFound` if the Node is not found.
func (s *Server) GetNode(ctx context.Context, request *api.GetNodeRequest) (*api.GetNodeResponse, error) {
if request.NodeID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var node *api.Node
@@ -33,7 +33,7 @@
node = store.GetNode(tx, request.NodeID)
})
if node == nil {
- return nil, grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
+ return nil, status.Errorf(codes.NotFound, "node %s not found", request.NodeID)
}
if s.raft != nil {
@@ -196,7 +196,7 @@
// - Returns an error if the update fails.
func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) {
if request.NodeID == "" || request.NodeVersion == nil {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateNodeSpec(request.Spec); err != nil {
return nil, err
@@ -210,7 +210,7 @@
err := s.store.Update(func(tx store.Tx) error {
node = store.GetNode(tx, request.NodeID)
if node == nil {
- return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
+ return status.Errorf(codes.NotFound, "node %s not found", request.NodeID)
}
// Demotion sanity checks.
@@ -218,20 +218,20 @@
// Check for manager entries in Store.
managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
if err != nil {
- return grpc.Errorf(codes.Internal, "internal store error: %v", err)
+ return status.Errorf(codes.Internal, "internal store error: %v", err)
}
if len(managers) == 1 && managers[0].ID == node.ID {
- return grpc.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
+ return status.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
}
// Check for node in memberlist
if member = s.raft.GetMemberByNodeID(request.NodeID); member == nil {
- return grpc.Errorf(codes.NotFound, "can't find manager in raft memberlist")
+ return status.Errorf(codes.NotFound, "can't find manager in raft memberlist")
}
// Quorum safeguard
if !s.raft.CanRemoveMember(member.RaftID) {
- return grpc.Errorf(codes.FailedPrecondition, "can't remove member from the raft: this would result in a loss of quorum")
+ return status.Errorf(codes.FailedPrecondition, "can't remove member from the raft: this would result in a loss of quorum")
}
}
@@ -278,33 +278,33 @@
// - Returns an error if the delete fails.
func (s *Server) RemoveNode(ctx context.Context, request *api.RemoveNodeRequest) (*api.RemoveNodeResponse, error) {
if request.NodeID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
err := s.store.Update(func(tx store.Tx) error {
node := store.GetNode(tx, request.NodeID)
if node == nil {
- return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
+ return status.Errorf(codes.NotFound, "node %s not found", request.NodeID)
}
if node.Spec.DesiredRole == api.NodeRoleManager {
if s.raft == nil {
- return grpc.Errorf(codes.FailedPrecondition, "node %s is a manager but cannot access node information from the raft memberlist", request.NodeID)
+ return status.Errorf(codes.FailedPrecondition, "node %s is a manager but cannot access node information from the raft memberlist", request.NodeID)
}
if member := s.raft.GetMemberByNodeID(request.NodeID); member != nil {
- return grpc.Errorf(codes.FailedPrecondition, "node %s is a cluster manager and is a member of the raft cluster. It must be demoted to worker before removal", request.NodeID)
+ return status.Errorf(codes.FailedPrecondition, "node %s is a cluster manager and is a member of the raft cluster. It must be demoted to worker before removal", request.NodeID)
}
}
if !request.Force && node.Status.State == api.NodeStatus_READY {
- return grpc.Errorf(codes.FailedPrecondition, "node %s is not down and can't be removed", request.NodeID)
+ return status.Errorf(codes.FailedPrecondition, "node %s is not down and can't be removed", request.NodeID)
}
// lookup the cluster
- clusters, err := store.FindClusters(tx, store.ByName("default"))
+ clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
if len(clusters) != 1 {
- return grpc.Errorf(codes.Internal, "could not fetch cluster object")
+ return status.Errorf(codes.Internal, "could not fetch cluster object")
}
cluster := clusters[0]
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go b/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go
index bac4c10..fdcd2c4 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/secret.go
@@ -11,8 +11,8 @@
"github.com/docker/swarmkit/manager/state/store"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// assumes spec is not nil
@@ -30,7 +30,7 @@
// - Returns an error if getting fails.
func (s *Server) GetSecret(ctx context.Context, request *api.GetSecretRequest) (*api.GetSecretResponse, error) {
if request.SecretID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, "secret ID must be provided")
+ return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided")
}
var secret *api.Secret
@@ -39,7 +39,7 @@
})
if secret == nil {
- return nil, grpc.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
+ return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
}
secret.Spec.Data = nil // clean the actual secret data so it's never returned
@@ -52,20 +52,20 @@
// - Returns an error if the update fails.
func (s *Server) UpdateSecret(ctx context.Context, request *api.UpdateSecretRequest) (*api.UpdateSecretResponse, error) {
if request.SecretID == "" || request.SecretVersion == nil {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var secret *api.Secret
err := s.store.Update(func(tx store.Tx) error {
secret = store.GetSecret(tx, request.SecretID)
if secret == nil {
- return grpc.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
+ return status.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
}
// Check if the Name is different than the current name, or the secret is non-nil and different
// than the current secret
if secret.Spec.Annotations.Name != request.Spec.Annotations.Name ||
(request.Spec.Data != nil && subtle.ConstantTimeCompare(request.Spec.Data, secret.Spec.Data) == 0) {
- return grpc.Errorf(codes.InvalidArgument, "only updates to Labels are allowed")
+ return status.Errorf(codes.InvalidArgument, "only updates to Labels are allowed")
}
// We only allow updating Labels
@@ -171,7 +171,7 @@
switch err {
case store.ErrNameConflict:
- return nil, grpc.Errorf(codes.AlreadyExists, "secret %s already exists", request.Spec.Annotations.Name)
+ return nil, status.Errorf(codes.AlreadyExists, "secret %s already exists", request.Spec.Annotations.Name)
case nil:
secret.Spec.Data = nil // clean the actual secret data so it's never returned
log.G(ctx).WithFields(logrus.Fields{
@@ -192,20 +192,20 @@
// - Returns an error if the deletion fails.
func (s *Server) RemoveSecret(ctx context.Context, request *api.RemoveSecretRequest) (*api.RemoveSecretResponse, error) {
if request.SecretID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, "secret ID must be provided")
+ return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided")
}
err := s.store.Update(func(tx store.Tx) error {
// Check if the secret exists
secret := store.GetSecret(tx, request.SecretID)
if secret == nil {
- return grpc.Errorf(codes.NotFound, "could not find secret %s", request.SecretID)
+ return status.Errorf(codes.NotFound, "could not find secret %s", request.SecretID)
}
// Check if any services currently reference this secret, return error if so
services, err := store.FindServices(tx, store.ByReferencedSecretID(request.SecretID))
if err != nil {
- return grpc.Errorf(codes.Internal, "could not find services using secret %s: %v", request.SecretID, err)
+ return status.Errorf(codes.Internal, "could not find services using secret %s: %v", request.SecretID, err)
}
if len(services) != 0 {
@@ -221,14 +221,14 @@
serviceStr = "service"
}
- return grpc.Errorf(codes.InvalidArgument, "secret '%s' is in use by the following %s: %v", secretName, serviceStr, serviceNameStr)
+ return status.Errorf(codes.InvalidArgument, "secret '%s' is in use by the following %s: %v", secretName, serviceStr, serviceNameStr)
}
return store.DeleteSecret(tx, request.SecretID)
})
switch err {
case store.ErrNotExist:
- return nil, grpc.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
+ return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"secret.ID": request.SecretID,
@@ -243,7 +243,7 @@
func validateSecretSpec(spec *api.SecretSpec) error {
if spec == nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateConfigOrSecretAnnotations(spec.Annotations); err != nil {
return err
@@ -252,12 +252,12 @@
if spec.Driver != nil {
// Ensure secret driver has a name
if spec.Driver.Name == "" {
- return grpc.Errorf(codes.InvalidArgument, "secret driver must have a name")
+ return status.Errorf(codes.InvalidArgument, "secret driver must have a name")
}
return nil
}
if err := validation.ValidateSecretPayload(spec.Data); err != nil {
- return grpc.Errorf(codes.InvalidArgument, "%s", err.Error())
+ return status.Errorf(codes.InvalidArgument, "%s", err.Error())
}
return nil
}
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/service.go b/vendor/github.com/docker/swarmkit/manager/controlapi/service.go
index 951c27b..e4c27df 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/service.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/service.go
@@ -19,8 +19,8 @@
"github.com/docker/swarmkit/template"
gogotypes "github.com/gogo/protobuf/types"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
var (
@@ -37,11 +37,11 @@
}
if r.NanoCPUs != 0 && r.NanoCPUs < 1e6 {
- return grpc.Errorf(codes.InvalidArgument, "invalid cpu value %g: Must be at least %g", float64(r.NanoCPUs)/1e9, 1e6/1e9)
+ return status.Errorf(codes.InvalidArgument, "invalid cpu value %g: Must be at least %g", float64(r.NanoCPUs)/1e9, 1e6/1e9)
}
if r.MemoryBytes != 0 && r.MemoryBytes < 4*1024*1024 {
- return grpc.Errorf(codes.InvalidArgument, "invalid memory value %d: Must be at least 4MiB", r.MemoryBytes)
+ return status.Errorf(codes.InvalidArgument, "invalid memory value %d: Must be at least 4MiB", r.MemoryBytes)
}
if err := genericresource.ValidateTask(r); err != nil {
return nil
@@ -70,7 +70,7 @@
return err
}
if delay < 0 {
- return grpc.Errorf(codes.InvalidArgument, "TaskSpec: restart-delay cannot be negative")
+ return status.Errorf(codes.InvalidArgument, "TaskSpec: restart-delay cannot be negative")
}
}
@@ -80,7 +80,7 @@
return err
}
if win < 0 {
- return grpc.Errorf(codes.InvalidArgument, "TaskSpec: restart-window cannot be negative")
+ return status.Errorf(codes.InvalidArgument, "TaskSpec: restart-window cannot be negative")
}
}
@@ -101,7 +101,7 @@
}
if uc.Delay < 0 {
- return grpc.Errorf(codes.InvalidArgument, "TaskSpec: update-delay cannot be negative")
+ return status.Errorf(codes.InvalidArgument, "TaskSpec: update-delay cannot be negative")
}
if uc.Monitor != nil {
@@ -110,12 +110,12 @@
return err
}
if monitor < 0 {
- return grpc.Errorf(codes.InvalidArgument, "TaskSpec: update-monitor cannot be negative")
+ return status.Errorf(codes.InvalidArgument, "TaskSpec: update-monitor cannot be negative")
}
}
if uc.MaxFailureRatio < 0 || uc.MaxFailureRatio > 1 {
- return grpc.Errorf(codes.InvalidArgument, "TaskSpec: update-maxfailureratio cannot be less than 0 or bigger than 1")
+ return status.Errorf(codes.InvalidArgument, "TaskSpec: update-maxfailureratio cannot be less than 0 or bigger than 1")
}
return nil
@@ -147,7 +147,7 @@
LogDriver: taskSpec.LogDriver,
})
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, err.Error())
+ return status.Errorf(codes.InvalidArgument, err.Error())
}
if err := validateImage(container.Image); err != nil {
@@ -164,11 +164,11 @@
// validateImage validates image name in containerSpec
func validateImage(image string) error {
if image == "" {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
}
if _, err := reference.ParseNormalizedNamed(image); err != nil {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", image)
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", image)
}
return nil
}
@@ -178,7 +178,7 @@
mountMap := make(map[string]bool)
for _, mount := range mounts {
if _, exists := mountMap[mount.Target]; exists {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target)
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target)
}
mountMap[mount.Target] = true
}
@@ -198,7 +198,7 @@
return err
}
if interval != 0 && interval < time.Duration(minimumDuration) {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: Interval in HealthConfig cannot be less than %s", minimumDuration)
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: Interval in HealthConfig cannot be less than %s", minimumDuration)
}
}
@@ -208,7 +208,7 @@
return err
}
if timeout != 0 && timeout < time.Duration(minimumDuration) {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: Timeout in HealthConfig cannot be less than %s", minimumDuration)
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: Timeout in HealthConfig cannot be less than %s", minimumDuration)
}
}
@@ -218,12 +218,12 @@
return err
}
if sp != 0 && sp < time.Duration(minimumDuration) {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: StartPeriod in HealthConfig cannot be less than %s", minimumDuration)
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: StartPeriod in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.Retries < 0 {
- return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: Retries in HealthConfig cannot be negative")
+ return status.Errorf(codes.InvalidArgument, "ContainerSpec: Retries in HealthConfig cannot be negative")
}
return nil
@@ -233,28 +233,28 @@
generic := taskSpec.GetGeneric()
if len(generic.Kind) < 3 {
- return grpc.Errorf(codes.InvalidArgument, "Generic runtime: Invalid name %q", generic.Kind)
+ return status.Errorf(codes.InvalidArgument, "Generic runtime: Invalid name %q", generic.Kind)
}
reservedNames := []string{"container", "attachment"}
for _, n := range reservedNames {
if strings.ToLower(generic.Kind) == n {
- return grpc.Errorf(codes.InvalidArgument, "Generic runtime: %q is a reserved name", generic.Kind)
+ return status.Errorf(codes.InvalidArgument, "Generic runtime: %q is a reserved name", generic.Kind)
}
}
payload := generic.Payload
if payload == nil {
- return grpc.Errorf(codes.InvalidArgument, "Generic runtime is missing payload")
+ return status.Errorf(codes.InvalidArgument, "Generic runtime is missing payload")
}
if payload.TypeUrl == "" {
- return grpc.Errorf(codes.InvalidArgument, "Generic runtime is missing payload type")
+ return status.Errorf(codes.InvalidArgument, "Generic runtime is missing payload type")
}
if len(payload.Value) == 0 {
- return grpc.Errorf(codes.InvalidArgument, "Generic runtime has an empty payload")
+ return status.Errorf(codes.InvalidArgument, "Generic runtime has an empty payload")
}
return nil
@@ -284,7 +284,7 @@
}
if taskSpec.GetRuntime() == nil {
- return grpc.Errorf(codes.InvalidArgument, "TaskSpec: missing runtime")
+ return status.Errorf(codes.InvalidArgument, "TaskSpec: missing runtime")
}
switch taskSpec.GetRuntime().(type) {
@@ -297,7 +297,7 @@
return err
}
default:
- return grpc.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec")
+ return status.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec")
}
return nil
@@ -324,7 +324,7 @@
// for the backend network and hence we accept that configuration.
if epSpec.Mode == api.ResolutionModeDNSRoundRobin && port.PublishMode == api.PublishModeIngress {
- return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: port published with ingress mode can't be used with dnsrr mode")
+ return status.Errorf(codes.InvalidArgument, "EndpointSpec: port published with ingress mode can't be used with dnsrr mode")
}
// If published port is not specified, it does not conflict
@@ -335,7 +335,7 @@
portSpec := portSpec{publishedPort: port.PublishedPort, protocol: port.Protocol}
if _, ok := portSet[portSpec]; ok {
- return grpc.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate published ports provided")
+ return status.Errorf(codes.InvalidArgument, "EndpointSpec: duplicate published ports provided")
}
portSet[portSpec] = struct{}{}
@@ -358,23 +358,23 @@
for _, secretRef := range container.Secrets {
// SecretID and SecretName are mandatory, we have invalid references without them
if secretRef.SecretID == "" || secretRef.SecretName == "" {
- return grpc.Errorf(codes.InvalidArgument, "malformed secret reference")
+ return status.Errorf(codes.InvalidArgument, "malformed secret reference")
}
// Every secret reference requires a Target
if secretRef.GetTarget() == nil {
- return grpc.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided")
+ return status.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided")
}
// If this is a file target, we will ensure filename uniqueness
if secretRef.GetFile() != nil {
fileName := secretRef.GetFile().Name
if fileName == "" {
- return grpc.Errorf(codes.InvalidArgument, "malformed file secret reference, invalid target file name provided")
+ return status.Errorf(codes.InvalidArgument, "malformed file secret reference, invalid target file name provided")
}
// If this target is already in use, we have conflicting targets
if prevSecretName, ok := existingTargets[fileName]; ok {
- return grpc.Errorf(codes.InvalidArgument, "secret references '%s' and '%s' have a conflicting target: '%s'", prevSecretName, secretRef.SecretName, fileName)
+ return status.Errorf(codes.InvalidArgument, "secret references '%s' and '%s' have a conflicting target: '%s'", prevSecretName, secretRef.SecretName, fileName)
}
existingTargets[fileName] = secretRef.SecretName
@@ -398,12 +398,12 @@
for _, configRef := range container.Configs {
// ConfigID and ConfigName are mandatory, we have invalid references without them
if configRef.ConfigID == "" || configRef.ConfigName == "" {
- return grpc.Errorf(codes.InvalidArgument, "malformed config reference")
+ return status.Errorf(codes.InvalidArgument, "malformed config reference")
}
// Every config reference requires a Target
if configRef.GetTarget() == nil {
- return grpc.Errorf(codes.InvalidArgument, "malformed config reference, no target provided")
+ return status.Errorf(codes.InvalidArgument, "malformed config reference, no target provided")
}
// If this is a file target, we will ensure filename uniqueness
@@ -411,12 +411,12 @@
fileName := configRef.GetFile().Name
// Validate the file name
if fileName == "" {
- return grpc.Errorf(codes.InvalidArgument, "malformed file config reference, invalid target file name provided")
+ return status.Errorf(codes.InvalidArgument, "malformed file config reference, invalid target file name provided")
}
// If this target is already in use, we have conflicting targets
if prevConfigName, ok := existingTargets[fileName]; ok {
- return grpc.Errorf(codes.InvalidArgument, "config references '%s' and '%s' have a conflicting target: '%s'", prevConfigName, configRef.ConfigName, fileName)
+ return status.Errorf(codes.InvalidArgument, "config references '%s' and '%s' have a conflicting target: '%s'", prevConfigName, configRef.ConfigName, fileName)
}
existingTargets[fileName] = configRef.ConfigName
@@ -436,7 +436,7 @@
continue
}
if allocator.IsIngressNetwork(network) {
- return grpc.Errorf(codes.InvalidArgument,
+ return status.Errorf(codes.InvalidArgument,
"Service cannot be explicitly attached to the ingress network %q", network.Spec.Annotations.Name)
}
}
@@ -448,11 +448,11 @@
switch m.(type) {
case *api.ServiceSpec_Replicated:
if int64(m.(*api.ServiceSpec_Replicated).Replicated.Replicas) < 0 {
- return grpc.Errorf(codes.InvalidArgument, "Number of replicas must be non-negative")
+ return status.Errorf(codes.InvalidArgument, "Number of replicas must be non-negative")
}
case *api.ServiceSpec_Global:
default:
- return grpc.Errorf(codes.InvalidArgument, "Unrecognized service mode")
+ return status.Errorf(codes.InvalidArgument, "Unrecognized service mode")
}
return nil
@@ -460,7 +460,7 @@
func validateServiceSpec(spec *api.ServiceSpec) error {
if spec == nil {
- return grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateAnnotations(spec.Annotations); err != nil {
return err
@@ -536,7 +536,7 @@
switch pc.PublishMode {
case api.PublishModeHost:
if _, ok := ingressPorts[pcToStruct(pc)]; ok {
- return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as a host-published port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
+ return status.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as a host-published port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
}
// Multiple services with same port in host publish mode can
@@ -546,7 +546,7 @@
_, ingressConflict := ingressPorts[pcToStruct(pc)]
_, hostModeConflict := hostModePorts[pcToStruct(pc)]
if ingressConflict || hostModeConflict {
- return grpc.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as an ingress port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
+ return status.Errorf(codes.InvalidArgument, "port '%d' is already in use by service '%s' (%s) as an ingress port", pc.PublishedPort, service.Spec.Annotations.Name, service.ID)
}
}
@@ -598,7 +598,7 @@
secretStr = "secret"
}
- return grpc.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(failedSecrets, ", "))
+ return status.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(failedSecrets, ", "))
}
@@ -627,7 +627,7 @@
configStr = "config"
}
- return grpc.Errorf(codes.InvalidArgument, "%s not found: %v", configStr, strings.Join(failedConfigs, ", "))
+ return status.Errorf(codes.InvalidArgument, "%s not found: %v", configStr, strings.Join(failedConfigs, ", "))
}
@@ -662,7 +662,7 @@
if allocator.IsIngressNetworkNeeded(service) {
if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress {
- return nil, grpc.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
+ return nil, status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
}
}
@@ -694,7 +694,7 @@
// - Returns `NotFound` if the Service is not found.
func (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) {
if request.ServiceID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var service *api.Service
@@ -702,7 +702,7 @@
service = store.GetService(tx, request.ServiceID)
})
if service == nil {
- return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
if request.InsertDefaults {
@@ -721,7 +721,7 @@
// - Returns an error if the update fails.
func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRequest) (*api.UpdateServiceResponse, error) {
if request.ServiceID == "" || request.ServiceVersion == nil {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateServiceSpec(request.Spec); err != nil {
return nil, err
@@ -732,7 +732,7 @@
service = store.GetService(tx, request.ServiceID)
})
if service == nil {
- return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
if request.Spec.Endpoint != nil && !reflect.DeepEqual(request.Spec.Endpoint, service.Spec.Endpoint) {
@@ -744,7 +744,7 @@
err := s.store.Update(func(tx store.Tx) error {
service = store.GetService(tx, request.ServiceID)
if service == nil {
- return grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ return status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
// It's not okay to update Service.Spec.Networks on its own.
@@ -754,7 +754,7 @@
if (len(request.Spec.Networks) != 0 || len(service.Spec.Networks) != 0) &&
!reflect.DeepEqual(request.Spec.Networks, service.Spec.Networks) &&
reflect.DeepEqual(request.Spec.Task.Networks, service.Spec.Task.Networks) {
- return grpc.Errorf(codes.Unimplemented, errNetworkUpdateNotSupported.Error())
+ return status.Errorf(codes.Unimplemented, errNetworkUpdateNotSupported.Error())
}
// Check to see if all the secrets being added exist as objects
@@ -773,18 +773,18 @@
// with service mode change (comparing current config with previous config).
// proper way to change service mode is to delete and re-add.
if reflect.TypeOf(service.Spec.Mode) != reflect.TypeOf(request.Spec.Mode) {
- return grpc.Errorf(codes.Unimplemented, errModeChangeNotAllowed.Error())
+ return status.Errorf(codes.Unimplemented, errModeChangeNotAllowed.Error())
}
if service.Spec.Annotations.Name != request.Spec.Annotations.Name {
- return grpc.Errorf(codes.Unimplemented, errRenameNotSupported.Error())
+ return status.Errorf(codes.Unimplemented, errRenameNotSupported.Error())
}
service.Meta.Version = *request.ServiceVersion
if request.Rollback == api.UpdateServiceRequest_PREVIOUS {
if service.PreviousSpec == nil {
- return grpc.Errorf(codes.FailedPrecondition, "service %s does not have a previous spec", request.ServiceID)
+ return status.Errorf(codes.FailedPrecondition, "service %s does not have a previous spec", request.ServiceID)
}
curSpec := service.Spec.Copy()
@@ -815,7 +815,7 @@
if allocator.IsIngressNetworkNeeded(service) {
if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress {
- return grpc.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
+ return status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
}
}
@@ -836,7 +836,7 @@
// - Returns an error if the deletion fails.
func (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) {
if request.ServiceID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
err := s.store.Update(func(tx store.Tx) error {
@@ -844,7 +844,7 @@
})
if err != nil {
if err == store.ErrNotExist {
- return nil, grpc.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
+ return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
return nil, err
}
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/task.go b/vendor/github.com/docker/swarmkit/manager/controlapi/task.go
index d2ae215..51b7bf8 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/task.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/task.go
@@ -6,8 +6,8 @@
"github.com/docker/swarmkit/manager/orchestrator"
"github.com/docker/swarmkit/manager/state/store"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// GetTask returns a Task given a TaskID.
@@ -15,7 +15,7 @@
// - Returns `NotFound` if the Task is not found.
func (s *Server) GetTask(ctx context.Context, request *api.GetTaskRequest) (*api.GetTaskResponse, error) {
if request.TaskID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var task *api.Task
@@ -23,7 +23,7 @@
task = store.GetTask(tx, request.TaskID)
})
if task == nil {
- return nil, grpc.Errorf(codes.NotFound, "task %s not found", request.TaskID)
+ return nil, status.Errorf(codes.NotFound, "task %s not found", request.TaskID)
}
return &api.GetTaskResponse{
Task: task,
@@ -36,7 +36,7 @@
// - Returns an error if the deletion fails.
func (s *Server) RemoveTask(ctx context.Context, request *api.RemoveTaskRequest) (*api.RemoveTaskResponse, error) {
if request.TaskID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
err := s.store.Update(func(tx store.Tx) error {
@@ -44,7 +44,7 @@
})
if err != nil {
if err == store.ErrNotExist {
- return nil, grpc.Errorf(codes.NotFound, "task %s not found", request.TaskID)
+ return nil, status.Errorf(codes.NotFound, "task %s not found", request.TaskID)
}
return nil, err
}
diff --git a/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go b/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
index 4de6a30..13d6829 100644
--- a/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
+++ b/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
@@ -7,11 +7,8 @@
"sync"
"time"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/transport"
-
"github.com/docker/go-events"
+ "github.com/docker/go-metrics"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/equality"
"github.com/docker/swarmkit/ca"
@@ -25,6 +22,9 @@
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
const (
@@ -66,8 +66,18 @@
ErrSessionInvalid = errors.New("session invalid")
// ErrNodeNotFound returned when the Node doesn't exist in raft.
ErrNodeNotFound = errors.New("node not found")
+
+ // Scheduling delay timer.
+ schedulingDelayTimer metrics.Timer
)
+func init() {
+ ns := metrics.NewNamespace("swarm", "dispatcher", nil)
+ schedulingDelayTimer = ns.NewTimer("scheduling_delay",
+ "Scheduling delay is the time a task takes to go from NEW to RUNNING state.")
+ metrics.Register(ns)
+}
+
// Config is configuration for Dispatcher. For default you should use
// DefaultConfig.
type Config struct {
@@ -322,7 +332,7 @@
d.mu.Lock()
if !d.isRunning() {
d.mu.Unlock()
- return nil, grpc.Errorf(codes.Aborted, "dispatcher is stopped")
+ return nil, status.Errorf(codes.Aborted, "dispatcher is stopped")
}
ctx := d.ctx
d.mu.Unlock()
@@ -556,7 +566,7 @@
}
if t.NodeID != nodeID {
- err := grpc.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node")
+ err := status.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node")
log.WithField("task.id", u.TaskID).Error(err)
return nil, err
}
@@ -632,6 +642,17 @@
return nil
}
+ // Update scheduling delay metric for running tasks.
+ // We use the status update time on the leader to calculate the scheduling delay.
+ // Because of this, the recorded scheduling delay will be an overestimate and include
+ // the network delay between the worker and the leader.
+ // This is not ideal, but its a known overestimation, rather than using the status update time
+ // from the worker node, which may cause unknown incorrect results due to possible clock skew.
+ if status.State == api.TaskStateRunning {
+ start := time.Unix(status.AppliedAt.GetSeconds(), int64(status.AppliedAt.GetNanos()))
+ schedulingDelayTimer.UpdateSince(start)
+ }
+
task.Status = *status
task.Status.AppliedBy = d.securityConfig.ClientTLSCreds.NodeID()
task.Status.AppliedAt = ptypes.MustTimestampProto(time.Now())
@@ -1189,7 +1210,7 @@
log.WithError(err).Error("failed to remove node")
}
// still return an abort if the transport closure was ineffective.
- return grpc.Errorf(codes.Aborted, "node must disconnect")
+ return status.Errorf(codes.Aborted, "node must disconnect")
}
for {
diff --git a/vendor/github.com/docker/swarmkit/manager/dispatcher/nodes.go b/vendor/github.com/docker/swarmkit/manager/dispatcher/nodes.go
index 8a0de55..cf35bb8 100644
--- a/vendor/github.com/docker/swarmkit/manager/dispatcher/nodes.go
+++ b/vendor/github.com/docker/swarmkit/manager/dispatcher/nodes.go
@@ -4,12 +4,11 @@
"sync"
"time"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
-
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/identity"
"github.com/docker/swarmkit/manager/dispatcher/heartbeat"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
const rateLimitCount = 3
@@ -36,7 +35,7 @@
// changed. If it has, we will the stream and make the node
// re-register.
if sessionID == "" || rn.SessionID != sessionID {
- return grpc.Errorf(codes.InvalidArgument, ErrSessionInvalid.Error())
+ return status.Errorf(codes.InvalidArgument, ErrSessionInvalid.Error())
}
return nil
@@ -97,7 +96,7 @@
}
existRn.Attempts++
if existRn.Attempts > rateLimitCount {
- return grpc.Errorf(codes.Unavailable, "node %s exceeded rate limit count of registrations", id)
+ return status.Errorf(codes.Unavailable, "node %s exceeded rate limit count of registrations", id)
}
existRn.Registered = time.Now()
}
@@ -136,7 +135,7 @@
rn, ok := s.nodes[id]
s.mu.RUnlock()
if !ok {
- return nil, grpc.Errorf(codes.NotFound, ErrNodeNotRegistered.Error())
+ return nil, status.Errorf(codes.NotFound, ErrNodeNotRegistered.Error())
}
return rn, nil
}
@@ -146,7 +145,7 @@
rn, ok := s.nodes[id]
s.mu.RUnlock()
if !ok {
- return nil, grpc.Errorf(codes.NotFound, ErrNodeNotRegistered.Error())
+ return nil, status.Errorf(codes.NotFound, ErrNodeNotRegistered.Error())
}
return rn, rn.checkSessionID(sid)
}
diff --git a/vendor/github.com/docker/swarmkit/manager/health/health.go b/vendor/github.com/docker/swarmkit/manager/health/health.go
index bf220bd..ef6658b 100644
--- a/vendor/github.com/docker/swarmkit/manager/health/health.go
+++ b/vendor/github.com/docker/swarmkit/manager/health/health.go
@@ -12,8 +12,8 @@
"github.com/docker/swarmkit/api"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// Server represents a Health Check server to check
@@ -46,7 +46,7 @@
Status: status,
}, nil
}
- return nil, grpc.Errorf(codes.NotFound, "unknown service")
+ return nil, status.Errorf(codes.NotFound, "unknown service")
}
// SetServingStatus is called when need to reset the serving status of a service
diff --git a/vendor/github.com/docker/swarmkit/manager/logbroker/broker.go b/vendor/github.com/docker/swarmkit/manager/logbroker/broker.go
index 860b55c..de2b936 100644
--- a/vendor/github.com/docker/swarmkit/manager/logbroker/broker.go
+++ b/vendor/github.com/docker/swarmkit/manager/logbroker/broker.go
@@ -6,9 +6,6 @@
"io"
"sync"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
-
"github.com/docker/go-events"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/ca"
@@ -18,6 +15,8 @@
"github.com/docker/swarmkit/watch"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
var (
@@ -93,11 +92,11 @@
func validateSelector(selector *api.LogSelector) error {
if selector == nil {
- return grpc.Errorf(codes.InvalidArgument, "log selector must be provided")
+ return status.Errorf(codes.InvalidArgument, "log selector must be provided")
}
if len(selector.ServiceIDs) == 0 && len(selector.TaskIDs) == 0 && len(selector.NodeIDs) == 0 {
- return grpc.Errorf(codes.InvalidArgument, "log selector must not be empty")
+ return status.Errorf(codes.InvalidArgument, "log selector must not be empty")
}
return nil
@@ -401,17 +400,17 @@
}
if logMsg.SubscriptionID == "" {
- return grpc.Errorf(codes.InvalidArgument, "missing subscription ID")
+ return status.Errorf(codes.InvalidArgument, "missing subscription ID")
}
if currentSubscription == nil {
currentSubscription = lb.getSubscription(logMsg.SubscriptionID)
if currentSubscription == nil {
- return grpc.Errorf(codes.NotFound, "unknown subscription ID")
+ return status.Errorf(codes.NotFound, "unknown subscription ID")
}
} else {
if logMsg.SubscriptionID != currentSubscription.message.ID {
- return grpc.Errorf(codes.InvalidArgument, "different subscription IDs in the same session")
+ return status.Errorf(codes.InvalidArgument, "different subscription IDs in the same session")
}
}
@@ -427,7 +426,7 @@
// Make sure logs are emitted using the right Node ID to avoid impersonation.
for _, msg := range logMsg.Messages {
if msg.Context.NodeID != remote.NodeID {
- return grpc.Errorf(codes.PermissionDenied, "invalid NodeID: expected=%s;received=%s", remote.NodeID, msg.Context.NodeID)
+ return status.Errorf(codes.PermissionDenied, "invalid NodeID: expected=%s;received=%s", remote.NodeID, msg.Context.NodeID)
}
}
diff --git a/vendor/github.com/docker/swarmkit/manager/manager.go b/vendor/github.com/docker/swarmkit/manager/manager.go
index 39db22b..1ce727d 100644
--- a/vendor/github.com/docker/swarmkit/manager/manager.go
+++ b/vendor/github.com/docker/swarmkit/manager/manager.go
@@ -38,6 +38,7 @@
"github.com/docker/swarmkit/manager/resourceapi"
"github.com/docker/swarmkit/manager/scheduler"
"github.com/docker/swarmkit/manager/state/raft"
+ "github.com/docker/swarmkit/manager/state/raft/transport"
"github.com/docker/swarmkit/manager/state/store"
"github.com/docker/swarmkit/manager/watchapi"
"github.com/docker/swarmkit/remotes"
@@ -54,9 +55,6 @@
const (
// defaultTaskHistoryRetentionLimit is the number of tasks to keep.
defaultTaskHistoryRetentionLimit = 5
-
- // Default value for grpc max message size.
- grpcMaxMessageSize = 128 << 20
)
// RemoteAddrs provides a listening address and an optional advertise address
@@ -234,7 +232,7 @@
grpc.Creds(config.SecurityConfig.ServerTLSCreds),
grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
- grpc.MaxMsgSize(grpcMaxMessageSize),
+ grpc.MaxMsgSize(transport.GRPCMaxMsgSize),
}
m := &Manager{
@@ -404,7 +402,7 @@
)
m.raftNode.MemoryStore().View(func(readTx store.ReadTx) {
- clusters, err = store.FindClusters(readTx, store.ByName("default"))
+ clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
})
@@ -954,13 +952,18 @@
// store. Don't check the error because
// we expect this to fail unless this
// is a brand new cluster.
- store.CreateCluster(tx, defaultClusterObject(
+ err := store.CreateCluster(tx, defaultClusterObject(
clusterID,
initialCAConfig,
raftCfg,
api.EncryptionConfig{AutoLockManagers: m.config.AutoLockManagers},
unlockKeys,
rootCA))
+
+ if err != nil && err != store.ErrExist {
+ log.G(ctx).WithError(err).Errorf("error creating cluster object")
+ }
+
// Add Node entry for ourself, if one
// doesn't exist already.
freshCluster := nil == store.CreateNode(tx, managerNode(nodeID, m.config.Availability))
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go
index a1d2873..f6217ff 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/global/global.go
@@ -73,7 +73,7 @@
var err error
g.store.View(func(readTx store.ReadTx) {
var clusters []*api.Cluster
- clusters, err = store.FindClusters(readTx, store.ByName("default"))
+ clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if len(clusters) != 1 {
return // just pick up the cluster when it is created.
@@ -147,7 +147,7 @@
if !orchestrator.IsGlobalService(v.Service) {
continue
}
- orchestrator.DeleteServiceTasks(ctx, g.store, v.Service)
+ orchestrator.SetServiceTasksRemove(ctx, g.store, v.Service)
// delete the service from service map
delete(g.globalServices, v.Service.ID)
g.restarts.ClearServiceHistory(v.Service.ID)
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/services.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/services.go
index f4d0511..04aea87 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/services.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/services.go
@@ -17,7 +17,7 @@
// responds to changes in individual tasks (or nodes which run them).
func (r *Orchestrator) initCluster(readTx store.ReadTx) error {
- clusters, err := store.FindClusters(readTx, store.ByName("default"))
+ clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
@@ -50,7 +50,7 @@
if !orchestrator.IsReplicatedService(v.Service) {
return
}
- orchestrator.DeleteServiceTasks(ctx, r.store, v.Service)
+ orchestrator.SetServiceTasksRemove(ctx, r.store, v.Service)
r.restarts.ClearServiceHistory(v.Service.ID)
delete(r.reconcileServices, v.Service.ID)
case api.EventCreateService:
@@ -86,6 +86,12 @@
return service
}
+// reconcile decides what actions must be taken depending on the number of
+// specified slots and actual running slots. If the actual running slots are
+// fewer than what is requested, it creates new tasks. If the actual running
+// slots are more than requested, then it decides which slots must be removed
+// and sets desired state of those tasks to REMOVE (the actual removal is handled
+// by the task reaper, after the agent shuts the tasks down).
func (r *Orchestrator) reconcile(ctx context.Context, service *api.Service) {
runningSlots, deadSlots, err := r.updatableAndDeadSlots(ctx, service)
if err != nil {
@@ -157,7 +163,11 @@
r.updater.Update(ctx, r.cluster, service, sortedSlots[:specifiedSlots])
err = r.store.Batch(func(batch *store.Batch) error {
r.deleteTasksMap(ctx, batch, deadSlots)
- r.deleteTasks(ctx, batch, sortedSlots[specifiedSlots:])
+ // for all slots that we are removing, we set the desired state of those tasks
+ // to REMOVE. Then, the agent is responsible for shutting them down, and the
+ // task reaper is responsible for actually removing them from the store after
+ // shutdown.
+ r.setTasksDesiredState(ctx, batch, sortedSlots[specifiedSlots:], api.TaskStateRemove)
return nil
})
if err != nil {
@@ -198,10 +208,34 @@
}
}
-func (r *Orchestrator) deleteTasks(ctx context.Context, batch *store.Batch, slots []orchestrator.Slot) {
+// setTasksDesiredState sets the desired state for all tasks for the given slots to the
+// requested state
+func (r *Orchestrator) setTasksDesiredState(ctx context.Context, batch *store.Batch, slots []orchestrator.Slot, newDesiredState api.TaskState) {
for _, slot := range slots {
for _, t := range slot {
- r.deleteTask(ctx, batch, t)
+ err := batch.Update(func(tx store.Tx) error {
+ // time travel is not allowed. if the current desired state is
+ // above the one we're trying to go to we can't go backwards.
+ // we have nothing to do and we should skip to the next task
+ if t.DesiredState > newDesiredState {
+ // log a warning, though. we shouldn't be trying to rewrite
+ // a state to an earlier state
+ log.G(ctx).Warnf(
+ "cannot update task %v in desired state %v to an earlier desired state %v",
+ t.ID, t.DesiredState, newDesiredState,
+ )
+ return nil
+ }
+ // update desired state
+ t.DesiredState = newDesiredState
+
+ return store.UpdateTask(tx, t)
+ })
+
+ // log an error if we get one
+ if err != nil {
+ log.G(ctx).WithError(err).Errorf("failed to update task to %v", newDesiredState.String())
+ }
}
}
}
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
index bdc25d9..cee9fe1 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/replicated/slot.go
@@ -12,6 +12,8 @@
func (is slotsByRunningState) Len() int { return len(is) }
func (is slotsByRunningState) Swap(i, j int) { is[i], is[j] = is[j], is[i] }
+// Less returns true if the first task should be preferred over the second task,
+// all other things being equal in terms of node balance.
func (is slotsByRunningState) Less(i, j int) bool {
iRunning := false
jRunning := false
@@ -29,7 +31,19 @@
}
}
- return iRunning && !jRunning
+ if iRunning && !jRunning {
+ return true
+ }
+
+ if !iRunning && jRunning {
+ return false
+ }
+
+ // Use Slot number as a tie-breaker to prefer to remove tasks in reverse
+ // order of Slot number. This would help us avoid unnecessary master
+ // migration when scaling down a stateful service because the master
+ // task of a stateful service is usually in a low numbered Slot.
+ return is[i][0].Slot < is[j][0].Slot
}
type slotWithIndex struct {
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/service.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/service.go
index 4e52c83..7356c38 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/service.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/service.go
@@ -27,8 +27,10 @@
return ok
}
-// DeleteServiceTasks deletes the tasks associated with a service.
-func DeleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
+// SetServiceTasksRemove sets the desired state of tasks associated with a service
+// to REMOVE, so that they can be properly shut down by the agent and later removed
+// by the task reaper.
+func SetServiceTasksRemove(ctx context.Context, s *store.MemoryStore, service *api.Service) {
var (
tasks []*api.Task
err error
@@ -44,8 +46,23 @@
err = s.Batch(func(batch *store.Batch) error {
for _, t := range tasks {
err := batch.Update(func(tx store.Tx) error {
- if err := store.DeleteTask(tx, t.ID); err != nil {
- log.G(ctx).WithError(err).Errorf("failed to delete task")
+ // time travel is not allowed. if the current desired state is
+ // above the one we're trying to go to we can't go backwards.
+ // we have nothing to do and we should skip to the next task
+ if t.DesiredState > api.TaskStateRemove {
+ // log a warning, though. we shouldn't be trying to rewrite
+ // a state to an earlier state
+ log.G(ctx).Warnf(
+ "cannot update task %v in desired state %v to an earlier desired state %v",
+ t.ID, t.DesiredState, api.TaskStateRemove,
+ )
+ return nil
+ }
+ // update desired state to REMOVE
+ t.DesiredState = api.TaskStateRemove
+
+ if err := store.UpdateTask(tx, t); err != nil {
+ log.G(ctx).WithError(err).Errorf("failed transaction: update task desired state to REMOVE")
}
return nil
})
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
index 577319c..d702783 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/taskreaper/task_reaper.go
@@ -23,12 +23,19 @@
// exist for the same service/instance or service/nodeid combination.
type TaskReaper struct {
store *store.MemoryStore
+
// taskHistory is the number of tasks to keep
taskHistory int64
- dirty map[orchestrator.SlotTuple]struct{}
- orphaned []string
- stopChan chan struct{}
- doneChan chan struct{}
+
+ // List of slot tuples to be inspected for task history cleanup.
+ dirty map[orchestrator.SlotTuple]struct{}
+
+ // List of tasks collected for cleanup, which includes two kinds of tasks
+ // - serviceless orphaned tasks
+ // - tasks with desired state REMOVE that have already been shut down
+ cleanup []string
+ stopChan chan struct{}
+ doneChan chan struct{}
}
// New creates a new TaskReaper.
@@ -41,7 +48,13 @@
}
}
-// Run is the TaskReaper's main loop.
+// Run is the TaskReaper's watch loop which collects candidates for cleanup.
+// Task history is mainly used in task restarts but is also available for administrative purposes.
+// Note that the task history is stored per-slot-per-service for replicated services
+// and per-node-per-service for global services. History does not apply to serviceless
+// since they are not attached to a service. In addition, the TaskReaper watch loop is also
+// responsible for cleaning up tasks associated with slots that were removed as part of
+// service scale down or service removal.
func (tr *TaskReaper) Run(ctx context.Context) {
watcher, watchCancel := state.Watch(tr.store.WatchQueue(), api.EventCreateTask{}, api.EventUpdateTask{}, api.EventUpdateCluster{})
@@ -50,7 +63,8 @@
watchCancel()
}()
- var tasks []*api.Task
+ var orphanedTasks []*api.Task
+ var removeTasks []*api.Task
tr.store.View(func(readTx store.ReadTx) {
var err error
@@ -59,29 +73,54 @@
tr.taskHistory = clusters[0].Spec.Orchestration.TaskHistoryRetentionLimit
}
- tasks, err = store.FindTasks(readTx, store.ByTaskState(api.TaskStateOrphaned))
+ // On startup, scan the entire store and inspect orphaned tasks from previous life.
+ orphanedTasks, err = store.FindTasks(readTx, store.ByTaskState(api.TaskStateOrphaned))
if err != nil {
log.G(ctx).WithError(err).Error("failed to find Orphaned tasks in task reaper init")
}
+ removeTasks, err = store.FindTasks(readTx, store.ByDesiredState(api.TaskStateRemove))
+ if err != nil {
+ log.G(ctx).WithError(err).Error("failed to find tasks with desired state REMOVE in task reaper init")
+ }
})
- if len(tasks) > 0 {
- for _, t := range tasks {
- // Do not reap service tasks immediately
+ if len(orphanedTasks)+len(removeTasks) > 0 {
+ for _, t := range orphanedTasks {
+ // Do not reap service tasks immediately.
+ // Let them go through the regular history cleanup process
+ // of checking TaskHistoryRetentionLimit.
if t.ServiceID != "" {
continue
}
- tr.orphaned = append(tr.orphaned, t.ID)
+ // Serviceless tasks can be cleaned up right away since they are not attached to a service.
+ tr.cleanup = append(tr.cleanup, t.ID)
}
-
- if len(tr.orphaned) > 0 {
+ // tasks with desired state REMOVE that have progressed beyond COMPLETE can be cleaned up
+ // right away
+ for _, t := range removeTasks {
+ if t.Status.State >= api.TaskStateCompleted {
+ tr.cleanup = append(tr.cleanup, t.ID)
+ }
+ }
+ // Clean up tasks in 'cleanup' right away
+ if len(tr.cleanup) > 0 {
tr.tick()
}
}
+ // Clean up when we hit TaskHistoryRetentionLimit or when the timer expires,
+ // whichever happens first.
timer := time.NewTimer(reaperBatchingInterval)
+ // Watch for:
+ // 1. EventCreateTask for cleaning slots, which is the best time to cleanup that node/slot.
+ // 2. EventUpdateTask for cleaning
+ // - serviceless orphaned tasks (when orchestrator updates the task status to ORPHANED)
+ // - tasks which have desired state REMOVE and have been shut down by the agent
+ // (these are tasks which are associated with slots removed as part of service
+ // remove or scale down)
+ // 3. EventUpdateCluster for TaskHistoryRetentionLimit update.
for {
select {
case event := <-watcher:
@@ -95,14 +134,21 @@
}] = struct{}{}
case api.EventUpdateTask:
t := v.Task
+ // add serviceless orphaned tasks
if t.Status.State >= api.TaskStateOrphaned && t.ServiceID == "" {
- tr.orphaned = append(tr.orphaned, t.ID)
+ tr.cleanup = append(tr.cleanup, t.ID)
+ }
+ // add tasks that have progressed beyond COMPLETE and have desired state REMOVE. These
+ // tasks are associated with slots that were removed as part of a service scale down
+ // or service removal.
+ if t.DesiredState == api.TaskStateRemove && t.Status.State >= api.TaskStateCompleted {
+ tr.cleanup = append(tr.cleanup, t.ID)
}
case api.EventUpdateCluster:
tr.taskHistory = v.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit
}
- if len(tr.dirty)+len(tr.orphaned) > maxDirty {
+ if len(tr.dirty)+len(tr.cleanup) > maxDirty {
timer.Stop()
tr.tick()
} else {
@@ -118,19 +164,22 @@
}
}
+// tick performs task history cleanup.
func (tr *TaskReaper) tick() {
- if len(tr.dirty) == 0 && len(tr.orphaned) == 0 {
+ if len(tr.dirty) == 0 && len(tr.cleanup) == 0 {
return
}
defer func() {
- tr.orphaned = nil
+ tr.cleanup = nil
}()
deleteTasks := make(map[string]struct{})
- for _, tID := range tr.orphaned {
+ for _, tID := range tr.cleanup {
deleteTasks[tID] = struct{}{}
}
+
+ // Check history of dirty tasks for cleanup.
tr.store.View(func(tx store.ReadTx) {
for dirty := range tr.dirty {
service := store.GetService(tx, dirty.ServiceID)
@@ -141,8 +190,8 @@
taskHistory := tr.taskHistory
// If MaxAttempts is set, keep at least one more than
- // that number of tasks. This is necessary reconstruct
- // restart history when the orchestrator starts up.
+ // that number of tasks (this overrides TaskHistoryRetentionLimit).
+ // This is necessary to reconstruct restart history when the orchestrator starts up.
// TODO(aaronl): Consider hiding tasks beyond the normal
// retention limit in the UI.
// TODO(aaronl): There are some ways to cut down the
@@ -156,6 +205,7 @@
taskHistory = int64(service.Spec.Task.Restart.MaxAttempts) + 1
}
+ // Negative value for TaskHistoryRetentionLimit is an indication to never clean up task history.
if taskHistory < 0 {
continue
}
@@ -164,6 +214,7 @@
switch service.Spec.GetMode().(type) {
case *api.ServiceSpec_Replicated:
+ // Clean out the slot for which we received EventCreateTask.
var err error
historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.ServiceID, dirty.Slot))
if err != nil {
@@ -171,6 +222,7 @@
}
case *api.ServiceSpec_Global:
+ // Clean out the node history in case of global services.
tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.NodeID))
if err != nil {
continue
@@ -215,6 +267,7 @@
}
})
+ // Perform cleanup.
if len(deleteTasks) > 0 {
tr.store.Batch(func(batch *store.Batch) error {
for taskID := range deleteTasks {
@@ -229,6 +282,8 @@
// Stop stops the TaskReaper and waits for the main loop to exit.
func (tr *TaskReaper) Stop() {
+ // TODO(dperny) calling stop on the task reaper twice will cause a panic
+ // because we try to close a channel that will already have been closed.
close(tr.stopChan)
<-tr.doneChan
}
diff --git a/vendor/github.com/docker/swarmkit/manager/resourceapi/allocator.go b/vendor/github.com/docker/swarmkit/manager/resourceapi/allocator.go
index 87b01eb..ec19fba 100644
--- a/vendor/github.com/docker/swarmkit/manager/resourceapi/allocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/resourceapi/allocator.go
@@ -10,8 +10,8 @@
"github.com/docker/swarmkit/manager/state/store"
"github.com/docker/swarmkit/protobuf/ptypes"
"golang.org/x/net/context"
- "google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
var (
@@ -50,11 +50,11 @@
}
})
if network == nil {
- return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.Config.Target)
+ return nil, status.Errorf(codes.NotFound, "network %s not found", request.Config.Target)
}
if !network.Spec.Attachable {
- return nil, grpc.Errorf(codes.PermissionDenied, "network %s not manually attachable", request.Config.Target)
+ return nil, status.Errorf(codes.PermissionDenied, "network %s not manually attachable", request.Config.Target)
}
t := &api.Task{
@@ -98,7 +98,7 @@
// - Returns an error if the deletion fails.
func (ra *ResourceAllocator) DetachNetwork(ctx context.Context, request *api.DetachNetworkRequest) (*api.DetachNetworkResponse, error) {
if request.AttachmentID == "" {
- return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
+ return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
nodeInfo, err := ca.RemoteNode(ctx)
@@ -109,10 +109,10 @@
if err := ra.store.Update(func(tx store.Tx) error {
t := store.GetTask(tx, request.AttachmentID)
if t == nil {
- return grpc.Errorf(codes.NotFound, "attachment %s not found", request.AttachmentID)
+ return status.Errorf(codes.NotFound, "attachment %s not found", request.AttachmentID)
}
if t.NodeID != nodeInfo.NodeID {
- return grpc.Errorf(codes.PermissionDenied, "attachment %s doesn't belong to this node", request.AttachmentID)
+ return status.Errorf(codes.PermissionDenied, "attachment %s doesn't belong to this node", request.AttachmentID)
}
return store.DeleteTask(tx, request.AttachmentID)
diff --git a/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go b/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
index 3b1c73f..dab3c66 100644
--- a/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
+++ b/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
@@ -169,7 +169,10 @@
}
}
- if f.t.Spec.LogDriver != nil && f.t.Spec.LogDriver.Name != "none" {
+ // It's possible that the LogDriver object does not carry a name, just some
+ // configuration options. In that case, the plugin filter shouldn't fail to
+ // schedule the task
+ if f.t.Spec.LogDriver != nil && f.t.Spec.LogDriver.Name != "none" && f.t.Spec.LogDriver.Name != "" {
// If there are no log driver types in the list at all, most likely this is
// an older daemon that did not report this information. In this case don't filter
if typeFound, exists := f.pluginExistsOnNode("Log", f.t.Spec.LogDriver.Name, nodePlugins); !exists && typeFound {
@@ -294,6 +297,14 @@
nodePlatform.Architecture = "amd64"
}
+ // normalize "aarch64" architectures to "arm64"
+ if imgPlatform.Architecture == "aarch64" {
+ imgPlatform.Architecture = "arm64"
+ }
+ if nodePlatform.Architecture == "aarch64" {
+ nodePlatform.Architecture = "arm64"
+ }
+
if (imgPlatform.Architecture == "" || imgPlatform.Architecture == nodePlatform.Architecture) && (imgPlatform.OS == "" || imgPlatform.OS == nodePlatform.OS) {
return true
}
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go b/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
index 28c7cfa..56b7c7c 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
@@ -2,6 +2,7 @@
import (
"fmt"
+ "io"
"math"
"math/rand"
"net"
@@ -14,6 +15,7 @@
"github.com/coreos/etcd/raft/raftpb"
"github.com/docker/docker/pkg/signal"
"github.com/docker/go-events"
+ "github.com/docker/go-metrics"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/ca"
"github.com/docker/swarmkit/log"
@@ -34,6 +36,7 @@
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/status"
)
var (
@@ -62,6 +65,9 @@
// work around lint
lostQuorumMessage = "The swarm does not have a leader. It's possible that too few managers are online. Make sure more than half of the managers are online."
errLostQuorum = errors.New(lostQuorumMessage)
+
+ // Timer to capture ProposeValue() latency.
+ proposeLatencyTimer metrics.Timer
)
// LeadershipState indicates whether the node is a leader or follower.
@@ -180,12 +186,9 @@
ClockSource clock.Clock
// SendTimeout is the timeout on the sending messages to other raft
// nodes. Leave this as 0 to get the default value.
- SendTimeout time.Duration
- // LargeSendTimeout is the timeout on the sending snapshots to other raft
- // nodes. Leave this as 0 to get the default value.
- LargeSendTimeout time.Duration
- TLSCredentials credentials.TransportCredentials
- KeyRotator EncryptionKeyRotator
+ SendTimeout time.Duration
+ TLSCredentials credentials.TransportCredentials
+ KeyRotator EncryptionKeyRotator
// DisableStackDump prevents Run from dumping goroutine stacks when the
// store becomes stuck.
DisableStackDump bool
@@ -193,6 +196,9 @@
func init() {
rand.Seed(time.Now().UnixNano())
+ ns := metrics.NewNamespace("swarm", "raft", nil)
+ proposeLatencyTimer = ns.NewTimer("transaction_latency", "Raft transaction latency.")
+ metrics.Register(ns)
}
// NewNode generates a new Raft node
@@ -207,11 +213,6 @@
if opts.SendTimeout == 0 {
opts.SendTimeout = 2 * time.Second
}
- if opts.LargeSendTimeout == 0 {
- // a "slow" 100Mbps connection can send over 240MB data in 20 seconds
- // which is well over the gRPC message limit of 128MB allowed by SwarmKit
- opts.LargeSendTimeout = 20 * time.Second
- }
raftStore := raft.NewMemoryStorage()
@@ -357,7 +358,6 @@
transportConfig := &transport.Config{
HeartbeatInterval: time.Duration(n.Config.ElectionTick) * n.opts.TickInterval,
SendTimeout: n.opts.SendTimeout,
- LargeSendTimeout: n.opts.LargeSendTimeout,
Credentials: n.opts.TLSCredentials,
Raft: n,
}
@@ -664,7 +664,7 @@
if n.snapshotInProgress == nil &&
(n.needsSnapshot(ctx) || raftConfig.SnapshotInterval > 0 &&
n.appliedIndex-n.snapshotMeta.Index >= raftConfig.SnapshotInterval) {
- n.doSnapshot(ctx, raftConfig)
+ n.triggerSnapshot(ctx, raftConfig)
}
if wasLeader && atomic.LoadUint32(&n.signalledLeadership) != 1 {
@@ -706,7 +706,7 @@
// there was a key rotation that took place before while the snapshot
// was in progress - we have to take another snapshot and encrypt with the new key
n.rotationQueued = false
- n.doSnapshot(ctx, raftConfig)
+ n.triggerSnapshot(ctx, raftConfig)
}
case <-n.keyRotator.RotationNotify():
// There are 2 separate checks: rotationQueued, and n.needsSnapshot().
@@ -719,7 +719,7 @@
case n.snapshotInProgress != nil:
n.rotationQueued = true
case n.needsSnapshot(ctx):
- n.doSnapshot(ctx, n.getCurrentRaftConfig())
+ n.triggerSnapshot(ctx, n.getCurrentRaftConfig())
}
case <-ctx.Done():
return nil
@@ -929,11 +929,11 @@
defer n.membershipLock.Unlock()
if !n.IsMember() {
- return nil, grpc.Errorf(codes.FailedPrecondition, "%s", ErrNoRaftMember.Error())
+ return nil, status.Errorf(codes.FailedPrecondition, "%s", ErrNoRaftMember.Error())
}
if !n.isLeader() {
- return nil, grpc.Errorf(codes.FailedPrecondition, "%s", ErrLostLeadership.Error())
+ return nil, status.Errorf(codes.FailedPrecondition, "%s", ErrLostLeadership.Error())
}
remoteAddr := req.Addr
@@ -944,7 +944,7 @@
requestHost, requestPort, err := net.SplitHostPort(remoteAddr)
if err != nil {
- return nil, grpc.Errorf(codes.InvalidArgument, "invalid address %s in raft join request", remoteAddr)
+ return nil, status.Errorf(codes.InvalidArgument, "invalid address %s in raft join request", remoteAddr)
}
requestIP := net.ParseIP(requestHost)
@@ -1118,7 +1118,7 @@
// membership to an active member of the raft
func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResponse, error) {
if req.Node == nil {
- return nil, grpc.Errorf(codes.InvalidArgument, "no node information provided")
+ return nil, status.Errorf(codes.InvalidArgument, "no node information provided")
}
nodeInfo, err := ca.RemoteNode(ctx)
@@ -1302,6 +1302,82 @@
return n.transport.UpdatePeerAddr(id, newAddr)
}
+// StreamRaftMessage is the server endpoint for streaming Raft messages.
+// It accepts a stream of raft messages to be processed on this raft member,
+// returning a StreamRaftMessageResponse when processing of the streamed
+// messages is complete.
+// It is called from the Raft leader, which uses it to stream messages
+// to this raft member.
+// A single stream corresponds to a single raft message,
+// which may be disassembled and streamed by the sender
+// as individual messages. Therefore, each of the messages
+// received by the stream will have the same raft message type and index.
+// Currently, only messages of type raftpb.MsgSnap can be disassembled, sent
+// and received on the stream.
+func (n *Node) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error {
+ // recvdMsg is the current message received from the stream.
+ // assembledMessage is where the data from recvdMsg is appended to.
+ var recvdMsg, assembledMessage *api.StreamRaftMessageRequest
+ var err error
+
+ // First message index.
+ var raftMsgIndex uint64
+
+ for {
+ recvdMsg, err = stream.Recv()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ log.G(stream.Context()).WithError(err).Error("error while reading from stream")
+ return err
+ }
+
+ // Initialize the message to be used for assembling
+ // the raft message.
+ if assembledMessage == nil {
+ // For all message types except raftpb.MsgSnap,
+ // we don't expect more than a single message
+ // on the stream so we'll get an EOF on the next Recv()
+ // and go on to process the received message.
+ assembledMessage = recvdMsg
+ raftMsgIndex = recvdMsg.Message.Index
+ continue
+ }
+
+ // Verify raft message index.
+ if recvdMsg.Message.Index != raftMsgIndex {
+ errMsg := fmt.Sprintf("Raft message chunk with index %d is different from the previously received raft message index %d",
+ recvdMsg.Message.Index, raftMsgIndex)
+ log.G(stream.Context()).Errorf(errMsg)
+ return status.Errorf(codes.InvalidArgument, "%s", errMsg)
+ }
+
+ // Verify that multiple messages received on a stream
+ // can only be of type raftpb.MsgSnap.
+ if recvdMsg.Message.Type != raftpb.MsgSnap {
+ errMsg := fmt.Sprintf("Raft message chunk is not of type %d",
+ raftpb.MsgSnap)
+ log.G(stream.Context()).Errorf(errMsg)
+ return status.Errorf(codes.InvalidArgument, "%s", errMsg)
+ }
+
+ // Append the received snapshot data.
+ assembledMessage.Message.Snapshot.Data = append(assembledMessage.Message.Snapshot.Data, recvdMsg.Message.Snapshot.Data...)
+ }
+
+ // We should have the complete snapshot. Verify and process.
+ if err == io.EOF {
+ _, err = n.ProcessRaftMessage(stream.Context(), &api.ProcessRaftMessageRequest{Message: assembledMessage.Message})
+ if err == nil {
+ // Translate the response of ProcessRaftMessage() from
+ // ProcessRaftMessageResponse to StreamRaftMessageResponse if needed.
+ return stream.SendAndClose(&api.StreamRaftMessageResponse{})
+ }
+ }
+
+ return err
+}
+
// ProcessRaftMessage calls 'Step' which advances the
// raft state machine with the provided message on the
// receiving node
@@ -1315,7 +1391,7 @@
// a node in the remove set
if n.cluster.IsIDRemoved(msg.Message.From) {
n.processRaftMessageLogger(ctx, msg).Debug("received message from removed member")
- return nil, grpc.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error())
+ return nil, status.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error())
}
ctx, cancel := n.WithContext(ctx)
@@ -1393,7 +1469,7 @@
member := n.cluster.GetMember(msg.RaftID)
if member == nil {
- return nil, grpc.Errorf(codes.NotFound, "member %x not found", msg.RaftID)
+ return nil, status.Errorf(codes.NotFound, "member %x not found", msg.RaftID)
}
return &api.ResolveAddressResponse{Addr: member.Addr}, nil
}
@@ -1497,9 +1573,11 @@
// ProposeValue calls Propose on the underlying raft library(etcd/raft) and waits
// on the commit log action before returning a result
func (n *Node) ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error {
+ defer metrics.StartTimer(proposeLatencyTimer)()
ctx, cancel := n.WithContext(ctx)
defer cancel()
_, err := n.processInternalRaftRequest(ctx, &api.InternalRaftRequest{Action: storeAction}, cb)
+
if err != nil {
return err
}
@@ -1808,12 +1886,13 @@
}
if !n.wait.trigger(r.ID, r) {
- log.G(ctx).Errorf("wait not found for raft request id %x", r.ID)
-
// There was no wait on this ID, meaning we don't have a
// transaction in progress that would be committed to the
- // memory store by the "trigger" call. Either a different node
- // wrote this to raft, or we wrote it before losing the leader
+ // memory store by the "trigger" call. This could mean that:
+ // 1. Startup is in progress, and the raft WAL is being parsed,
+ // processed and applied to the store, or
+ // 2. Either a different node wrote this to raft,
+ // or we wrote it before losing the leader
// position and cancelling the transaction. This entry still needs
// to be committed since other nodes have already committed it.
// Create a new transaction to commit this entry.
@@ -1827,7 +1906,6 @@
err := n.memoryStore.ApplyStoreActions(r.Action)
if err != nil {
log.G(ctx).WithError(err).Error("failed to apply actions from raft")
- // TODO(anshul) return err here ?
}
}
return nil
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/storage.go b/vendor/github.com/docker/swarmkit/manager/state/raft/storage.go
index d12c43a..f538317 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/storage.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/storage.go
@@ -5,6 +5,7 @@
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
+ "github.com/docker/go-metrics"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/manager/encryption"
@@ -15,6 +16,18 @@
"golang.org/x/net/context"
)
+var (
+ // Snapshot create latency timer.
+ snapshotLatencyTimer metrics.Timer
+)
+
+func init() {
+ ns := metrics.NewNamespace("swarm", "raft", nil)
+ snapshotLatencyTimer = ns.NewTimer("snapshot_latency",
+ "Raft snapshot create latency.")
+ metrics.Register(ns)
+}
+
func (n *Node) readFromDisk(ctx context.Context) (*raftpb.Snapshot, storage.WALData, error) {
keys := n.keyRotator.GetKeys()
@@ -169,7 +182,7 @@
return raft.Peer{ID: n.Config.ID, Context: metadata}, nil
}
-func (n *Node) doSnapshot(ctx context.Context, raftConfig api.RaftConfig) {
+func (n *Node) triggerSnapshot(ctx context.Context, raftConfig api.RaftConfig) {
snapshot := api.Snapshot{Version: api.Snapshot_V0}
for _, member := range n.cluster.Members() {
snapshot.Membership.Members = append(snapshot.Membership.Members,
@@ -185,6 +198,9 @@
n.asyncTasks.Add(1)
n.snapshotInProgress = make(chan raftpb.SnapshotMetadata, 1) // buffered in case Shutdown is called during the snapshot
go func(appliedIndex uint64, snapshotMeta raftpb.SnapshotMetadata) {
+ // Deferred latency capture.
+ defer metrics.StartTimer(snapshotLatencyTimer)()
+
defer func() {
n.asyncTasks.Done()
n.snapshotInProgress <- snapshotMeta
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
index 8c7ca75..eb849c0 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
@@ -18,6 +18,11 @@
"github.com/pkg/errors"
)
+const (
+ // GRPCMaxMsgSize is the max allowed gRPC message size for raft messages.
+ GRPCMaxMsgSize = 4 << 20
+)
+
type peer struct {
id uint64
@@ -132,17 +137,112 @@
return resp.Addr, nil
}
-func (p *peer) sendProcessMessage(ctx context.Context, m raftpb.Message) error {
- timeout := p.tr.config.SendTimeout
- // if a snapshot is being sent, set timeout to LargeSendTimeout because
- // sending snapshots can take more time than other messages sent between peers.
- // The same applies to AppendEntries as well, where messages can get large.
- if m.Type == raftpb.MsgSnap || m.Type == raftpb.MsgApp {
- timeout = p.tr.config.LargeSendTimeout
+// Returns the raft message struct size (not including the payload size) for the given raftpb.Message.
+// The payload is typically the snapshot or append entries.
+func raftMessageStructSize(m *raftpb.Message) int {
+ return (&api.ProcessRaftMessageRequest{Message: m}).Size() - len(m.Snapshot.Data)
+}
+
+// Returns the max allowable payload based on GRPCMaxMsgSize and
+// the struct size for the given raftpb.Message.
+func raftMessagePayloadSize(m *raftpb.Message) int {
+ return GRPCMaxMsgSize - raftMessageStructSize(m)
+}
+
+// Split a large raft message into smaller messages.
+// Currently this means splitting the []Snapshot.Data into chunks whose size
+// is dictated by GRPCMaxMsgSize.
+func splitSnapshotData(ctx context.Context, m *raftpb.Message) []api.StreamRaftMessageRequest {
+ var messages []api.StreamRaftMessageRequest
+ if m.Type != raftpb.MsgSnap {
+ return messages
}
- ctx, cancel := context.WithTimeout(ctx, timeout)
+
+ // get the size of the data to be split.
+ size := len(m.Snapshot.Data)
+
+ // Get the max payload size.
+ payloadSize := raftMessagePayloadSize(m)
+
+ // split the snapshot into smaller messages.
+ for snapDataIndex := 0; snapDataIndex < size; {
+ chunkSize := size - snapDataIndex
+ if chunkSize > payloadSize {
+ chunkSize = payloadSize
+ }
+
+ raftMsg := *m
+
+ // sub-slice for this snapshot chunk.
+ raftMsg.Snapshot.Data = m.Snapshot.Data[snapDataIndex : snapDataIndex+chunkSize]
+
+ snapDataIndex += chunkSize
+
+ // add message to the list of messages to be sent.
+ msg := api.StreamRaftMessageRequest{Message: &raftMsg}
+ messages = append(messages, msg)
+ }
+
+ return messages
+}
+
+// Function to check if this message needs to be split to be streamed
+// (because it is larger than GRPCMaxMsgSize).
+// Returns true if the message type is MsgSnap
+// and its size is larger than GRPCMaxMsgSize.
+func needsSplitting(m *raftpb.Message) bool {
+ raftMsg := api.ProcessRaftMessageRequest{Message: m}
+ return m.Type == raftpb.MsgSnap && raftMsg.Size() > GRPCMaxMsgSize
+}
+
+func (p *peer) sendProcessMessage(ctx context.Context, m raftpb.Message) error {
+ ctx, cancel := context.WithTimeout(ctx, p.tr.config.SendTimeout)
defer cancel()
- _, err := api.NewRaftClient(p.conn()).ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m})
+
+ var err error
+ var stream api.Raft_StreamRaftMessageClient
+ stream, err = api.NewRaftClient(p.conn()).StreamRaftMessage(ctx)
+
+ if err == nil {
+ // Split the message if needed.
+ // Currently only supported for MsgSnap.
+ var msgs []api.StreamRaftMessageRequest
+ if needsSplitting(&m) {
+ msgs = splitSnapshotData(ctx, &m)
+ } else {
+ raftMsg := api.StreamRaftMessageRequest{Message: &m}
+ msgs = append(msgs, raftMsg)
+ }
+
+ // Stream
+ for _, msg := range msgs {
+ err = stream.Send(&msg)
+ if err != nil {
+ log.G(ctx).WithError(err).Error("error streaming message to peer")
+ stream.CloseAndRecv()
+ break
+ }
+ }
+
+ // Finished sending all the messages.
+ // Close and receive response.
+ if err == nil {
+ _, err = stream.CloseAndRecv()
+
+ if err != nil {
+ log.G(ctx).WithError(err).Error("error receiving response")
+ }
+ }
+ } else {
+ log.G(ctx).WithError(err).Error("error sending message to peer")
+ }
+
+ // Try doing a regular rpc if the receiver doesn't support streaming.
+ if grpc.Code(err) == codes.Unimplemented {
+ _, err = api.NewRaftClient(p.conn()).ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m})
+ }
+
+ // Handle errors.
if grpc.Code(err) == codes.NotFound && grpc.ErrorDesc(err) == membership.ErrMemberRemoved.Error() {
p.tr.config.NodeRemoved()
}
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
index 6f096ef..b741c4a 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
@@ -36,7 +36,6 @@
type Config struct {
HeartbeatInterval time.Duration
SendTimeout time.Duration
- LargeSendTimeout time.Duration
Credentials credentials.TransportCredentials
RaftID string
diff --git a/vendor/github.com/docker/swarmkit/manager/state/store/memory.go b/vendor/github.com/docker/swarmkit/manager/state/store/memory.go
index 01245a6..e64565f 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/store/memory.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/store/memory.go
@@ -11,6 +11,7 @@
"time"
"github.com/docker/go-events"
+ "github.com/docker/go-metrics"
"github.com/docker/swarmkit/api"
pb "github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/state"
@@ -76,8 +77,38 @@
// WedgeTimeout is the maximum amount of time the store lock may be
// held before declaring a suspected deadlock.
WedgeTimeout = 30 * time.Second
+
+ // update()/write tx latency timer.
+ updateLatencyTimer metrics.Timer
+
+ // view()/read tx latency timer.
+ viewLatencyTimer metrics.Timer
+
+ // lookup() latency timer.
+ lookupLatencyTimer metrics.Timer
+
+ // Batch() latency timer.
+ batchLatencyTimer metrics.Timer
+
+ // timer to capture the duration for which the memory store mutex is locked.
+ storeLockDurationTimer metrics.Timer
)
+func init() {
+ ns := metrics.NewNamespace("swarm", "store", nil)
+ updateLatencyTimer = ns.NewTimer("write_tx_latency",
+ "Raft store write tx latency.")
+ viewLatencyTimer = ns.NewTimer("read_tx_latency",
+ "Raft store read tx latency.")
+ lookupLatencyTimer = ns.NewTimer("lookup_latency",
+ "Raft store read latency.")
+ batchLatencyTimer = ns.NewTimer("batch_latency",
+ "Raft store batch latency.")
+ storeLockDurationTimer = ns.NewTimer("memory_store_lock_duration",
+ "Duration for which the raft memory store lock was held.")
+ metrics.Register(ns)
+}
+
func register(os ObjectStoreConfig) {
objectStorers = append(objectStorers, os)
schema.Tables[os.Table.Name] = os.Table
@@ -94,8 +125,13 @@
m.lockedAt.Store(time.Now())
}
+// Unlocks the timedMutex and captures the duration
+// for which it was locked in a metric.
func (m *timedMutex) Unlock() {
+ unlockedTimestamp := m.lockedAt.Load()
m.Mutex.Unlock()
+ lockedFor := time.Since(unlockedTimestamp.(time.Time))
+ storeLockDurationTimer.Update(lockedFor)
m.lockedAt.Store(time.Time{})
}
@@ -184,6 +220,7 @@
// View executes a read transaction.
func (s *MemoryStore) View(cb func(ReadTx)) {
+ defer metrics.StartTimer(viewLatencyTimer)()
memDBTx := s.memDB.Txn(false)
readTx := readTx{
@@ -280,6 +317,7 @@
}
func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error {
+ defer metrics.StartTimer(updateLatencyTimer)()
s.updateLock.Lock()
memDBTx := s.memDB.Txn(true)
@@ -329,7 +367,6 @@
}
s.updateLock.Unlock()
return err
-
}
func (s *MemoryStore) updateLocal(cb func(Tx) error) error {
@@ -458,6 +495,7 @@
// If Batch returns an error, no guarantees are made about how many updates
// were committed successfully.
func (s *MemoryStore) Batch(cb func(*Batch) error) error {
+ defer metrics.StartTimer(batchLatencyTimer)()
s.updateLock.Lock()
batch := Batch{
@@ -498,6 +536,7 @@
// lookup is an internal typed wrapper around memdb.
func (tx readTx) lookup(table, index, id string) api.StoreObject {
+ defer metrics.StartTimer(lookupLatencyTimer)()
j, err := tx.memDBTx.First(table, index, id)
if err != nil {
return nil
diff --git a/vendor/github.com/docker/swarmkit/manager/watchapi/watch.go b/vendor/github.com/docker/swarmkit/manager/watchapi/watch.go
index 53bed49..223dcb5 100644
--- a/vendor/github.com/docker/swarmkit/manager/watchapi/watch.go
+++ b/vendor/github.com/docker/swarmkit/manager/watchapi/watch.go
@@ -1,12 +1,11 @@
package watchapi
import (
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
-
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/manager/state"
"github.com/docker/swarmkit/manager/state/store"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// Watch starts a stream that returns any changes to objects that match
@@ -26,7 +25,7 @@
watchArgs, err := api.ConvertWatchArgs(request.Entries)
if err != nil {
- return grpc.Errorf(codes.InvalidArgument, "%s", err.Error())
+ return status.Errorf(codes.InvalidArgument, "%s", err.Error())
}
watchArgs = append(watchArgs, state.EventCommit{})
diff --git a/vendor/github.com/fluent/fluent-logger-golang/README.md b/vendor/github.com/fluent/fluent-logger-golang/README.md
index a6b9902..cbb8bdc 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/README.md
+++ b/vendor/github.com/fluent/fluent-logger-golang/README.md
@@ -21,7 +21,7 @@
GoDoc: http://godoc.org/github.com/fluent/fluent-logger-golang/fluent
-##Example
+## Example
```go
package main
@@ -44,14 +44,14 @@
"hoge": "hoge",
}
error := logger.Post(tag, data)
- // error := logger.Post(tag, time.Time.Now(), data)
+ // error := logger.PostWithTime(tag, time.Now(), data)
if error != nil {
panic(error)
}
}
```
-`data` must be a value like `map[string]literal`, `map[string]interface{}` or `struct`. Logger refers tags `msg` or `codec` of each fields of structs.
+`data` must be a value like `map[string]literal`, `map[string]interface{}`, `struct` or [`msgp.Marshaler`](http://godoc.org/github.com/tinylib/msgp/msgp#Marshaler). Logger refers tags `msg` or `codec` of each fields of structs.
## Setting config values
@@ -59,6 +59,11 @@
f := fluent.New(fluent.Config{FluentPort: 80, FluentHost: "example.com"})
```
+### WriteTimeout
+
+Sets the timeout for Write call of logger.Post.
+Since the default is zero value, Write will not time out.
+
## Tests
```
go test
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
index 655f623..4693c5c 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
@@ -4,13 +4,14 @@
"encoding/json"
"errors"
"fmt"
- "io"
"math"
"net"
"reflect"
"strconv"
"sync"
"time"
+
+ "github.com/tinylib/msgp/msgp"
)
const (
@@ -19,10 +20,14 @@
defaultSocketPath = ""
defaultPort = 24224
defaultTimeout = 3 * time.Second
+ defaultWriteTimeout = time.Duration(0) // Write() will not time out
defaultBufferLimit = 8 * 1024 * 1024
defaultRetryWait = 500
defaultMaxRetry = 13
defaultReconnectWaitIncreRate = 1.5
+ // Default sub-second precision value to false since it is only compatible
+ // with fluentd versions v0.14 and above.
+ defaultSubSecondPrecision = false
)
type Config struct {
@@ -31,12 +36,17 @@
FluentNetwork string `json:"fluent_network"`
FluentSocketPath string `json:"fluent_socket_path"`
Timeout time.Duration `json:"timeout"`
+ WriteTimeout time.Duration `json:"write_timeout"`
BufferLimit int `json:"buffer_limit"`
RetryWait int `json:"retry_wait"`
MaxRetry int `json:"max_retry"`
TagPrefix string `json:"tag_prefix"`
AsyncConnect bool `json:"async_connect"`
MarshalAsJSON bool `json:"marshal_as_json"`
+
+ // Sub-second precision timestamps are only possible for those using fluentd
+ // v0.14+ and serializing their messages with msgpack.
+ SubSecondPrecision bool `json:"sub_second_precision"`
}
type Fluent struct {
@@ -46,7 +56,7 @@
pending []byte
muconn sync.Mutex
- conn io.WriteCloser
+ conn net.Conn
reconnecting bool
}
@@ -67,6 +77,9 @@
if config.Timeout == 0 {
config.Timeout = defaultTimeout
}
+ if config.WriteTimeout == 0 {
+ config.WriteTimeout = defaultWriteTimeout
+ }
if config.BufferLimit == 0 {
config.BufferLimit = defaultBufferLimit
}
@@ -90,9 +103,6 @@
//
// Examples:
//
-// // send string
-// f.Post("tag_name", "data")
-//
// // send map[string]
// mapStringData := map[string]string{
// "foo": "bar",
@@ -124,6 +134,10 @@
tag = f.TagPrefix + "." + tag
}
+ if m, ok := message.(msgp.Marshaler); ok {
+ return f.EncodeAndPostData(tag, tm, m)
+ }
+
msg := reflect.ValueOf(message)
msgtype := msg.Type()
@@ -203,6 +217,9 @@
msg := Message{Tag: tag, Time: timeUnix, Record: message}
chunk := &MessageChunk{message: msg}
data, err = json.Marshal(chunk)
+ } else if f.Config.SubSecondPrecision {
+ msg := &MessageExt{Tag: tag, Time: EventTime(tm), Record: message}
+ data, err = msg.MarshalMsg(nil)
} else {
msg := &Message{Tag: tag, Time: timeUnix, Record: message}
data, err = msg.MarshalMsg(nil)
@@ -297,6 +314,12 @@
var err error
if len(f.pending) > 0 {
+ t := f.Config.WriteTimeout
+ if time.Duration(0) < t {
+ f.conn.SetWriteDeadline(time.Now().Add(t))
+ } else {
+ f.conn.SetWriteDeadline(time.Time{})
+ }
_, err = f.conn.Write(f.pending)
if err != nil {
f.conn.Close()
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
index 268d614..158e22d 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
@@ -2,6 +2,12 @@
package fluent
+import (
+ "time"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
//msgp:tuple Entry
type Entry struct {
Time int64 `msg:"time"`
@@ -22,3 +28,69 @@
Record interface{} `msg:"record"`
Option interface{} `msg:"option"`
}
+
+//msgp:tuple MessageExt
+type MessageExt struct {
+ Tag string `msg:"tag"`
+ Time EventTime `msg:"time,extension"`
+ Record interface{} `msg:"record"`
+ Option interface{} `msg:"option"`
+}
+
+// EventTime is an extension to the serialized time value. It builds in support
+// for sub-second (nanosecond) precision in serialized timestamps.
+//
+// You can find the full specification for the msgpack message payload here:
+// https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1.
+//
+// You can find more information on msgpack extension types here:
+// https://github.com/tinylib/msgp/wiki/Using-Extensions.
+type EventTime time.Time
+
+const (
+ extensionType = 0
+ length = 8
+)
+
+func init() {
+ msgp.RegisterExtension(extensionType, func() msgp.Extension { return new(EventTime) })
+}
+
+func (t *EventTime) ExtensionType() int8 { return extensionType }
+
+func (t *EventTime) Len() int { return length }
+
+func (t *EventTime) MarshalBinaryTo(b []byte) error {
+ // Unwrap to Golang time
+ goTime := time.Time(*t)
+
+ // There's no support for timezones in fluentd's protocol for EventTime.
+ // Convert to UTC.
+ utc := goTime.UTC()
+
+ // Warning! Converting seconds to an int32 is a lossy operation. This code
+ // will hit the "Year 2038" problem.
+ sec := int32(utc.Unix())
+ nsec := utc.Nanosecond()
+
+ // Fill the buffer with 4 bytes for the second component of the timestamp.
+ b[0] = byte(sec >> 24)
+ b[1] = byte(sec >> 16)
+ b[2] = byte(sec >> 8)
+ b[3] = byte(sec)
+
+ // Fill the buffer with 4 bytes for the nanosecond component of the
+ // timestamp.
+ b[4] = byte(nsec >> 24)
+ b[5] = byte(nsec >> 16)
+ b[6] = byte(nsec >> 8)
+ b[7] = byte(nsec)
+
+ return nil
+}
+
+// UnmarshalBinary is not implemented since decoding messages is not supported
+// by this library.
+func (t *EventTime) UnmarshalBinary(b []byte) error {
+ return nil
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
index afb9d6d..5b88a68 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
@@ -10,13 +10,13 @@
// DecodeMsg implements msgp.Decodable
func (z *Entry) DecodeMsg(dc *msgp.Reader) (err error) {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ var zxvk uint32
+ zxvk, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
+ if zxvk != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zxvk}
return
}
z.Time, err = dc.ReadInt64()
@@ -32,9 +32,10 @@
// EncodeMsg implements msgp.Encodable
func (z Entry) EncodeMsg(en *msgp.Writer) (err error) {
- err = en.WriteArrayHeader(2)
+ // array header, size 2
+ err = en.Append(0x92)
if err != nil {
- return
+ return err
}
err = en.WriteInt64(z.Time)
if err != nil {
@@ -50,7 +51,8 @@
// MarshalMsg implements msgp.Marshaler
func (z Entry) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendArrayHeader(o, 2)
+ // array header, size 2
+ o = append(o, 0x92)
o = msgp.AppendInt64(o, z.Time)
o, err = msgp.AppendIntf(o, z.Record)
if err != nil {
@@ -61,16 +63,14 @@
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Entry) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
- return
- }
+ var zbzg uint32
+ zbzg, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zbzg != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zbzg}
+ return
}
z.Time, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
@@ -84,51 +84,52 @@
return
}
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Entry) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Record)
+ s = 1 + msgp.Int64Size + msgp.GuessSize(z.Record)
return
}
// DecodeMsg implements msgp.Decodable
func (z *Forward) DecodeMsg(dc *msgp.Reader) (err error) {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ var zcmr uint32
+ zcmr, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 3 {
- err = msgp.ArrayError{Wanted: 3, Got: ssz}
+ if zcmr != 3 {
+ err = msgp.ArrayError{Wanted: 3, Got: zcmr}
return
}
z.Tag, err = dc.ReadString()
if err != nil {
return
}
- var xsz uint32
- xsz, err = dc.ReadArrayHeader()
+ var zajw uint32
+ zajw, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if cap(z.Entries) >= int(xsz) {
- z.Entries = z.Entries[:xsz]
+ if cap(z.Entries) >= int(zajw) {
+ z.Entries = (z.Entries)[:zajw]
} else {
- z.Entries = make([]Entry, xsz)
+ z.Entries = make([]Entry, zajw)
}
- for xvk := range z.Entries {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ for zbai := range z.Entries {
+ var zwht uint32
+ zwht, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
+ if zwht != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zwht}
return
}
- z.Entries[xvk].Time, err = dc.ReadInt64()
+ z.Entries[zbai].Time, err = dc.ReadInt64()
if err != nil {
return
}
- z.Entries[xvk].Record, err = dc.ReadIntf()
+ z.Entries[zbai].Record, err = dc.ReadIntf()
if err != nil {
return
}
@@ -142,9 +143,10 @@
// EncodeMsg implements msgp.Encodable
func (z *Forward) EncodeMsg(en *msgp.Writer) (err error) {
- err = en.WriteArrayHeader(3)
+ // array header, size 3
+ err = en.Append(0x93)
if err != nil {
- return
+ return err
}
err = en.WriteString(z.Tag)
if err != nil {
@@ -154,16 +156,17 @@
if err != nil {
return
}
- for xvk := range z.Entries {
- err = en.WriteArrayHeader(2)
+ for zbai := range z.Entries {
+ // array header, size 2
+ err = en.Append(0x92)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.Entries[zbai].Time)
if err != nil {
return
}
- err = en.WriteInt64(z.Entries[xvk].Time)
- if err != nil {
- return
- }
- err = en.WriteIntf(z.Entries[xvk].Record)
+ err = en.WriteIntf(z.Entries[zbai].Record)
if err != nil {
return
}
@@ -178,13 +181,15 @@
// MarshalMsg implements msgp.Marshaler
func (z *Forward) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendArrayHeader(o, 3)
+ // array header, size 3
+ o = append(o, 0x93)
o = msgp.AppendString(o, z.Tag)
o = msgp.AppendArrayHeader(o, uint32(len(z.Entries)))
- for xvk := range z.Entries {
- o = msgp.AppendArrayHeader(o, 2)
- o = msgp.AppendInt64(o, z.Entries[xvk].Time)
- o, err = msgp.AppendIntf(o, z.Entries[xvk].Record)
+ for zbai := range z.Entries {
+ // array header, size 2
+ o = append(o, 0x92)
+ o = msgp.AppendInt64(o, z.Entries[zbai].Time)
+ o, err = msgp.AppendIntf(o, z.Entries[zbai].Record)
if err != nil {
return
}
@@ -198,48 +203,44 @@
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Forward) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 3 {
- err = msgp.ArrayError{Wanted: 3, Got: ssz}
- return
- }
+ var zhct uint32
+ zhct, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zhct != 3 {
+ err = msgp.ArrayError{Wanted: 3, Got: zhct}
+ return
}
z.Tag, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
- var xsz uint32
- xsz, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zcua uint32
+ zcua, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
- if cap(z.Entries) >= int(xsz) {
- z.Entries = z.Entries[:xsz]
+ if cap(z.Entries) >= int(zcua) {
+ z.Entries = (z.Entries)[:zcua]
} else {
- z.Entries = make([]Entry, xsz)
+ z.Entries = make([]Entry, zcua)
}
- for xvk := range z.Entries {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
- return
- }
- }
- z.Entries[xvk].Time, bts, err = msgp.ReadInt64Bytes(bts)
+ for zbai := range z.Entries {
+ var zxhx uint32
+ zxhx, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
- z.Entries[xvk].Record, bts, err = msgp.ReadIntfBytes(bts)
+ if zxhx != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zxhx}
+ return
+ }
+ z.Entries[zbai].Time, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ z.Entries[zbai].Record, bts, err = msgp.ReadIntfBytes(bts)
if err != nil {
return
}
@@ -252,10 +253,11 @@
return
}
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Forward) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize
- for xvk := range z.Entries {
- s += msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Entries[xvk].Record)
+ s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize
+ for zbai := range z.Entries {
+ s += 1 + msgp.Int64Size + msgp.GuessSize(z.Entries[zbai].Record)
}
s += msgp.GuessSize(z.Option)
return
@@ -263,13 +265,13 @@
// DecodeMsg implements msgp.Decodable
func (z *Message) DecodeMsg(dc *msgp.Reader) (err error) {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ var zlqf uint32
+ zlqf, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 4 {
- err = msgp.ArrayError{Wanted: 4, Got: ssz}
+ if zlqf != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zlqf}
return
}
z.Tag, err = dc.ReadString()
@@ -293,9 +295,10 @@
// EncodeMsg implements msgp.Encodable
func (z *Message) EncodeMsg(en *msgp.Writer) (err error) {
- err = en.WriteArrayHeader(4)
+ // array header, size 4
+ err = en.Append(0x94)
if err != nil {
- return
+ return err
}
err = en.WriteString(z.Tag)
if err != nil {
@@ -319,7 +322,8 @@
// MarshalMsg implements msgp.Marshaler
func (z *Message) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendArrayHeader(o, 4)
+ // array header, size 4
+ o = append(o, 0x94)
o = msgp.AppendString(o, z.Tag)
o = msgp.AppendInt64(o, z.Time)
o, err = msgp.AppendIntf(o, z.Record)
@@ -335,16 +339,14 @@
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 4 {
- err = msgp.ArrayError{Wanted: 4, Got: ssz}
- return
- }
+ var zdaf uint32
+ zdaf, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zdaf != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zdaf}
+ return
}
z.Tag, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
@@ -366,7 +368,122 @@
return
}
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Message) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
+ s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *MessageExt) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zpks uint32
+ zpks, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if zpks != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zpks}
+ return
+ }
+ z.Tag, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ err = dc.ReadExtension(&z.Time)
+ if err != nil {
+ return
+ }
+ z.Record, err = dc.ReadIntf()
+ if err != nil {
+ return
+ }
+ z.Option, err = dc.ReadIntf()
+ if err != nil {
+ return
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *MessageExt) EncodeMsg(en *msgp.Writer) (err error) {
+ // array header, size 4
+ err = en.Append(0x94)
+ if err != nil {
+ return err
+ }
+ err = en.WriteString(z.Tag)
+ if err != nil {
+ return
+ }
+ err = en.WriteExtension(&z.Time)
+ if err != nil {
+ return
+ }
+ err = en.WriteIntf(z.Record)
+ if err != nil {
+ return
+ }
+ err = en.WriteIntf(z.Option)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *MessageExt) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // array header, size 4
+ o = append(o, 0x94)
+ o = msgp.AppendString(o, z.Tag)
+ o, err = msgp.AppendExtension(o, &z.Time)
+ if err != nil {
+ return
+ }
+ o, err = msgp.AppendIntf(o, z.Record)
+ if err != nil {
+ return
+ }
+ o, err = msgp.AppendIntf(o, z.Option)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *MessageExt) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zjfb uint32
+ zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zjfb != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zjfb}
+ return
+ }
+ z.Tag, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ bts, err = msgp.ReadExtensionBytes(bts, &z.Time)
+ if err != nil {
+ return
+ }
+ z.Record, bts, err = msgp.ReadIntfBytes(bts)
+ if err != nil {
+ return
+ }
+ z.Option, bts, err = msgp.ReadIntfBytes(bts)
+ if err != nil {
+ return
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *MessageExt) Msgsize() (s int) {
+ s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.ExtensionPrefixSize + z.Time.Len() + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
return
}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go
new file mode 100644
index 0000000..dcf5baa
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go
@@ -0,0 +1,7 @@
+package fluent
+
+//go:generate msgp
+type TestMessage struct {
+ Foo string `msg:"foo" json:"foo,omitempty"`
+ Hoge string `msg:"hoge" json:"hoge,omitempty"`
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go
new file mode 100644
index 0000000..17a45e2
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go
@@ -0,0 +1,125 @@
+package fluent
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *TestMessage) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zxvk uint32
+ zxvk, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zxvk > 0 {
+ zxvk--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "foo":
+ z.Foo, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "hoge":
+ z.Hoge, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z TestMessage) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "foo"
+ err = en.Append(0x82, 0xa3, 0x66, 0x6f, 0x6f)
+ if err != nil {
+ return err
+ }
+ err = en.WriteString(z.Foo)
+ if err != nil {
+ return
+ }
+ // write "hoge"
+ err = en.Append(0xa4, 0x68, 0x6f, 0x67, 0x65)
+ if err != nil {
+ return err
+ }
+ err = en.WriteString(z.Hoge)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z TestMessage) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "foo"
+ o = append(o, 0x82, 0xa3, 0x66, 0x6f, 0x6f)
+ o = msgp.AppendString(o, z.Foo)
+ // string "hoge"
+ o = append(o, 0xa4, 0x68, 0x6f, 0x67, 0x65)
+ o = msgp.AppendString(o, z.Hoge)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *TestMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zbzg uint32
+ zbzg, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zbzg > 0 {
+ zbzg--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "foo":
+ z.Foo, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "hoge":
+ z.Hoge, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z TestMessage) Msgsize() (s int) {
+ s = 1 + 4 + msgp.StringPrefixSize + len(z.Foo) + 5 + msgp.StringPrefixSize + len(z.Hoge)
+ return
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
index 8904726..c6ec7e4 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
@@ -1,3 +1,3 @@
package fluent
-const Version = "1.2.1"
+const Version = "1.3.0"
diff --git a/vendor/github.com/stevvooe/ttrpc/config.go b/vendor/github.com/stevvooe/ttrpc/config.go
new file mode 100644
index 0000000..23bc603
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/config.go
@@ -0,0 +1,23 @@
+package ttrpc
+
+import "github.com/pkg/errors"
+
+type serverConfig struct {
+ handshaker Handshaker
+}
+
+type ServerOpt func(*serverConfig) error
+
+// WithServerHandshaker can be passed to NewServer to ensure that the
+// handshaker is called before every connection attempt.
+//
+// Only one handshaker is allowed per server.
+func WithServerHandshaker(handshaker Handshaker) ServerOpt {
+ return func(c *serverConfig) error {
+ if c.handshaker != nil {
+ return errors.New("only one handshaker allowed per server")
+ }
+ c.handshaker = handshaker
+ return nil
+ }
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/handshake.go b/vendor/github.com/stevvooe/ttrpc/handshake.go
new file mode 100644
index 0000000..a08ae8e
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/handshake.go
@@ -0,0 +1,34 @@
+package ttrpc
+
+import (
+ "context"
+ "net"
+)
+
+// Handshaker defines the interface for connection handshakes performed on the
+// server or client when first connecting.
+type Handshaker interface {
+ // Handshake should confirm or decorate a connection that may be incoming
+ // to a server or outgoing from a client.
+ //
+ // If this returns without an error, the caller should use the connection
+ // in place of the original connection.
+ //
+ // The second return value can contain credential specific data, such as
+ // unix socket credentials or TLS information.
+ //
+ // While we currently only have implementations on the server-side, this
+ // interface should be sufficient to implement similar handshakes on the
+ // client-side.
+ Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+}
+
+type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+
+func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+ return fn(ctx, conn)
+}
+
+func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+ return conn, nil, nil
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/server.go b/vendor/github.com/stevvooe/ttrpc/server.go
index ed2d14c..edfca0c 100644
--- a/vendor/github.com/stevvooe/ttrpc/server.go
+++ b/vendor/github.com/stevvooe/ttrpc/server.go
@@ -2,6 +2,7 @@
import (
"context"
+ "io"
"math/rand"
"net"
"sync"
@@ -19,6 +20,7 @@
)
type Server struct {
+ config *serverConfig
services *serviceSet
codec codec
@@ -28,13 +30,21 @@
done chan struct{} // marks point at which we stop serving requests
}
-func NewServer() *Server {
+func NewServer(opts ...ServerOpt) (*Server, error) {
+ config := &serverConfig{}
+ for _, opt := range opts {
+ if err := opt(config); err != nil {
+ return nil, err
+ }
+ }
+
return &Server{
+ config: config,
services: newServiceSet(),
done: make(chan struct{}),
listeners: make(map[net.Listener]struct{}),
connections: make(map[*serverConn]struct{}),
- }
+ }, nil
}
func (s *Server) Register(name string, methods map[string]Method) {
@@ -46,10 +56,15 @@
defer s.closeListener(l)
var (
- ctx = context.Background()
- backoff time.Duration
+ ctx = context.Background()
+ backoff time.Duration
+ handshaker = s.config.handshaker
)
+ if handshaker == nil {
+ handshaker = handshakerFunc(noopHandshake)
+ }
+
for {
conn, err := l.Accept()
if err != nil {
@@ -82,7 +97,15 @@
}
backoff = 0
- sc := s.newConn(conn)
+
+ approved, handshake, err := handshaker.Handshake(ctx, conn)
+ if err != nil {
+ log.L.WithError(err).Errorf("ttrpc: refusing connection after handshake")
+ conn.Close()
+ continue
+ }
+
+ sc := s.newConn(approved, handshake)
go sc.run(ctx)
}
}
@@ -205,11 +228,12 @@
}
}
-func (s *Server) newConn(conn net.Conn) *serverConn {
+func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
c := &serverConn{
- server: s,
- conn: conn,
- shutdown: make(chan struct{}),
+ server: s,
+ conn: conn,
+ handshake: handshake,
+ shutdown: make(chan struct{}),
}
c.setState(connStateIdle)
s.addConnection(c)
@@ -217,9 +241,10 @@
}
type serverConn struct {
- server *Server
- conn net.Conn
- state atomic.Value
+ server *Server
+ conn net.Conn
+ handshake interface{} // data from handshake, not used for now
+ state atomic.Value
shutdownOnce sync.Once
shutdown chan struct{} // forced shutdown, used by close
@@ -406,7 +431,7 @@
// branch. Basically, it means that we are no longer receiving
// requests due to a terminal error.
recvErr = nil // connection is now "closing"
- if err != nil {
+ if err != nil && err != io.EOF {
log.L.WithError(err).Error("error receiving message")
}
case <-shutdown:
diff --git a/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go
new file mode 100644
index 0000000..812d927
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go
@@ -0,0 +1,92 @@
+package ttrpc
+
+import (
+ "context"
+ "net"
+ "os"
+ "syscall"
+
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+type UnixCredentialsFunc func(*unix.Ucred) error
+
+func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+ uc, err := requireUnixSocket(conn)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket")
+ }
+
+ rs, err := uc.SyscallConn()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed")
+ }
+ var (
+ ucred *unix.Ucred
+ ucredErr error
+ )
+ if err := rs.Control(func(fd uintptr) {
+ ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED)
+ }); err != nil {
+ return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed")
+ }
+
+ if ucredErr != nil {
+ return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials")
+ }
+
+ if err := fn(ucred); err != nil {
+ return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed")
+ }
+
+ return uc, ucred, nil
+}
+
+// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID.
+//
+// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an
+// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001.
+// So calling this function with uid=0 allows a connection from effective UID 0 but rejects
+// a connection from effective UID 1001.
+//
+// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)."
+func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc {
+ return func(ucred *unix.Ucred) error {
+ return requireUidGid(ucred, uid, gid)
+ }
+}
+
+func UnixSocketRequireRoot() UnixCredentialsFunc {
+ return UnixSocketRequireUidGid(0, 0)
+}
+
+// UnixSocketRequireSameUser resolves the current effective unix user and returns a
+// UnixCredentialsFunc that will validate incoming unix connections against the
+// current credentials.
+//
+// This is useful when using abstract sockets that are accessible by all users.
+func UnixSocketRequireSameUser() UnixCredentialsFunc {
+ euid, egid := os.Geteuid(), os.Getegid()
+ return UnixSocketRequireUidGid(euid, egid)
+}
+
+func requireRoot(ucred *unix.Ucred) error {
+ return requireUidGid(ucred, 0, 0)
+}
+
+func requireUidGid(ucred *unix.Ucred, uid, gid int) error {
+ if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) {
+ return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials")
+ }
+ return nil
+}
+
+func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) {
+ uc, ok := conn.(*net.UnixConn)
+ if !ok {
+ return nil, errors.New("a unix socket connection is required")
+ }
+
+ return uc, nil
+}
diff --git a/vendor/github.com/tinylib/msgp/README.md b/vendor/github.com/tinylib/msgp/README.md
index a7cc849..1328cca 100644
--- a/vendor/github.com/tinylib/msgp/README.md
+++ b/vendor/github.com/tinylib/msgp/README.md
@@ -1,15 +1,12 @@
MessagePack Code Generator [![Build Status](https://travis-ci.org/tinylib/msgp.svg?branch=master)](https://travis-ci.org/tinylib/msgp)
=======
-[![forthebadge](http://forthebadge.com/badges/uses-badges.svg)](http://forthebadge.com)
-[![forthebadge](http://forthebadge.com/badges/ages-12.svg)](http://forthebadge.com)
-
-This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). It is targeted at the `go generate` [tool](http://tip.golang.org/cmd/go/#hdr-Generate_Go_files_by_processing_source). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
+This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
### Why?
- Use Go as your schema language
-- Speeeeeed (400MB/s on modern hardware)
+- Performance
- [JSON interop](http://godoc.org/github.com/tinylib/msgp/msgp#CopyToJSON)
- [User-defined extensions](http://github.com/tinylib/msgp/wiki/Using-Extensions)
- Type safety
@@ -17,8 +14,6 @@
### Quickstart
-Note: you need at least go 1.3 to compile this package, and at least go 1.4 to use `go generate`.
-
In a source file, include the following directive:
```go
@@ -45,7 +40,7 @@
By default, the code generator will satisfy `msgp.Sizer`, `msgp.Encodable`, `msgp.Decodable`,
`msgp.Marshaler`, and `msgp.Unmarshaler`. Carefully-designed applications can use these methods to do
-marshalling/unmarshalling with zero allocations.
+marshalling/unmarshalling with zero heap allocations.
While `msgp.Marshaler` and `msgp.Unmarshaler` are quite similar to the standard library's
`json.Marshaler` and `json.Unmarshaler`, `msgp.Encodable` and `msgp.Decodable` are useful for
@@ -62,6 +57,7 @@
- Generation of both `[]byte`-oriented and `io.Reader/io.Writer`-oriented methods
- Support for arbitrary type system extensions
- [Preprocessor directives](http://github.com/tinylib/msgp/wiki/Preprocessor-Directives)
+ - File-based dependency model means fast codegen regardless of source tree size.
Consider the following:
```go
@@ -84,21 +80,23 @@
### Status
-Alpha. I _will_ break stuff. There is an open milestone for Beta stability (targeted for January.) Only the `/msgp` sub-directory will have a stability guarantee.
+Mostly stable, in that no breaking changes have been made to the `/msgp` library in more than a year. Newer versions
+of the code may generate different code than older versions for performance reasons. I (@philhofer) am aware of a
+number of stability-critical commercial applications that use this code with good results. But, caveat emptor.
You can read more about how `msgp` maps MessagePack types onto Go types [in the wiki](http://github.com/tinylib/msgp/wiki).
Here some of the known limitations/restrictions:
- - Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
- - Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
- - Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
- - _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
+- Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
+- Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
+- Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
+- _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
If the output compiles, then there's a pretty good chance things are fine. (Plus, we generate tests for you.) *Please, please, please* file an issue if you think the generator is writing broken code.
### Performance
-If you like benchmarks, see [here.](https://github.com/alecthomas/go_serialization_benchmarks)
+If you like benchmarks, see [here](http://bravenewgeek.com/so-you-wanna-go-fast/) and [here](https://github.com/alecthomas/go_serialization_benchmarks).
-As one might expect, the generated methods that deal with `[]byte` are faster, but the `io.Reader/Writer` methods are generally more memory-efficient for large (> 2KB) objects.
+As one might expect, the generated methods that deal with `[]byte` are faster for small objects, but the `io.Reader/Writer` methods are generally more memory-efficient (and, at some point, faster) for large (> 2KB) objects.
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
new file mode 100644
index 0000000..6c6bb37
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
@@ -0,0 +1,24 @@
+// +build linux,!appengine
+
+package msgp
+
+import (
+ "os"
+ "syscall"
+)
+
+func adviseRead(mem []byte) {
+ syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
+}
+
+func adviseWrite(mem []byte) {
+ syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
+}
+
+func fallocate(f *os.File, sz int64) error {
+ err := syscall.Fallocate(int(f.Fd()), 0, 0, sz)
+ if err == syscall.ENOTSUP {
+ return f.Truncate(sz)
+ }
+ return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
new file mode 100644
index 0000000..da65ea5
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
@@ -0,0 +1,17 @@
+// +build !linux appengine
+
+package msgp
+
+import (
+ "os"
+)
+
+// TODO: darwin, BSD support
+
+func adviseRead(mem []byte) {}
+
+func adviseWrite(mem []byte) {}
+
+func fallocate(f *os.File, sz int64) error {
+ return f.Truncate(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/appengine.go b/vendor/github.com/tinylib/msgp/msgp/appengine.go
new file mode 100644
index 0000000..bff9e76
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package msgp
+
+// let's just assume appengine
+// uses 64-bit hardware...
+const smallint = false
+
+func UnsafeString(b []byte) string {
+ return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go
index 35583ba..a0434c7 100644
--- a/vendor/github.com/tinylib/msgp/msgp/circular.go
+++ b/vendor/github.com/tinylib/msgp/msgp/circular.go
@@ -1,20 +1,21 @@
package msgp
-import (
- "testing"
-)
+type timer interface {
+ StartTimer()
+ StopTimer()
+}
// EndlessReader is an io.Reader
// that loops over the same data
// endlessly. It is used for benchmarking.
type EndlessReader struct {
- tb *testing.B
+ tb timer
data []byte
offset int
}
// NewEndlessReader returns a new endless reader
-func NewEndlessReader(b []byte, tb *testing.B) *EndlessReader {
+func NewEndlessReader(b []byte, tb timer) *EndlessReader {
return &EndlessReader{tb: tb, data: b, offset: 0}
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
index 32a0ada..588b18f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/extension.go
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -226,7 +226,7 @@
// peek at the extension type, assuming the next
// kind to be read is Extension
func (m *Reader) peekExtensionType() (int8, error) {
- p, err := m.r.Peek(2)
+ p, err := m.R.Peek(2)
if err != nil {
return 0, err
}
@@ -238,7 +238,7 @@
return int8(p[1]), nil
}
size := spec.size
- p, err = m.r.Peek(int(size))
+ p, err = m.R.Peek(int(size))
if err != nil {
return 0, err
}
@@ -273,7 +273,7 @@
// e.Type() is not the same as the wire type.
func (m *Reader) ReadExtension(e Extension) (err error) {
var p []byte
- p, err = m.r.Peek(2)
+ p, err = m.R.Peek(2)
if err != nil {
return
}
@@ -286,13 +286,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(3)
+ p, err = m.R.Peek(3)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(3)
+ _, err = m.R.Skip(3)
}
return
@@ -301,13 +301,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(4)
+ p, err = m.R.Peek(4)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(4)
+ _, err = m.R.Skip(4)
}
return
@@ -316,13 +316,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(6)
+ p, err = m.R.Peek(6)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(6)
+ _, err = m.R.Skip(6)
}
return
@@ -331,13 +331,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(10)
+ p, err = m.R.Peek(10)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(10)
+ _, err = m.R.Skip(10)
}
return
@@ -346,18 +346,18 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(18)
+ p, err = m.R.Peek(18)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(18)
+ _, err = m.R.Skip(18)
}
return
case mext8:
- p, err = m.r.Peek(3)
+ p, err = m.R.Peek(3)
if err != nil {
return
}
@@ -369,7 +369,7 @@
off = 3
case mext16:
- p, err = m.r.Peek(4)
+ p, err = m.R.Peek(4)
if err != nil {
return
}
@@ -381,7 +381,7 @@
off = 4
case mext32:
- p, err = m.r.Peek(6)
+ p, err = m.R.Peek(6)
if err != nil {
return
}
@@ -397,13 +397,13 @@
return
}
- p, err = m.r.Peek(read + off)
+ p, err = m.R.Peek(read + off)
if err != nil {
return
}
err = e.UnmarshalBinary(p[off:])
if err == nil {
- _, err = m.r.Skip(read + off)
+ _, err = m.R.Skip(read + off)
}
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
new file mode 100644
index 0000000..8e7370e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -0,0 +1,92 @@
+// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build !appengine
+
+package msgp
+
+import (
+ "os"
+ "syscall"
+)
+
+// ReadFile reads a file into 'dst' using
+// a read-only memory mapping. Consequently,
+// the file must be mmap-able, and the
+// Unmarshaler should never write to
+// the source memory. (Methods generated
+// by the msgp tool obey that constraint, but
+// user-defined implementations may not.)
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+func ReadFile(dst Unmarshaler, file *os.File) error {
+ stat, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+ adviseRead(data)
+ _, err = dst.UnmarshalMsg(data)
+ uerr := syscall.Munmap(data)
+ if err == nil {
+ err = uerr
+ }
+ return err
+}
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
+// WriteFile writes a file from 'src' using
+// memory mapping. It overwrites the entire
+// contents of the previous file.
+// The mapping size is calculated
+// using the `Msgsize()` method
+// of 'src', so it must produce a result
+// equal to or greater than the actual encoded
+// size of the object. Otherwise,
+// a fault (SIGBUS) will occur.
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+// NOTE: The performance of this call
+// is highly OS- and filesystem-dependent.
+// Users should take care to test that this
+// performs as expected in a production environment.
+// (Linux users should run a kernel and filesystem
+// that support fallocate(2) for the best results.)
+func WriteFile(src MarshalSizer, file *os.File) error {
+ sz := src.Msgsize()
+ err := fallocate(file, int64(sz))
+ if err != nil {
+ return err
+ }
+ data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+ adviseWrite(data)
+ chunk := data[:0]
+ chunk, err = src.MarshalMsg(chunk)
+ if err != nil {
+ return err
+ }
+ uerr := syscall.Munmap(data)
+ if uerr != nil {
+ return uerr
+ }
+ return file.Truncate(int64(len(chunk)))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go
new file mode 100644
index 0000000..6e654db
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go
@@ -0,0 +1,47 @@
+// +build windows appengine
+
+package msgp
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
+func ReadFile(dst Unmarshaler, file *os.File) error {
+ if u, ok := dst.(Decodable); ok {
+ return u.DecodeMsg(NewReader(file))
+ }
+
+ data, err := ioutil.ReadAll(file)
+ if err != nil {
+ return err
+ }
+ _, err = dst.UnmarshalMsg(data)
+ return err
+}
+
+func WriteFile(src MarshalSizer, file *os.File) error {
+ if e, ok := src.(Encodable); ok {
+ w := NewWriter(file)
+ err := e.EncodeMsg(w)
+ if err == nil {
+ err = w.Flush()
+ }
+ return err
+ }
+
+ raw, err := src.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ _, err = file.Write(raw)
+ return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go
index 5c799ff..4325860 100644
--- a/vendor/github.com/tinylib/msgp/msgp/json.go
+++ b/vendor/github.com/tinylib/msgp/msgp/json.go
@@ -66,7 +66,7 @@
if jsw, ok := w.(jsWriter); ok {
j = jsw
} else {
- bf = bufio.NewWriterSize(w, 512)
+ bf = bufio.NewWriter(w)
j = bf
}
var nn int
@@ -333,7 +333,7 @@
func rwString(dst jsWriter, src *Reader) (n int, err error) {
var p []byte
- p, err = src.r.Peek(1)
+ p, err = src.R.Peek(1)
if err != nil {
return
}
@@ -342,25 +342,25 @@
if isfixstr(lead) {
read = int(rfixstr(lead))
- src.r.Skip(1)
+ src.R.Skip(1)
goto write
}
switch lead {
case mstr8:
- p, err = src.r.Next(2)
+ p, err = src.R.Next(2)
if err != nil {
return
}
read = int(uint8(p[1]))
case mstr16:
- p, err = src.r.Next(3)
+ p, err = src.R.Next(3)
if err != nil {
return
}
read = int(big.Uint16(p[1:]))
case mstr32:
- p, err = src.r.Next(5)
+ p, err = src.R.Next(5)
if err != nil {
return
}
@@ -370,7 +370,7 @@
return
}
write:
- p, err = src.r.Next(read)
+ p, err = src.R.Next(read)
if err != nil {
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go
index 69afc8a..ad07ef9 100644
--- a/vendor/github.com/tinylib/msgp/msgp/number.go
+++ b/vendor/github.com/tinylib/msgp/msgp/number.go
@@ -1,11 +1,105 @@
package msgp
import (
+ "math"
"strconv"
)
// The portable parts of the Number implementation
+// Number can be
+// an int64, uint64, float32,
+// or float64 internally.
+// It can decode itself
+// from any of the native
+// messagepack number types.
+// The zero-value of Number
+// is Int(0). Using the equality
+// operator with Number compares
+// both the type and the value
+// of the number.
+type Number struct {
+ // internally, this
+ // is just a tagged union.
+ // the raw bits of the number
+ // are stored the same way regardless.
+ bits uint64
+ typ Type
+}
+
+// AsInt sets the number to an int64.
+func (n *Number) AsInt(i int64) {
+
+ // we always store int(0)
+ // as {0, InvalidType} in
+ // order to preserve
+ // the behavior of the == operator
+ if i == 0 {
+ n.typ = InvalidType
+ n.bits = 0
+ return
+ }
+
+ n.typ = IntType
+ n.bits = uint64(i)
+}
+
+// AsUint sets the number to a uint64.
+func (n *Number) AsUint(u uint64) {
+ n.typ = UintType
+ n.bits = u
+}
+
+// AsFloat32 sets the value of the number
+// to a float32.
+func (n *Number) AsFloat32(f float32) {
+ n.typ = Float32Type
+ n.bits = uint64(math.Float32bits(f))
+}
+
+// AsFloat64 sets the value of the
+// number to a float64.
+func (n *Number) AsFloat64(f float64) {
+ n.typ = Float64Type
+ n.bits = math.Float64bits(f)
+}
+
+// Int casts the number as an int64, and
+// returns whether or not that was the
+// underlying type.
+func (n *Number) Int() (int64, bool) {
+ return int64(n.bits), n.typ == IntType || n.typ == InvalidType
+}
+
+// Uint casts the number as a uint64, and returns
+// whether or not that was the underlying type.
+func (n *Number) Uint() (uint64, bool) {
+ return n.bits, n.typ == UintType
+}
+
+// Float casts the number to a float64, and
+// returns whether or not that was the underlying
+// type (either a float64 or a float32).
+func (n *Number) Float() (float64, bool) {
+ switch n.typ {
+ case Float32Type:
+ return float64(math.Float32frombits(uint32(n.bits))), true
+ case Float64Type:
+ return math.Float64frombits(n.bits), true
+ default:
+ return 0.0, false
+ }
+}
+
+// Type will return one of:
+// Float64Type, Float32Type, UintType, or IntType.
+func (n *Number) Type() Type {
+ if n.typ == InvalidType {
+ return IntType
+ }
+ return n.typ
+}
+
// DecodeMsg implements msgp.Decodable
func (n *Number) DecodeMsg(r *Reader) error {
typ, err := r.NextType()
@@ -83,6 +177,38 @@
}
}
+// MarshalMsg implements msgp.Marshaler
+func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
+ switch n.typ {
+ case IntType:
+ return AppendInt64(b, int64(n.bits)), nil
+ case UintType:
+ return AppendUint64(b, uint64(n.bits)), nil
+ case Float64Type:
+ return AppendFloat64(b, math.Float64frombits(n.bits)), nil
+ case Float32Type:
+ return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
+ default:
+ return AppendInt64(b, 0), nil
+ }
+}
+
+// EncodeMsg implements msgp.Encodable
+func (n *Number) EncodeMsg(w *Writer) error {
+ switch n.typ {
+ case IntType:
+ return w.WriteInt64(int64(n.bits))
+ case UintType:
+ return w.WriteUint64(n.bits)
+ case Float64Type:
+ return w.WriteFloat64(math.Float64frombits(n.bits))
+ case Float32Type:
+ return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
+ default:
+ return w.WriteInt64(0)
+ }
+}
+
// Msgsize implements msgp.Sizer
func (n *Number) Msgsize() int {
switch n.typ {
@@ -121,6 +247,7 @@
}
}
+// String implements fmt.Stringer
func (n *Number) String() string {
switch n.typ {
case InvalidType:
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_appengine.go b/vendor/github.com/tinylib/msgp/msgp/number_appengine.go
deleted file mode 100644
index c94140d..0000000
--- a/vendor/github.com/tinylib/msgp/msgp/number_appengine.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// +build appengine
-
-package msgp
-
-// let's just assume appengine
-// uses 64-bit hardware...
-const smallint = false
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Number struct {
- ibits uint64 // zero or bits
- fbits float64 // zero or bits
- typ Type // zero or type
-}
-
-func (n *Number) AsFloat64(f float64) {
- n.typ = Float64Type
- n.fbits = f
- n.ibits = 0
-}
-
-func (n *Number) AsFloat32(f float32) {
- n.typ = Float32Type
- n.fbits = float64(f)
- n.ibits = 0
-}
-
-func (n *Number) AsInt(i int64) {
- n.fbits = 0
- if i == 0 {
- n.typ = InvalidType
- n.ibits = 0
- return
- }
- n.ibits = uint64(i)
- n.typ = IntType
-}
-
-func (n *Number) AsUint(u uint64) {
- n.ibits = u
- n.fbits = 0
- n.typ = UintType
-}
-
-func (n *Number) Float() (float64, bool) {
- return n.fbits, n.typ == Float64Type || n.typ == Float32Type
-}
-
-func (n *Number) Int() (int64, bool) {
- return int64(n.ibits), n.typ == IntType
-}
-
-func (n *Number) Uint() (uint64, bool) {
- return n.ibits, n.typ == UintType
-}
-
-func (n *Number) Type() Type {
- if n.typ == InvalidType {
- return IntType
- }
- return n.typ
-}
-
-func (n *Number) MarshalMsg(o []byte) ([]byte, error) {
- switch n.typ {
- case InvalidType:
- return AppendInt64(o, 0), nil
- case IntType:
- return AppendInt64(o, int64(n.ibits)), nil
- case UintType:
- return AppendUint64(o, n.ibits), nil
- case Float32Type:
- return AppendFloat32(o, float32(n.fbits)), nil
- case Float64Type:
- return AppendFloat64(o, n.fbits), nil
- }
- panic("unreachable code!")
-}
-
-func (n *Number) EncodeMsg(w *Writer) error {
- switch n.typ {
- case InvalidType:
- return w.WriteInt64(0)
- case IntType:
- return w.WriteInt64(int64(n.ibits))
- case UintType:
- return w.WriteUint64(n.ibits)
- case Float32Type:
- return w.WriteFloat32(float32(n.fbits))
- case Float64Type:
- return w.WriteFloat64(n.fbits)
- }
- panic("unreachable code!")
-}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go b/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go
deleted file mode 100644
index 8ea0462..0000000
--- a/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// +build !appengine
-
-package msgp
-
-import (
- "reflect"
- "unsafe"
-)
-
-const (
- // spec says int and uint are always
- // the same size, but that int/uint
- // size may not be machine word size
- smallint = unsafe.Sizeof(int(0)) == 4
-)
-
-// UnsafeString returns the byte slice as a volatile string
-// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
-// THIS IS EVIL CODE.
-// YOU HAVE BEEN WARNED.
-func UnsafeString(b []byte) string {
- return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)}))
-}
-
-// UnsafeBytes returns the string as a byte slice
-// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
-// THIS IS EVIL CODE.
-// YOU HAVE BEEN WARNED.
-func UnsafeBytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Len: len(s),
- Cap: len(s),
- Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
- }))
-}
-
-// Number can be
-// an int64, uint64, float32,
-// or float64 internally.
-// It can decode itself
-// from any of the native
-// messagepack number types.
-// The zero-value of Number
-// is Int(0). Using the equality
-// operator with Number compares
-// both the type and the value
-// of the number.
-type Number struct {
- // internally, this
- // is just a tagged union.
- // the raw bits of the number
- // are stored the same way regardless.
- bits uint64
- typ Type
-}
-
-// AsFloat64 sets the number to
-// a float64.
-func (n *Number) AsFloat64(f float64) {
- n.typ = Float64Type
- n.bits = *(*uint64)(unsafe.Pointer(&f))
-}
-
-// AsInt sets the number to an int64.
-func (n *Number) AsInt(i int64) {
-
- // we always store int(0)
- // as {0, InvalidType} in
- // order to preserve
- // the behavior of the == operator
- if i == 0 {
- n.typ = InvalidType
- n.bits = 0
- return
- }
-
- n.typ = IntType
- n.bits = uint64(i)
-}
-
-// AsUint sets the number to a uint64.
-func (n *Number) AsUint(u uint64) {
- n.typ = UintType
- n.bits = u
-}
-
-// AsFloat32 sets the number to a float32.
-func (n *Number) AsFloat32(f float32) {
- n.typ = Float32Type
- g := float64(f)
- n.bits = *(*uint64)(unsafe.Pointer(&g))
-}
-
-// Type will return one of:
-// Float64Type, Float32Type, UintType, or IntType.
-func (n *Number) Type() Type {
- if n.typ == InvalidType {
- return IntType
- }
- return n.typ
-}
-
-// Float casts the number of the float,
-// and returns whether or not that was
-// the underlying type. (This is legal
-// for both float32 and float64 types.)
-func (n *Number) Float() (float64, bool) {
- return *(*float64)(unsafe.Pointer(&n.bits)), n.typ == Float64Type || n.typ == Float32Type
-}
-
-// Int casts the number as an int64, and
-// returns whether or not that was the
-// underlying type.
-func (n *Number) Int() (int64, bool) {
- return int64(n.bits), n.typ == IntType || n.typ == InvalidType
-}
-
-// Uint casts the number as a uint64, and returns
-// whether or not that was the underlying type.
-func (n *Number) Uint() (uint64, bool) {
- return n.bits, n.typ == UintType
-}
-
-// EncodeMsg implements msgp.Encodable
-func (n *Number) EncodeMsg(w *Writer) error {
- switch n.typ {
- case InvalidType:
- return w.WriteInt(0)
- case IntType:
- return w.WriteInt64(int64(n.bits))
- case UintType:
- return w.WriteUint64(n.bits)
- case Float64Type:
- return w.WriteFloat64(*(*float64)(unsafe.Pointer(&n.bits)))
- case Float32Type:
- return w.WriteFloat32(float32(*(*float64)(unsafe.Pointer(&n.bits))))
- default:
- // this should never ever happen
- panic("(*Number).typ is invalid")
- }
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
- switch n.typ {
- case InvalidType:
- return AppendInt(b, 0), nil
- case IntType:
- return AppendInt64(b, int64(n.bits)), nil
- case UintType:
- return AppendUint64(b, n.bits), nil
- case Float64Type:
- return AppendFloat64(b, *(*float64)(unsafe.Pointer(&n.bits))), nil
- case Float32Type:
- return AppendFloat32(b, float32(*(*float64)(unsafe.Pointer(&n.bits)))), nil
- default:
- panic("(*Number).typ is invalid")
- }
-}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go
index c34482e..20cd1ef 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read.go
@@ -1,11 +1,12 @@
package msgp
import (
- "github.com/philhofer/fwd"
"io"
"math"
"sync"
"time"
+
+ "github.com/philhofer/fwd"
)
// where we keep old *Readers
@@ -111,10 +112,10 @@
// reader will be buffered.
func NewReader(r io.Reader) *Reader {
p := readerPool.Get().(*Reader)
- if p.r == nil {
- p.r = fwd.NewReader(r)
+ if p.R == nil {
+ p.R = fwd.NewReader(r)
} else {
- p.r.Reset(r)
+ p.R.Reset(r)
}
return p
}
@@ -122,39 +123,96 @@
// NewReaderSize returns a *Reader with a buffer of the given size.
// (This is vastly preferable to passing the decoder a reader that is already buffered.)
func NewReaderSize(r io.Reader, sz int) *Reader {
- return &Reader{r: fwd.NewReaderSize(r, sz)}
+ return &Reader{R: fwd.NewReaderSize(r, sz)}
}
// Reader wraps an io.Reader and provides
// methods to read MessagePack-encoded values
// from it. Readers are buffered.
type Reader struct {
- r *fwd.Reader
+ // R is the buffered reader
+ // that the Reader uses
+ // to decode MessagePack.
+ // The Reader itself
+ // is stateless; all the
+ // buffering is done
+ // within R.
+ R *fwd.Reader
scratch []byte
}
// Read implements `io.Reader`
func (m *Reader) Read(p []byte) (int, error) {
- return m.r.Read(p)
+ return m.R.Read(p)
+}
+
+// CopyNext reads the next object from m without decoding it and writes it to w.
+// It avoids unnecessary copies internally.
+func (m *Reader) CopyNext(w io.Writer) (int64, error) {
+ sz, o, err := getNextSize(m.R)
+ if err != nil {
+ return 0, err
+ }
+
+ var n int64
+ // Opportunistic optimization: if we can fit the whole thing in the m.R
+ // buffer, then just get a pointer to that, and pass it to w.Write,
+ // avoiding an allocation.
+ if int(sz) <= m.R.BufferSize() {
+ var nn int
+ var buf []byte
+ buf, err = m.R.Next(int(sz))
+ if err != nil {
+ if err == io.ErrUnexpectedEOF {
+ err = ErrShortBytes
+ }
+ return 0, err
+ }
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ } else {
+ // Fall back to io.CopyN.
+ // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer)
+ n, err = io.CopyN(w, m.R, int64(sz))
+ if err == io.ErrUnexpectedEOF {
+ err = ErrShortBytes
+ }
+ }
+ if err != nil {
+ return n, err
+ } else if n < int64(sz) {
+ return n, io.ErrShortWrite
+ }
+
+ // for maps and slices, read elements
+ for x := uintptr(0); x < o; x++ {
+ var n2 int64
+ n2, err = m.CopyNext(w)
+ if err != nil {
+ return n, err
+ }
+ n += n2
+ }
+ return n, nil
}
// ReadFull implements `io.ReadFull`
func (m *Reader) ReadFull(p []byte) (int, error) {
- return m.r.ReadFull(p)
+ return m.R.ReadFull(p)
}
// Reset resets the underlying reader.
-func (m *Reader) Reset(r io.Reader) { m.r.Reset(r) }
+func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) }
// Buffered returns the number of bytes currently in the read buffer.
-func (m *Reader) Buffered() int { return m.r.Buffered() }
+func (m *Reader) Buffered() int { return m.R.Buffered() }
// BufferSize returns the capacity of the read buffer.
-func (m *Reader) BufferSize() int { return m.r.BufferSize() }
+func (m *Reader) BufferSize() int { return m.R.BufferSize() }
// NextType returns the next object type to be decoded.
func (m *Reader) NextType() (Type, error) {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
if err != nil {
return InvalidType, err
}
@@ -182,12 +240,14 @@
// IsNil returns whether or not
// the next byte is a null messagepack byte
func (m *Reader) IsNil() bool {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
return err == nil && p[0] == mnil
}
+// getNextSize returns the size of the next object on the wire.
// returns (obj size, obj elements, error)
// only maps and arrays have non-zero obj elements
+// for maps and arrays, obj size does not include elements
//
// use uintptr b/c it's guaranteed to be large enough
// to hold whatever we can fit in memory.
@@ -243,8 +303,8 @@
// we can use the faster
// method if we have enough
// buffered data
- if m.r.Buffered() >= 5 {
- p, err = m.r.Peek(5)
+ if m.R.Buffered() >= 5 {
+ p, err = m.R.Peek(5)
if err != nil {
return err
}
@@ -253,7 +313,7 @@
return err
}
} else {
- v, o, err = getNextSize(m.r)
+ v, o, err = getNextSize(m.R)
if err != nil {
return err
}
@@ -261,7 +321,7 @@
// 'v' is always non-zero
// if err == nil
- _, err = m.r.Skip(int(v))
+ _, err = m.R.Skip(int(v))
if err != nil {
return err
}
@@ -284,26 +344,26 @@
func (m *Reader) ReadMapHeader() (sz uint32, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
lead = p[0]
if isfixmap(lead) {
sz = uint32(rfixmap(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case mmap16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
sz = uint32(big.Uint16(p[1:]))
return
case mmap32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -338,7 +398,7 @@
// method; writing into the returned slice may
// corrupt future reads.
func (m *Reader) ReadMapKeyPtr() ([]byte, error) {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
if err != nil {
return nil, err
}
@@ -346,24 +406,24 @@
var read int
if isfixstr(lead) {
read = int(rfixstr(lead))
- m.r.Skip(1)
+ m.R.Skip(1)
goto fill
}
switch lead {
case mstr8, mbin8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return nil, err
}
read = int(p[1])
case mstr16, mbin16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return nil, err
}
read = int(big.Uint16(p[1:]))
case mstr32, mbin32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return nil, err
}
@@ -375,7 +435,7 @@
if read == 0 {
return nil, ErrShortBytes
}
- return m.r.Next(read)
+ return m.R.Next(read)
}
// ReadArrayHeader reads the next object as an
@@ -384,19 +444,19 @@
func (m *Reader) ReadArrayHeader() (sz uint32, err error) {
var lead byte
var p []byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
lead = p[0]
if isfixarray(lead) {
sz = uint32(rfixarray(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case marray16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
@@ -404,7 +464,7 @@
return
case marray32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -419,14 +479,14 @@
// ReadNil reads a 'nil' MessagePack byte from the reader
func (m *Reader) ReadNil() error {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
if err != nil {
return err
}
if p[0] != mnil {
return badPrefix(NilType, p[0])
}
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return err
}
@@ -435,7 +495,7 @@
// it will be up-cast to a float64.)
func (m *Reader) ReadFloat64() (f float64, err error) {
var p []byte
- p, err = m.r.Peek(9)
+ p, err = m.R.Peek(9)
if err != nil {
// we'll allow a coversion from float32 to float64,
// since we don't lose any precision
@@ -455,14 +515,14 @@
return
}
f = math.Float64frombits(getMuint64(p))
- _, err = m.r.Skip(9)
+ _, err = m.R.Skip(9)
return
}
// ReadFloat32 reads a float32 from the reader
func (m *Reader) ReadFloat32() (f float32, err error) {
var p []byte
- p, err = m.r.Peek(5)
+ p, err = m.R.Peek(5)
if err != nil {
return
}
@@ -471,14 +531,14 @@
return
}
f = math.Float32frombits(getMuint32(p))
- _, err = m.r.Skip(5)
+ _, err = m.R.Skip(5)
return
}
// ReadBool reads a bool from the reader
func (m *Reader) ReadBool() (b bool, err error) {
var p []byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -490,7 +550,7 @@
err = badPrefix(BoolType, p[0])
return
}
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
@@ -498,7 +558,7 @@
func (m *Reader) ReadInt64() (i int64, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -506,17 +566,17 @@
if isfixint(lead) {
i = int64(rfixint(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
} else if isnfixint(lead) {
i = int64(rnfixint(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case mint8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
@@ -524,7 +584,7 @@
return
case mint16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
@@ -532,7 +592,7 @@
return
case mint32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -540,7 +600,7 @@
return
case mint64:
- p, err = m.r.Next(9)
+ p, err = m.R.Next(9)
if err != nil {
return
}
@@ -607,19 +667,19 @@
func (m *Reader) ReadUint64() (u uint64, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
lead = p[0]
if isfixint(lead) {
u = uint64(rfixint(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case muint8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
@@ -627,7 +687,7 @@
return
case muint16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
@@ -635,7 +695,7 @@
return
case muint32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -643,7 +703,7 @@
return
case muint64:
- p, err = m.r.Next(9)
+ p, err = m.R.Next(9)
if err != nil {
return
}
@@ -707,6 +767,10 @@
return
}
+// ReadByte is analogous to ReadUint8.
+//
+// NOTE: this is *not* an implementation
+// of io.ByteReader.
func (m *Reader) ReadByte() (b byte, err error) {
var in uint64
in, err = m.ReadUint64()
@@ -724,7 +788,7 @@
func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(2)
+ p, err = m.R.Peek(2)
if err != nil {
return
}
@@ -733,15 +797,15 @@
switch lead {
case mbin8:
read = int64(p[1])
- m.r.Skip(2)
+ m.R.Skip(2)
case mbin16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
read = int64(big.Uint16(p[1:]))
case mbin32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -755,16 +819,55 @@
} else {
b = scratch[0:read]
}
- _, err = m.r.ReadFull(b)
+ _, err = m.R.ReadFull(b)
return
}
+// ReadBytesHeader reads the size header
+// of a MessagePack 'bin' object. The user
+// is responsible for dealing with the next
+// 'sz' bytes from the reader in an application-specific
+// way.
+func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ switch p[0] {
+ case mbin8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ sz = uint32(p[1])
+ return
+ case mbin16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mbin32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint32(p[1:]))
+ return
+ default:
+ err = badPrefix(BinType, p[0])
+ return
+ }
+}
+
// ReadExactBytes reads a MessagePack 'bin'-encoded
// object off of the wire into the provided slice. An
// ArrayError will be returned if the object is not
// exactly the length of the input slice.
func (m *Reader) ReadExactBytes(into []byte) error {
- p, err := m.r.Peek(2)
+ p, err := m.R.Peek(2)
if err != nil {
return err
}
@@ -776,14 +879,14 @@
read = int64(p[1])
skip = 2
case mbin16:
- p, err = m.r.Peek(3)
+ p, err = m.R.Peek(3)
if err != nil {
return err
}
read = int64(big.Uint16(p[1:]))
skip = 3
case mbin32:
- p, err = m.r.Peek(5)
+ p, err = m.R.Peek(5)
if err != nil {
return err
}
@@ -795,8 +898,8 @@
if read != int64(len(into)) {
return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)}
}
- m.r.Skip(skip)
- _, err = m.r.ReadFull(into)
+ m.R.Skip(skip)
+ _, err = m.R.ReadFull(into)
return err
}
@@ -806,7 +909,7 @@
func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -815,25 +918,25 @@
if isfixstr(lead) {
read = int64(rfixstr(lead))
- m.r.Skip(1)
+ m.R.Skip(1)
goto fill
}
switch lead {
case mstr8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
read = int64(uint8(p[1]))
case mstr16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
read = int64(big.Uint16(p[1:]))
case mstr32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -848,16 +951,60 @@
} else {
b = scratch[0:read]
}
- _, err = m.r.ReadFull(b)
+ _, err = m.R.ReadFull(b)
return
}
+// ReadStringHeader reads a string header
+// off of the wire. The user is then responsible
+// for dealing with the next 'sz' bytes from
+// the reader in an application-specific manner.
+func (m *Reader) ReadStringHeader() (sz uint32, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead := p[0]
+ if isfixstr(lead) {
+ sz = uint32(rfixstr(lead))
+ m.R.Skip(1)
+ return
+ }
+ switch lead {
+ case mstr8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ sz = uint32(p[1])
+ return
+ case mstr16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mstr32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = big.Uint32(p[1:])
+ return
+ default:
+ err = badPrefix(StrType, lead)
+ return
+ }
+}
+
// ReadString reads a utf-8 string from the reader
func (m *Reader) ReadString() (s string, err error) {
var p []byte
var lead byte
var read int64
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -865,25 +1012,25 @@
if isfixstr(lead) {
read = int64(rfixstr(lead))
- m.r.Skip(1)
+ m.R.Skip(1)
goto fill
}
switch lead {
case mstr8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
read = int64(uint8(p[1]))
case mstr16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
read = int64(big.Uint16(p[1:]))
case mstr32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -915,7 +1062,7 @@
// thus escape analysis *must* conclude that
// 'out' escapes.
out := make([]byte, read)
- _, err = m.r.ReadFull(out)
+ _, err = m.R.ReadFull(out)
if err != nil {
return
}
@@ -926,7 +1073,7 @@
// ReadComplex64 reads a complex64 from the reader
func (m *Reader) ReadComplex64() (f complex64, err error) {
var p []byte
- p, err = m.r.Peek(10)
+ p, err = m.R.Peek(10)
if err != nil {
return
}
@@ -940,14 +1087,14 @@
}
f = complex(math.Float32frombits(big.Uint32(p[2:])),
math.Float32frombits(big.Uint32(p[6:])))
- _, err = m.r.Skip(10)
+ _, err = m.R.Skip(10)
return
}
// ReadComplex128 reads a complex128 from the reader
func (m *Reader) ReadComplex128() (f complex128, err error) {
var p []byte
- p, err = m.r.Peek(18)
+ p, err = m.R.Peek(18)
if err != nil {
return
}
@@ -961,7 +1108,7 @@
}
f = complex(math.Float64frombits(big.Uint64(p[2:])),
math.Float64frombits(big.Uint64(p[10:])))
- _, err = m.r.Skip(18)
+ _, err = m.R.Skip(18)
return
}
@@ -996,7 +1143,7 @@
// The returned time's location will be set to time.Local.
func (m *Reader) ReadTime() (t time.Time, err error) {
var p []byte
- p, err = m.r.Peek(15)
+ p, err = m.R.Peek(15)
if err != nil {
return
}
@@ -1010,7 +1157,7 @@
}
sec, nsec := getUnix(p[3:])
t = time.Unix(sec, int64(nsec)).Local()
- _, err = m.r.Skip(15)
+ _, err = m.R.Skip(15)
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
index 732fa68..78e466f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
@@ -117,13 +117,13 @@
}
func appendNext(f *Reader, d *[]byte) error {
- amt, o, err := getNextSize(f.r)
+ amt, o, err := getNextSize(f.R)
if err != nil {
return err
}
var i int
*d, i = ensure(*d, int(amt))
- _, err = f.r.ReadFull((*d)[i:])
+ _, err = f.R.ReadFull((*d)[i:])
if err != nil {
return err
}
@@ -576,7 +576,7 @@
return uint(u), b, err
}
-// ReadByteBytes is analagous to ReadUint8Bytes
+// ReadByteBytes is analogous to ReadUint8Bytes
func ReadByteBytes(b []byte) (byte, []byte, error) {
return ReadUint8Bytes(b)
}
@@ -784,6 +784,22 @@
return string(v), o, err
}
+// ReadStringAsBytes reads a 'str' object
+// into a slice of bytes. 'v' is the value of
+// the 'str' object, which may reside in memory
+// pointed to by 'scratch.' 'o' is the remaining bytes
+// in 'b.''
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (not 'str' type)
+// - InvalidPrefixError (unknown type marker)
+func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+ var tmp []byte
+ tmp, o, err = ReadStringZC(b)
+ v = append(scratch[:0], tmp...)
+ return
+}
+
// ReadComplex128Bytes reads a complex128
// extension object from 'b' and returns the
// remaining bytes.
@@ -922,14 +938,14 @@
case ArrayType:
var sz uint32
- sz, b, err = ReadArrayHeaderBytes(b)
+ sz, o, err = ReadArrayHeaderBytes(b)
if err != nil {
return
}
j := make([]interface{}, int(sz))
i = j
for d := range j {
- j[d], b, err = ReadIntfBytes(b)
+ j[d], o, err = ReadIntfBytes(o)
if err != nil {
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
new file mode 100644
index 0000000..4bcf321
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
@@ -0,0 +1,41 @@
+// +build !appengine
+
+package msgp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE:
+// all of the definition in this file
+// should be repeated in appengine.go,
+// but without using unsafe
+
+const (
+ // spec says int and uint are always
+ // the same size, but that int/uint
+ // size may not be machine word size
+ smallint = unsafe.Sizeof(int(0)) == 4
+)
+
+// UnsafeString returns the byte slice as a volatile string
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeString(b []byte) string {
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: sh.Data, Len: sh.Len}))
+}
+
+// UnsafeBytes returns the string as a byte slice
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Len: len(s),
+ Cap: len(s),
+ Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
+ }))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go
index 216697f..da9099c 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write.go
@@ -10,13 +10,6 @@
"time"
)
-func abs(i int64) int64 {
- if i < 0 {
- return -i
- }
- return i
-}
-
// Sizer is an interface implemented
// by types that can estimate their
// size when MessagePack encoded.
@@ -59,15 +52,26 @@
// it will cause undefined behavior.
func freeW(w *Writer) { pushWriter(w) }
-// Require ensures that cap(old)-len(old) >= extra
+// Require ensures that cap(old)-len(old) >= extra.
func Require(old []byte, extra int) []byte {
- if cap(old)-len(old) >= extra {
+ l := len(old)
+ c := cap(old)
+ r := l + extra
+ if c >= r {
return old
- }
- if len(old) == 0 {
+ } else if l == 0 {
return make([]byte, 0, extra)
}
- n := make([]byte, len(old), cap(old)-len(old)+extra)
+ // the new size is the greater
+ // of double the old capacity
+ // and the sum of the old length
+ // and the number of new bytes
+ // necessary.
+ c <<= 1
+ if c < r {
+ c = r
+ }
+ n := make([]byte, l, c)
copy(n, old)
return n
}
@@ -184,6 +188,17 @@
return wl, nil
}
+func (mw *Writer) Append(b ...byte) error {
+ if mw.avail() < len(b) {
+ err := mw.flush()
+ if err != nil {
+ return err
+ }
+ }
+ mw.wloc += copy(mw.buf[mw.wloc:], b)
+ return nil
+}
+
// push one byte onto the buffer
//
// NOTE: this is a hot code path
@@ -289,9 +304,9 @@
// size to the writer
func (mw *Writer) WriteMapHeader(sz uint32) error {
switch {
- case sz < 16:
+ case sz <= 15:
return mw.push(wfixmap(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
return mw.prefix16(mmap16, uint16(sz))
default:
return mw.prefix32(mmap32, sz)
@@ -302,9 +317,9 @@
// given size to the writer
func (mw *Writer) WriteArrayHeader(sz uint32) error {
switch {
- case sz < 16:
+ case sz <= 15:
return mw.push(wfixarray(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
return mw.prefix16(marray16, uint16(sz))
default:
return mw.prefix32(marray32, sz)
@@ -328,17 +343,26 @@
// WriteInt64 writes an int64 to the writer
func (mw *Writer) WriteInt64(i int64) error {
- a := abs(i)
+ if i >= 0 {
+ switch {
+ case i <= math.MaxInt8:
+ return mw.push(wfixint(uint8(i)))
+ case i <= math.MaxInt16:
+ return mw.prefix16(mint16, uint16(i))
+ case i <= math.MaxInt32:
+ return mw.prefix32(mint32, uint32(i))
+ default:
+ return mw.prefix64(mint64, uint64(i))
+ }
+ }
switch {
- case i < 0 && i > -32:
+ case i >= -32:
return mw.push(wnfixint(int8(i)))
- case i >= 0 && i < 128:
- return mw.push(wfixint(uint8(i)))
- case a < math.MaxInt8:
+ case i >= math.MinInt8:
return mw.prefix8(mint8, uint8(i))
- case a < math.MaxInt16:
+ case i >= math.MinInt16:
return mw.prefix16(mint16, uint16(i))
- case a < math.MaxInt32:
+ case i >= math.MinInt32:
return mw.prefix32(mint32, uint32(i))
default:
return mw.prefix64(mint64, uint64(i))
@@ -360,20 +384,20 @@
// WriteUint64 writes a uint64 to the writer
func (mw *Writer) WriteUint64(u uint64) error {
switch {
- case u < (1 << 7):
+ case u <= (1<<7)-1:
return mw.push(wfixint(uint8(u)))
- case u < math.MaxUint8:
+ case u <= math.MaxUint8:
return mw.prefix8(muint8, uint8(u))
- case u < math.MaxUint16:
+ case u <= math.MaxUint16:
return mw.prefix16(muint16, uint16(u))
- case u < math.MaxUint32:
+ case u <= math.MaxUint32:
return mw.prefix32(muint32, uint32(u))
default:
return mw.prefix64(muint64, u)
}
}
-// WriteByte is analagous to WriteUint8
+// WriteByte is analogous to WriteUint8
func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
// WriteUint8 writes a uint8 to the writer
@@ -393,9 +417,9 @@
sz := uint32(len(b))
var err error
switch {
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
err = mw.prefix8(mbin8, uint8(sz))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
err = mw.prefix16(mbin16, uint16(sz))
default:
err = mw.prefix32(mbin32, sz)
@@ -407,6 +431,20 @@
return err
}
+// WriteBytesHeader writes just the size header
+// of a MessagePack 'bin' object. The user is responsible
+// for then writing 'sz' more bytes into the stream.
+func (mw *Writer) WriteBytesHeader(sz uint32) error {
+ switch {
+ case sz <= math.MaxUint8:
+ return mw.prefix8(mbin8, uint8(sz))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mbin16, uint16(sz))
+ default:
+ return mw.prefix32(mbin32, sz)
+ }
+}
+
// WriteBool writes a bool to the writer
func (mw *Writer) WriteBool(b bool) error {
if b {
@@ -421,11 +459,11 @@
sz := uint32(len(s))
var err error
switch {
- case sz < 32:
+ case sz <= 31:
err = mw.push(wfixstr(uint8(sz)))
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
err = mw.prefix8(mstr8, uint8(sz))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
err = mw.prefix16(mstr16, uint16(sz))
default:
err = mw.prefix32(mstr32, sz)
@@ -436,6 +474,45 @@
return mw.writeString(s)
}
+// WriteStringHeader writes just the string size
+// header of a MessagePack 'str' object. The user
+// is responsible for writing 'sz' more valid UTF-8
+// bytes to the stream.
+func (mw *Writer) WriteStringHeader(sz uint32) error {
+ switch {
+ case sz <= 31:
+ return mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ return mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mstr16, uint16(sz))
+ default:
+ return mw.prefix32(mstr32, sz)
+ }
+}
+
+// WriteStringFromBytes writes a 'str' object
+// from a []byte.
+func (mw *Writer) WriteStringFromBytes(str []byte) error {
+ sz := uint32(len(str))
+ var err error
+ switch {
+ case sz <= 31:
+ err = mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ err = mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ err = mw.prefix16(mstr16, uint16(sz))
+ default:
+ err = mw.prefix32(mstr32, sz)
+ }
+ if err != nil {
+ return err
+ }
+ _, err = mw.Write(str)
+ return err
+}
+
// WriteComplex64 writes a complex64 to the writer
func (mw *Writer) WriteComplex64(f complex64) error {
o, err := mw.require(10)
@@ -509,7 +586,7 @@
// elapsed since "zero" Unix time, followed by 4 bytes
// for a big-endian 32-bit signed integer denoting
// the nanosecond offset of the time. This encoding
-// is intended to ease portability accross languages.
+// is intended to ease portability across languages.
// (Note that this is *not* the standard time.Time
// binary encoding, because its implementation relies
// heavily on the internal representation used by the
@@ -612,7 +689,7 @@
}
func (mw *Writer) writeMap(v reflect.Value) (err error) {
- if v.Elem().Kind() != reflect.String {
+ if v.Type().Key().Kind() != reflect.String {
return errors.New("msgp: map keys must be strings")
}
ks := v.MapKeys()
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
index 658102e..eaa03c4 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -22,10 +22,10 @@
// given size to the slice
func AppendMapHeader(b []byte, sz uint32) []byte {
switch {
- case sz < 16:
+ case sz <= 15:
return append(b, wfixmap(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n := ensure(b, 3)
prefixu16(o[n:], mmap16, uint16(sz))
return o
@@ -41,10 +41,10 @@
// the given size to the slice
func AppendArrayHeader(b []byte, sz uint32) []byte {
switch {
- case sz < 16:
+ case sz <= 15:
return append(b, wfixarray(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n := ensure(b, 3)
prefixu16(o[n:], marray16, uint16(sz))
return o
@@ -75,29 +75,39 @@
// AppendInt64 appends an int64 to the slice
func AppendInt64(b []byte, i int64) []byte {
- a := abs(i)
+ if i >= 0 {
+ switch {
+ case i <= math.MaxInt8:
+ return append(b, wfixint(uint8(i)))
+ case i <= math.MaxInt16:
+ o, n := ensure(b, 3)
+ putMint16(o[n:], int16(i))
+ return o
+ case i <= math.MaxInt32:
+ o, n := ensure(b, 5)
+ putMint32(o[n:], int32(i))
+ return o
+ default:
+ o, n := ensure(b, 9)
+ putMint64(o[n:], i)
+ return o
+ }
+ }
switch {
- case i < 0 && i > -32:
+ case i >= -32:
return append(b, wnfixint(int8(i)))
-
- case i >= 0 && i < 128:
- return append(b, wfixint(uint8(i)))
-
- case a < math.MaxInt8:
+ case i >= math.MinInt8:
o, n := ensure(b, 2)
putMint8(o[n:], int8(i))
return o
-
- case a < math.MaxInt16:
+ case i >= math.MinInt16:
o, n := ensure(b, 3)
putMint16(o[n:], int16(i))
return o
-
- case a < math.MaxInt32:
+ case i >= math.MinInt32:
o, n := ensure(b, 5)
putMint32(o[n:], int32(i))
return o
-
default:
o, n := ensure(b, 9)
putMint64(o[n:], i)
@@ -120,20 +130,20 @@
// AppendUint64 appends a uint64 to the slice
func AppendUint64(b []byte, u uint64) []byte {
switch {
- case u < (1 << 7):
+ case u <= (1<<7)-1:
return append(b, wfixint(uint8(u)))
- case u < math.MaxUint8:
+ case u <= math.MaxUint8:
o, n := ensure(b, 2)
putMuint8(o[n:], uint8(u))
return o
- case u < math.MaxUint16:
+ case u <= math.MaxUint16:
o, n := ensure(b, 3)
putMuint16(o[n:], uint16(u))
return o
- case u < math.MaxUint32:
+ case u <= math.MaxUint32:
o, n := ensure(b, 5)
putMuint32(o[n:], uint32(u))
return o
@@ -152,7 +162,7 @@
// AppendUint8 appends a uint8 to the slice
func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
-// AppendByte is analagous to AppendUint8
+// AppendByte is analogous to AppendUint8
func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
// AppendUint16 appends a uint16 to the slice
@@ -167,11 +177,11 @@
var o []byte
var n int
switch {
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mbin8, uint8(sz))
n += 2
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mbin16, uint16(sz))
n += 3
@@ -197,15 +207,15 @@
var n int
var o []byte
switch {
- case sz < 32:
+ case sz <= 31:
o, n = ensure(b, 1+sz)
o[n] = wfixstr(uint8(sz))
n++
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mstr8, uint8(sz))
n += 2
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mstr16, uint16(sz))
n += 3
@@ -217,6 +227,33 @@
return o[:n+copy(o[n:], s)]
}
+// AppendStringFromBytes appends a []byte
+// as a MessagePack 'str' to the slice 'b.'
+func AppendStringFromBytes(b []byte, str []byte) []byte {
+ sz := len(str)
+ var n int
+ var o []byte
+ switch {
+ case sz <= 31:
+ o, n = ensure(b, 1+sz)
+ o[n] = wfixstr(uint8(sz))
+ n++
+ case sz <= math.MaxUint8:
+ o, n = ensure(b, 2+sz)
+ prefixu8(o[n:], mstr8, uint8(sz))
+ n += 2
+ case sz <= math.MaxUint16:
+ o, n = ensure(b, 3+sz)
+ prefixu16(o[n:], mstr16, uint16(sz))
+ n += 3
+ default:
+ o, n = ensure(b, 5+sz)
+ prefixu32(o[n:], mstr32, uint32(sz))
+ n += 5
+ }
+ return o[:n+copy(o[n:], str)]
+}
+
// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
func AppendComplex64(b []byte, c complex64) []byte {
o, n := ensure(b, Complex64Size)
@@ -362,7 +399,12 @@
}
}
return b, nil
-
+ case reflect.Ptr:
+ if v.IsNil() {
+ return AppendNil(b), err
+ }
+ b, err = AppendIntf(b, v.Elem().Interface())
+ return b, err
default:
return b, &ErrUnsupportedType{T: v.Type()}
}
diff --git a/vendor/github.com/vishvananda/netlink/README.md b/vendor/github.com/vishvananda/netlink/README.md
index 2367fae..0b61be2 100644
--- a/vendor/github.com/vishvananda/netlink/README.md
+++ b/vendor/github.com/vishvananda/netlink/README.md
@@ -38,15 +38,18 @@
package main
import (
- "net"
+ "fmt"
"github.com/vishvananda/netlink"
)
func main() {
la := netlink.NewLinkAttrs()
la.Name = "foo"
- mybridge := &netlink.Bridge{la}}
- _ := netlink.LinkAdd(mybridge)
+ mybridge := &netlink.Bridge{LinkAttrs: la}
+ err := netlink.LinkAdd(mybridge)
+ if err != nil {
+ fmt.Printf("could not add %s: %v\n", la.Name, err)
+ }
eth1, _ := netlink.LinkByName("eth1")
netlink.LinkSetMaster(eth1, mybridge)
}
@@ -63,7 +66,6 @@
package main
import (
- "net"
"github.com/vishvananda/netlink"
)
diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go
index f33242a..8808b42 100644
--- a/vendor/github.com/vishvananda/netlink/addr_linux.go
+++ b/vendor/github.com/vishvananda/netlink/addr_linux.go
@@ -2,7 +2,6 @@
import (
"fmt"
- "log"
"net"
"strings"
"syscall"
@@ -65,7 +64,7 @@
msg := nl.NewIfAddrmsg(family)
msg.Index = uint32(base.Index)
msg.Scope = uint8(addr.Scope)
- prefixlen, _ := addr.Mask.Size()
+ prefixlen, masklen := addr.Mask.Size()
msg.Prefixlen = uint8(prefixlen)
req.AddData(msg)
@@ -103,9 +102,14 @@
}
}
- if addr.Broadcast != nil {
- req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast))
+ if addr.Broadcast == nil {
+ calcBroadcast := make(net.IP, masklen/8)
+ for i := range localAddrData {
+ calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i]
+ }
+ addr.Broadcast = calcBroadcast
}
+ req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast))
if addr.Label != "" {
labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label))
@@ -232,16 +236,34 @@
// AddrSubscribe takes a chan down which notifications will be sent
// when addresses change. Close the 'done' chan to stop subscription.
func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
- return addrSubscribe(netns.None(), netns.None(), ch, done)
+ return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil)
}
// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
- return addrSubscribe(ns, netns.None(), ch, done)
+ return addrSubscribeAt(ns, netns.None(), ch, done, nil)
}
-func addrSubscribe(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
+// AddrSubscribeOptions contains a set of options to use with
+// AddrSubscribeWithOptions.
+type AddrSubscribeOptions struct {
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+}
+
+// AddrSubscribeWithOptions work like AddrSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error {
+ if options.Namespace == nil {
+ none := netns.None()
+ options.Namespace = &none
+ }
+ return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error {
s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
if err != nil {
return err
@@ -257,20 +279,26 @@
for {
msgs, err := s.Receive()
if err != nil {
- log.Printf("netlink.AddrSubscribe: Receive() error: %v", err)
+ if cberr != nil {
+ cberr(err)
+ }
return
}
for _, m := range msgs {
msgType := m.Header.Type
if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR {
- log.Printf("netlink.AddrSubscribe: bad message type: %d", msgType)
- continue
+ if cberr != nil {
+ cberr(fmt.Errorf("bad message type: %d", msgType))
+ }
+ return
}
addr, _, ifindex, err := parseAddr(m.Data)
if err != nil {
- log.Printf("netlink.AddrSubscribe: could not parse address: %v", err)
- continue
+ if cberr != nil {
+ cberr(fmt.Errorf("could not parse address: %v", err))
+ }
+ return
}
ch <- AddrUpdate{LinkAddress: *addr.IPNet,
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
new file mode 100644
index 0000000..a65d6a1
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -0,0 +1,115 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+ return pkgHandle.BridgeVlanList()
+}
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN))))
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+ if err != nil {
+ return nil, err
+ }
+ ret := make(map[int32][]*nl.BridgeVlanInfo)
+ for _, m := range msgs {
+ msg := nl.DeserializeIfInfomsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.IFLA_AF_SPEC:
+ //nested attr
+ nestAttrs, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse nested attr %v", err)
+ }
+ for _, nestAttr := range nestAttrs {
+ switch nestAttr.Attr.Type {
+ case nl.IFLA_BRIDGE_VLAN_INFO:
+ vlanInfo := nl.DeserializeBridgeVlanInfo(nestAttr.Value)
+ ret[msg.Index] = append(ret[msg.Index], vlanInfo)
+ }
+ }
+ }
+ }
+ }
+ return ret, nil
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanAdd(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel adds a new vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanDel(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel adds a new vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master)
+}
+
+func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ br := nl.NewRtAttr(nl.IFLA_AF_SPEC, nil)
+ var flags uint16
+ if self {
+ flags |= nl.BRIDGE_FLAGS_SELF
+ }
+ if master {
+ flags |= nl.BRIDGE_FLAGS_MASTER
+ }
+ if flags > 0 {
+ nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags))
+ }
+ vlanInfo := &nl.BridgeVlanInfo{Vid: vid}
+ if pvid {
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID
+ }
+ if untagged {
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED
+ }
+ nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+ req.AddData(br)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
index 20df903..ecf0445 100644
--- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go
@@ -22,7 +22,11 @@
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2
ConntrackExpectTable = 2
)
-
+const (
+ // For Parsing Mark
+ TCP_PROTO = 6
+ UDP_PROTO = 17
+)
const (
// backward compatibility with golang 1.6 which does not have io.SeekCurrent
seekCurrent = 1
@@ -56,7 +60,7 @@
// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
// conntrack -D [table] parameters Delete conntrack or expectation
-func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
return pkgHandle.ConntrackDeleteFilter(table, family, filter)
}
@@ -88,7 +92,7 @@
// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
// conntrack -D [table] parameters Delete conntrack or expectation
-func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) {
res, err := h.dumpConntrackTable(table, family)
if err != nil {
return 0, err
@@ -142,15 +146,16 @@
FamilyType uint8
Forward ipTuple
Reverse ipTuple
+ Mark uint32
}
func (s *ConntrackFlow) String() string {
// conntrack cmd output:
- // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001
- return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d",
+ // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 mark=0
+ return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d mark=%d",
nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort,
- s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort)
+ s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Mark)
}
// This method parse the ip tuple structure
@@ -160,7 +165,7 @@
// <len, NLA_F_NESTED|nl.CTA_TUPLE_PROTO, 1 byte for the protocol, 3 bytes of padding>
// <len, CTA_PROTO_SRC_PORT, 2 bytes for the source port, 2 bytes of padding>
// <len, CTA_PROTO_DST_PORT, 2 bytes for the source port, 2 bytes of padding>
-func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) {
+func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 {
for i := 0; i < 2; i++ {
_, t, _, v := parseNfAttrTLV(reader)
switch t {
@@ -189,6 +194,7 @@
// Skip some padding 2 byte
reader.Seek(2, seekCurrent)
}
+ return tpl.Protocol
}
func parseNfAttrTLV(r *bytes.Reader) (isNested bool, attrType, len uint16, value []byte) {
@@ -216,6 +222,7 @@
func parseRawData(data []byte) *ConntrackFlow {
s := &ConntrackFlow{}
+ var proto uint8
// First there is the Nfgenmsg header
// consume only the family field
reader := bytes.NewReader(data)
@@ -234,7 +241,7 @@
nested, t, l := parseNfAttrTL(reader)
if nested && t == nl.CTA_TUPLE_ORIG {
if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
- parseIpTuple(reader, &s.Forward)
+ proto = parseIpTuple(reader, &s.Forward)
}
} else if nested && t == nl.CTA_TUPLE_REPLY {
if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
@@ -248,7 +255,19 @@
}
}
}
-
+ if proto == TCP_PROTO {
+ reader.Seek(64, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_MARK {
+ s.Mark = uint32(v[3])
+ }
+ } else if proto == UDP_PROTO {
+ reader.Seek(16, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_MARK {
+ s.Mark = uint32(v[3])
+ }
+ }
return s
}
@@ -290,6 +309,12 @@
ConntrackNatAnyIP // -any-nat ip Source or destination NAT ip
)
+type CustomConntrackFilter interface {
+ // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches
+ // the filter or false otherwise
+ MatchConntrackFlow(flow *ConntrackFlow) bool
+}
+
type ConntrackFilter struct {
ipFilter map[ConntrackFilterType]net.IP
}
@@ -342,3 +367,5 @@
return match
}
+
+var _ CustomConntrackFilter = (*ConntrackFilter)(nil)
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
index 938b28b..1120c79 100644
--- a/vendor/github.com/vishvananda/netlink/filter.go
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -2,8 +2,6 @@
import (
"fmt"
-
- "github.com/vishvananda/netlink/nl"
)
type Filter interface {
@@ -184,14 +182,6 @@
}
}
-// Constants used in TcU32Sel.Flags.
-const (
- TC_U32_TERMINAL = nl.TC_U32_TERMINAL
- TC_U32_OFFSET = nl.TC_U32_OFFSET
- TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
- TC_U32_EAT = nl.TC_U32_EAT
-)
-
// Sel of the U32 filters that contains multiple TcU32Key. This is the copy
// and the frontend representation of nl.TcU32Sel. It is serialized into canonical
// nl.TcU32Sel with the appropriate endianness.
diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go
index dc0f90a..5025bd5 100644
--- a/vendor/github.com/vishvananda/netlink/filter_linux.go
+++ b/vendor/github.com/vishvananda/netlink/filter_linux.go
@@ -11,6 +11,14 @@
"github.com/vishvananda/netlink/nl"
)
+// Constants used in TcU32Sel.Flags.
+const (
+ TC_U32_TERMINAL = nl.TC_U32_TERMINAL
+ TC_U32_OFFSET = nl.TC_U32_OFFSET
+ TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET
+ TC_U32_EAT = nl.TC_U32_EAT
+)
+
// Fw filter filters on firewall marks
// NOTE: this is in filter_linux because it refers to nl.TcPolice which
// is defined in nl/tc_linux.go
@@ -128,9 +136,11 @@
req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type())))
options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
- if u32, ok := filter.(*U32); ok {
+
+ switch filter := filter.(type) {
+ case *U32:
// Convert TcU32Sel into nl.TcU32Sel as it is without copy.
- sel := (*nl.TcU32Sel)(unsafe.Pointer(u32.Sel))
+ sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel))
if sel == nil {
// match all
sel = &nl.TcU32Sel{
@@ -158,56 +168,56 @@
}
sel.Nkeys = uint8(len(sel.Keys))
nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize())
- if u32.ClassId != 0 {
- nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(u32.ClassId))
+ if filter.ClassId != 0 {
+ nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId))
}
actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil)
// backwards compatibility
- if u32.RedirIndex != 0 {
- u32.Actions = append([]Action{NewMirredAction(u32.RedirIndex)}, u32.Actions...)
+ if filter.RedirIndex != 0 {
+ filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...)
}
- if err := EncodeActions(actionsAttr, u32.Actions); err != nil {
+ if err := EncodeActions(actionsAttr, filter.Actions); err != nil {
return err
}
- } else if fw, ok := filter.(*Fw); ok {
- if fw.Mask != 0 {
+ case *Fw:
+ if filter.Mask != 0 {
b := make([]byte, 4)
- native.PutUint32(b, fw.Mask)
+ native.PutUint32(b, filter.Mask)
nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b)
}
- if fw.InDev != "" {
- nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(fw.InDev))
+ if filter.InDev != "" {
+ nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev))
}
- if (fw.Police != nl.TcPolice{}) {
+ if (filter.Police != nl.TcPolice{}) {
police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil)
- nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, fw.Police.Serialize())
- if (fw.Police.Rate != nl.TcRateSpec{}) {
- payload := SerializeRtab(fw.Rtab)
+ nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, filter.Police.Serialize())
+ if (filter.Police.Rate != nl.TcRateSpec{}) {
+ payload := SerializeRtab(filter.Rtab)
nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload)
}
- if (fw.Police.PeakRate != nl.TcRateSpec{}) {
- payload := SerializeRtab(fw.Ptab)
+ if (filter.Police.PeakRate != nl.TcRateSpec{}) {
+ payload := SerializeRtab(filter.Ptab)
nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload)
}
}
- if fw.ClassId != 0 {
+ if filter.ClassId != 0 {
b := make([]byte, 4)
- native.PutUint32(b, fw.ClassId)
+ native.PutUint32(b, filter.ClassId)
nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b)
}
- } else if bpf, ok := filter.(*BpfFilter); ok {
+ case *BpfFilter:
var bpfFlags uint32
- if bpf.ClassId != 0 {
- nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(bpf.ClassId))
+ if filter.ClassId != 0 {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId))
}
- if bpf.Fd >= 0 {
- nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(bpf.Fd))))
+ if filter.Fd >= 0 {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd))))
}
- if bpf.Name != "" {
- nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(bpf.Name))
+ if filter.Name != "" {
+ nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name))
}
- if bpf.DirectAction {
+ if filter.DirectAction {
bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT
}
nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags))
diff --git a/vendor/github.com/vishvananda/netlink/handle_linux.go b/vendor/github.com/vishvananda/netlink/handle_linux.go
index a04ceae..d37b087 100644
--- a/vendor/github.com/vishvananda/netlink/handle_linux.go
+++ b/vendor/github.com/vishvananda/netlink/handle_linux.go
@@ -45,12 +45,27 @@
}
tv := syscall.NsecToTimeval(to.Nanoseconds())
for _, sh := range h.sockets {
- fd := sh.Socket.GetFd()
- err := syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, &tv)
- if err != nil {
+ if err := sh.Socket.SetSendTimeout(&tv); err != nil {
return err
}
- err = syscall.SetsockoptTimeval(fd, syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, &tv)
+ if err := sh.Socket.SetReceiveTimeout(&tv); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SetSocketReceiveBufferSize sets the receive buffer size for each
+// socket in the netlink handle. The maximum value is capped by
+// /proc/sys/net/core/rmem_max.
+func (h *Handle) SetSocketReceiveBufferSize(size int, force bool) error {
+ opt := syscall.SO_RCVBUF
+ if force {
+ opt = syscall.SO_RCVBUFFORCE
+ }
+ for _, sh := range h.sockets {
+ fd := sh.Socket.GetFd()
+ err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, opt, size)
if err != nil {
return err
}
@@ -58,6 +73,24 @@
return nil
}
+// GetSocketReceiveBufferSize gets the receive buffer size for each
+// socket in the netlink handle. The retrieved value should be double
+// the one set via SetSocketReceiveBufferSize.
+func (h *Handle) GetSocketReceiveBufferSize() ([]int, error) {
+ results := make([]int, len(h.sockets))
+ i := 0
+ for _, sh := range h.sockets {
+ fd := sh.Socket.GetFd()
+ size, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF)
+ if err != nil {
+ return nil, err
+ }
+ results[i] = size
+ i++
+ }
+ return results, nil
+}
+
// NewHandle returns a netlink handle on the network namespace
// specified by ns. If ns=netns.None(), current network namespace
// will be assumed
diff --git a/vendor/github.com/vishvananda/netlink/handle_unspecified.go b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
index 32cf022..7da21a6 100644
--- a/vendor/github.com/vishvananda/netlink/handle_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/handle_unspecified.go
@@ -145,6 +145,10 @@
return ErrNotImplemented
}
+func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
+ return ErrNotImplemented
+}
+
func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error {
return ErrNotImplemented
}
diff --git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go
index 547e92e..5aa3a17 100644
--- a/vendor/github.com/vishvananda/netlink/link.go
+++ b/vendor/github.com/vishvananda/netlink/link.go
@@ -37,6 +37,7 @@
EncapType string
Protinfo *Protinfo
OperState LinkOperState
+ NetNsID int
}
// LinkOperState represents the values of the IFLA_OPERSTATE link
@@ -171,6 +172,7 @@
Fd int
Attached bool
Flags uint32
+ ProgId uint32
}
// Device links cannot be created via netlink. These links
@@ -339,6 +341,7 @@
UDPCSum bool
NoAge bool
GBP bool
+ FlowBased bool
Age int
Limit int
Port int
@@ -684,6 +687,7 @@
EncapType uint16
EncapFlags uint16
Link uint32
+ FlowBased bool
}
func (gretap *Gretap) Attrs() *LinkAttrs {
@@ -729,6 +733,28 @@
return "vti"
}
+type Gretun struct {
+ LinkAttrs
+ Link uint32
+ IFlags uint16
+ OFlags uint16
+ IKey uint32
+ OKey uint32
+ Local net.IP
+ Remote net.IP
+ Ttl uint8
+ Tos uint8
+ PMtuDisc uint8
+}
+
+func (gretun *Gretun) Attrs() *LinkAttrs {
+ return &gretun.LinkAttrs
+}
+
+func (gretun *Gretun) Type() string {
+ return "gre"
+}
+
type Vrf struct {
LinkAttrs
Table uint32
diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go
index 1c1bc52..e94fd97 100644
--- a/vendor/github.com/vishvananda/netlink/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/link_linux.go
@@ -379,6 +379,74 @@
return err
}
+// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf spoofchk $check`
+func LinkSetVfSpoofchk(link Link, vf int, check bool) error {
+ return pkgHandle.LinkSetVfSpoofchk(link, vf, check)
+}
+
+// LinkSetVfSpoofchk enables/disables spoof check on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf spoofchk $check`
+func (h *Handle) LinkSetVfSpoofchk(link Link, vf int, check bool) error {
+ var setting uint32
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+ info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ if check {
+ setting = 1
+ }
+ vfmsg := nl.VfSpoofchk{
+ Vf: uint32(vf),
+ Setting: setting,
+ }
+ nl.NewRtAttrChild(info, nl.IFLA_VF_SPOOFCHK, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+// LinkSetVfTrust enables/disables trust state on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf trust $state`
+func LinkSetVfTrust(link Link, vf int, state bool) error {
+ return pkgHandle.LinkSetVfTrust(link, vf, state)
+}
+
+// LinkSetVfTrust enables/disables trust state on a vf for the link.
+// Equivalent to: `ip link set $link vf $vf trust $state`
+func (h *Handle) LinkSetVfTrust(link Link, vf int, state bool) error {
+ var setting uint32
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ data := nl.NewRtAttr(nl.IFLA_VFINFO_LIST, nil)
+ info := nl.NewRtAttrChild(data, nl.IFLA_VF_INFO, nil)
+ if state {
+ setting = 1
+ }
+ vfmsg := nl.VfTrust{
+ Vf: uint32(vf),
+ Setting: setting,
+ }
+ nl.NewRtAttrChild(info, nl.IFLA_VF_TRUST, vfmsg.Serialize())
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
// LinkSetMaster sets the master of the link device.
// Equivalent to: `ip link set $link master $master`
func LinkSetMaster(link Link, master *Bridge) error {
@@ -500,6 +568,12 @@
// LinkSetXdpFd adds a bpf function to the driver. The fd must be a bpf
// program loaded with bpf(type=BPF_PROG_TYPE_XDP)
func LinkSetXdpFd(link Link, fd int) error {
+ return LinkSetXdpFdWithFlags(link, fd, 0)
+}
+
+// LinkSetXdpFdWithFlags adds a bpf function to the driver with the given
+// options. The fd must be a bpf program loaded with bpf(type=BPF_PROG_TYPE_XDP)
+func LinkSetXdpFdWithFlags(link Link, fd, flags int) error {
base := link.Attrs()
ensureIndex(base)
req := nl.NewNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
@@ -508,7 +582,7 @@
msg.Index = int32(base.Index)
req.AddData(msg)
- addXdpAttrs(&LinkXdp{Fd: fd}, req)
+ addXdpAttrs(&LinkXdp{Fd: fd, Flags: uint32(flags)}, req)
_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
return err
@@ -528,7 +602,13 @@
func addVxlanAttrs(vxlan *Vxlan, linkInfo *nl.RtAttr) {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+ if vxlan.FlowBased {
+ vxlan.VxlanId = 0
+ }
+
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_ID, nl.Uint32Attr(uint32(vxlan.VxlanId)))
+
if vxlan.VtepDevIndex != 0 {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LINK, nl.Uint32Attr(uint32(vxlan.VtepDevIndex)))
}
@@ -569,6 +649,9 @@
if vxlan.GBP {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_GBP, []byte{})
}
+ if vxlan.FlowBased {
+ nl.NewRtAttrChild(data, nl.IFLA_VXLAN_FLOWBASED, boolAttr(vxlan.FlowBased))
+ }
if vxlan.NoAge {
nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(0))
} else if vxlan.Age > 0 {
@@ -818,16 +901,17 @@
linkInfo := nl.NewRtAttr(syscall.IFLA_LINKINFO, nil)
nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type()))
- if vlan, ok := link.(*Vlan); ok {
+ switch link := link.(type) {
+ case *Vlan:
b := make([]byte, 2)
- native.PutUint16(b, uint16(vlan.VlanId))
+ native.PutUint16(b, uint16(link.VlanId))
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b)
- } else if veth, ok := link.(*Veth); ok {
+ case *Veth:
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil)
nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC)
- nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(veth.PeerName))
+ nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName))
if base.TxQLen >= 0 {
nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen)))
}
@@ -835,35 +919,37 @@
nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU)))
}
- } else if vxlan, ok := link.(*Vxlan); ok {
- addVxlanAttrs(vxlan, linkInfo)
- } else if bond, ok := link.(*Bond); ok {
- addBondAttrs(bond, linkInfo)
- } else if ipv, ok := link.(*IPVlan); ok {
+ case *Vxlan:
+ addVxlanAttrs(link, linkInfo)
+ case *Bond:
+ addBondAttrs(link, linkInfo)
+ case *IPVlan:
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(ipv.Mode)))
- } else if macv, ok := link.(*Macvlan); ok {
- if macv.Mode != MACVLAN_MODE_DEFAULT {
+ nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode)))
+ case *Macvlan:
+ if link.Mode != MACVLAN_MODE_DEFAULT {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+ nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
}
- } else if macv, ok := link.(*Macvtap); ok {
- if macv.Mode != MACVLAN_MODE_DEFAULT {
+ case *Macvtap:
+ if link.Mode != MACVLAN_MODE_DEFAULT {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
- nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[macv.Mode]))
+ nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode]))
}
- } else if gretap, ok := link.(*Gretap); ok {
- addGretapAttrs(gretap, linkInfo)
- } else if iptun, ok := link.(*Iptun); ok {
- addIptunAttrs(iptun, linkInfo)
- } else if vti, ok := link.(*Vti); ok {
- addVtiAttrs(vti, linkInfo)
- } else if vrf, ok := link.(*Vrf); ok {
- addVrfAttrs(vrf, linkInfo)
- } else if bridge, ok := link.(*Bridge); ok {
- addBridgeAttrs(bridge, linkInfo)
- } else if gtp, ok := link.(*GTP); ok {
- addGTPAttrs(gtp, linkInfo)
+ case *Gretap:
+ addGretapAttrs(link, linkInfo)
+ case *Iptun:
+ addIptunAttrs(link, linkInfo)
+ case *Gretun:
+ addGretunAttrs(link, linkInfo)
+ case *Vti:
+ addVtiAttrs(link, linkInfo)
+ case *Vrf:
+ addVrfAttrs(link, linkInfo)
+ case *Bridge:
+ addBridgeAttrs(link, linkInfo)
+ case *GTP:
+ addGTPAttrs(link, linkInfo)
}
req.AddData(linkInfo)
@@ -1093,6 +1179,8 @@
link = &Gretap{}
case "ipip":
link = &Iptun{}
+ case "gre":
+ link = &Gretun{}
case "vti":
link = &Vti{}
case "vrf":
@@ -1124,6 +1212,8 @@
parseGretapData(link, data)
case "ipip":
parseIptunData(link, data)
+ case "gre":
+ parseGretunData(link, data)
case "vti":
parseVtiData(link, data)
case "vrf":
@@ -1178,6 +1268,8 @@
}
case syscall.IFLA_OPERSTATE:
base.OperState = LinkOperState(uint8(attr.Value[0]))
+ case nl.IFLA_LINK_NETNSID:
+ base.NetNsID = int(native.Uint32(attr.Value[0:4]))
}
}
@@ -1239,16 +1331,34 @@
// LinkSubscribe takes a chan down which notifications will be sent
// when links change. Close the 'done' chan to stop subscription.
func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error {
- return linkSubscribe(netns.None(), netns.None(), ch, done)
+ return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil)
}
// LinkSubscribeAt works like LinkSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
- return linkSubscribe(ns, netns.None(), ch, done)
+ return linkSubscribeAt(ns, netns.None(), ch, done, nil)
}
-func linkSubscribe(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error {
+// LinkSubscribeOptions contains a set of options to use with
+// LinkSubscribeWithOptions.
+type LinkSubscribeOptions struct {
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+}
+
+// LinkSubscribeWithOptions work like LinkSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error {
+ if options.Namespace == nil {
+ none := netns.None()
+ options.Namespace = &none
+ }
+ return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error {
s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK)
if err != nil {
return err
@@ -1264,12 +1374,18 @@
for {
msgs, err := s.Receive()
if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
return
}
for _, m := range msgs {
ifmsg := nl.DeserializeIfInfomsg(m.Data)
link, err := LinkDeserialize(&m.Header, m.Data)
if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
return
}
ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link}
@@ -1363,6 +1479,33 @@
return nil
}
+// LinkSetTxQLen sets the transmit queue length for the link.
+// Equivalent to: `ip link set $link txqlen $qlen`
+func LinkSetTxQLen(link Link, qlen int) error {
+ return pkgHandle.LinkSetTxQLen(link, qlen)
+}
+
+// LinkSetTxQLen sets the transmit queue length for the link.
+// Equivalent to: `ip link set $link txqlen $qlen`
+func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ b := make([]byte, 4)
+ native.PutUint32(b, uint32(qlen))
+
+ data := nl.NewRtAttr(syscall.IFLA_TXQLEN, b)
+ req.AddData(data)
+
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
vlan := link.(*Vlan)
for _, datum := range data {
@@ -1407,6 +1550,8 @@
vxlan.UDPCSum = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_GBP:
vxlan.GBP = true
+ case nl.IFLA_VXLAN_FLOWBASED:
+ vxlan.FlowBased = int8(datum.Value[0]) != 0
case nl.IFLA_VXLAN_AGEING:
vxlan.Age = int(native.Uint32(datum.Value[0:4]))
vxlan.NoAge = vxlan.Age == 0
@@ -1547,6 +1692,12 @@
func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) {
data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+ if gretap.FlowBased {
+ // In flow based mode, no other attributes need to be configured
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased))
+ return
+ }
+
ip := gretap.Local.To4()
if ip != nil {
nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
@@ -1613,6 +1764,69 @@
gre.EncapType = native.Uint16(datum.Value[0:2])
case nl.IFLA_GRE_ENCAP_FLAGS:
gre.EncapFlags = native.Uint16(datum.Value[0:2])
+ case nl.IFLA_GRE_COLLECT_METADATA:
+ gre.FlowBased = int8(datum.Value[0]) != 0
+ }
+ }
+}
+
+func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) {
+ data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+ ip := gre.Local.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
+ }
+ ip = gre.Remote.To4()
+ if ip != nil {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+ }
+
+ if gre.IKey != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gre.IKey))
+ gre.IFlags |= uint16(nl.GRE_KEY)
+ }
+
+ if gre.OKey != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gre.OKey))
+ gre.OFlags |= uint16(nl.GRE_KEY)
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gre.IFlags))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gre.OFlags))
+
+ if gre.Link != 0 {
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link))
+ }
+
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl))
+ nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos))
+}
+
+func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) {
+ gre := link.(*Gretun)
+ for _, datum := range data {
+ switch datum.Attr.Type {
+ case nl.IFLA_GRE_OKEY:
+ gre.IKey = ntohl(datum.Value[0:4])
+ case nl.IFLA_GRE_IKEY:
+ gre.OKey = ntohl(datum.Value[0:4])
+ case nl.IFLA_GRE_LOCAL:
+ gre.Local = net.IP(datum.Value[0:4])
+ case nl.IFLA_GRE_REMOTE:
+ gre.Remote = net.IP(datum.Value[0:4])
+ case nl.IFLA_GRE_IFLAGS:
+ gre.IFlags = ntohs(datum.Value[0:2])
+ case nl.IFLA_GRE_OFLAGS:
+ gre.OFlags = ntohs(datum.Value[0:2])
+
+ case nl.IFLA_GRE_TTL:
+ gre.Ttl = uint8(datum.Value[0])
+ case nl.IFLA_GRE_TOS:
+ gre.Tos = uint8(datum.Value[0])
+ case nl.IFLA_GRE_PMTUDISC:
+ gre.PMtuDisc = uint8(datum.Value[0])
}
}
}
@@ -1630,8 +1844,10 @@
b := make([]byte, 4)
native.PutUint32(b, uint32(xdp.Fd))
nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b)
- native.PutUint32(b, xdp.Flags)
- nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+ if xdp.Flags != 0 {
+ native.PutUint32(b, xdp.Flags)
+ nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+ }
req.AddData(attrs)
}
@@ -1649,6 +1865,8 @@
xdp.Attached = attr.Value[0] != 0
case nl.IFLA_XDP_FLAGS:
xdp.Flags = native.Uint32(attr.Value[0:4])
+ case nl.IFLA_XDP_PROG_ID:
+ xdp.ProgId = native.Uint32(attr.Value[0:4])
}
}
return xdp, nil
diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go
index 0e5eb90..6a6f71c 100644
--- a/vendor/github.com/vishvananda/netlink/neigh.go
+++ b/vendor/github.com/vishvananda/netlink/neigh.go
@@ -14,6 +14,7 @@
Flags int
IP net.IP
HardwareAddr net.HardwareAddr
+ LLIPAddr net.IP //Used in the case of NHRP
}
// String returns $ip/$hwaddr $label
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
index f069db2..5edc8b4 100644
--- a/vendor/github.com/vishvananda/netlink/neigh_linux.go
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -128,6 +128,7 @@
func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
var family int
+
if neigh.Family > 0 {
family = neigh.Family
} else {
@@ -151,7 +152,10 @@
dstData := nl.NewRtAttr(NDA_DST, ipData)
req.AddData(dstData)
- if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil {
+ if neigh.LLIPAddr != nil {
+ llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4())
+ req.AddData(llIPData)
+ } else if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil {
hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
req.AddData(hwData)
}
@@ -237,12 +241,33 @@
return nil, err
}
+ // This should be cached for performance
+ // once per table dump
+ link, err := LinkByIndex(neigh.LinkIndex)
+ if err != nil {
+ return nil, err
+ }
+ encapType := link.Attrs().EncapType
+
for _, attr := range attrs {
switch attr.Attr.Type {
case NDA_DST:
neigh.IP = net.IP(attr.Value)
case NDA_LLADDR:
- neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+ // BUG: Is this a bug in the netlink library?
+ // #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
+ // #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
+ attrLen := attr.Attr.Len - syscall.SizeofRtAttr
+ if attrLen == 4 && (encapType == "ipip" ||
+ encapType == "sit" ||
+ encapType == "gre") {
+ neigh.LLIPAddr = net.IP(attr.Value)
+ } else if attrLen == 16 &&
+ encapType == "tunnel6" {
+ neigh.IP = net.IP(attr.Value)
+ } else {
+ neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+ }
}
}
diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
index 2d57c16..86111b9 100644
--- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
+++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go
@@ -108,6 +108,10 @@
return ErrNotImplemented
}
+func LinkSetTxQLen(link Link, qlen int) error {
+ return ErrNotImplemented
+}
+
func LinkAdd(link Link) error {
return ErrNotImplemented
}
diff --git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
new file mode 100644
index 0000000..6c0d333
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go
@@ -0,0 +1,74 @@
+package nl
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+const (
+ SizeofBridgeVlanInfo = 0x04
+)
+
+/* Bridge Flags */
+const (
+ BRIDGE_FLAGS_MASTER = iota /* Bridge command to/from master */
+ BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */
+)
+
+/* Bridge management nested attributes
+ * [IFLA_AF_SPEC] = {
+ * [IFLA_BRIDGE_FLAGS]
+ * [IFLA_BRIDGE_MODE]
+ * [IFLA_BRIDGE_VLAN_INFO]
+ * }
+ */
+const (
+ IFLA_BRIDGE_FLAGS = iota
+ IFLA_BRIDGE_MODE
+ IFLA_BRIDGE_VLAN_INFO
+)
+
+const (
+ BRIDGE_VLAN_INFO_MASTER = 1 << iota
+ BRIDGE_VLAN_INFO_PVID
+ BRIDGE_VLAN_INFO_UNTAGGED
+ BRIDGE_VLAN_INFO_RANGE_BEGIN
+ BRIDGE_VLAN_INFO_RANGE_END
+)
+
+// struct bridge_vlan_info {
+// __u16 flags;
+// __u16 vid;
+// };
+
+type BridgeVlanInfo struct {
+ Flags uint16
+ Vid uint16
+}
+
+func (b *BridgeVlanInfo) Serialize() []byte {
+ return (*(*[SizeofBridgeVlanInfo]byte)(unsafe.Pointer(b)))[:]
+}
+
+func DeserializeBridgeVlanInfo(b []byte) *BridgeVlanInfo {
+ return (*BridgeVlanInfo)(unsafe.Pointer(&b[0:SizeofBridgeVlanInfo][0]))
+}
+
+func (b *BridgeVlanInfo) PortVID() bool {
+ return b.Flags&BRIDGE_VLAN_INFO_PVID > 0
+}
+
+func (b *BridgeVlanInfo) EngressUntag() bool {
+ return b.Flags&BRIDGE_VLAN_INFO_UNTAGGED > 0
+}
+
+func (b *BridgeVlanInfo) String() string {
+ return fmt.Sprintf("%+v", *b)
+}
+
+/* New extended info filters for IFLA_EXT_MASK */
+const (
+ RTEXT_FILTER_VF = 1 << iota
+ RTEXT_FILTER_BRVLAN
+ RTEXT_FILTER_BRVLAN_COMPRESSED
+)
diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
index 6692b53..380cc59 100644
--- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go
@@ -79,8 +79,8 @@
CTA_TUPLE_ORIG = 1
CTA_TUPLE_REPLY = 2
CTA_STATUS = 3
- CTA_TIMEOUT = 8
- CTA_MARK = 9
+ CTA_TIMEOUT = 7
+ CTA_MARK = 8
CTA_PROTOINFO = 4
)
diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
index f7b9575..9ae65a1 100644
--- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go
@@ -231,7 +231,8 @@
* on/off switch
*/
IFLA_VF_STATS /* network device statistics */
- IFLA_VF_MAX = IFLA_VF_STATS
+ IFLA_VF_TRUST /* Trust state of VF */
+ IFLA_VF_MAX = IFLA_VF_TRUST
)
const (
@@ -259,6 +260,7 @@
SizeofVfSpoofchk = 0x08
SizeofVfLinkState = 0x08
SizeofVfRssQueryEn = 0x08
+ SizeofVfTrust = 0x08
)
// struct ifla_vf_mac {
@@ -419,12 +421,42 @@
return (*(*[SizeofVfRssQueryEn]byte)(unsafe.Pointer(msg)))[:]
}
+// struct ifla_vf_trust {
+// __u32 vf;
+// __u32 setting;
+// };
+
+type VfTrust struct {
+ Vf uint32
+ Setting uint32
+}
+
+func (msg *VfTrust) Len() int {
+ return SizeofVfTrust
+}
+
+func DeserializeVfTrust(b []byte) *VfTrust {
+ return (*VfTrust)(unsafe.Pointer(&b[0:SizeofVfTrust][0]))
+}
+
+func (msg *VfTrust) Serialize() []byte {
+ return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:]
+}
+
+const (
+ XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota
+ XDP_FLAGS_SKB_MODE
+ XDP_FLAGS_DRV_MODE
+ XDP_FLAGS_MASK = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE
+)
+
const (
IFLA_XDP_UNSPEC = iota
IFLA_XDP_FD /* fd of xdp program to attach, or -1 to remove */
IFLA_XDP_ATTACHED /* read-only bool indicating if prog is attached */
IFLA_XDP_FLAGS /* xdp prog related flags */
- IFLA_XDP_MAX = IFLA_XDP_FLAGS
+ IFLA_XDP_PROG_ID /* xdp prog id */
+ IFLA_XDP_MAX = IFLA_XDP_PROG_ID
)
const (
diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
index 1329acd..72f7f6a 100644
--- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
+++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go
@@ -621,6 +621,20 @@
return syscall.ParseNetlinkMessage(rb)
}
+// SetSendTimeout allows to set a send timeout on the socket
+func (s *NetlinkSocket) SetSendTimeout(timeout *syscall.Timeval) error {
+ // Set a send timeout of SOCKET_SEND_TIMEOUT, this will allow the Send to periodically unblock and avoid that a routine
+ // remains stuck on a send on a closed fd
+ return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, timeout)
+}
+
+// SetReceiveTimeout allows to set a receive timeout on the socket
+func (s *NetlinkSocket) SetReceiveTimeout(timeout *syscall.Timeval) error {
+ // Set a read timeout of SOCKET_READ_TIMEOUT, this will allow the Read to periodically unblock and avoid that a routine
+ // remains stuck on a recvmsg on a closed fd
+ return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, timeout)
+}
+
func (s *NetlinkSocket) GetPid() (uint32, error) {
fd := int(atomic.LoadInt32(&s.fd))
lsa, err := syscall.Getsockname(fd)
diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
index 2c0dedd..1123396 100644
--- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go
+++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go
@@ -160,71 +160,73 @@
req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type())))
options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
- if prio, ok := qdisc.(*Prio); ok {
+
+ switch qdisc := qdisc.(type) {
+ case *Prio:
tcmap := nl.TcPrioMap{
- Bands: int32(prio.Bands),
- Priomap: prio.PriorityMap,
+ Bands: int32(qdisc.Bands),
+ Priomap: qdisc.PriorityMap,
}
options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize())
- } else if tbf, ok := qdisc.(*Tbf); ok {
+ case *Tbf:
opt := nl.TcTbfQopt{}
- opt.Rate.Rate = uint32(tbf.Rate)
- opt.Peakrate.Rate = uint32(tbf.Peakrate)
- opt.Limit = tbf.Limit
- opt.Buffer = tbf.Buffer
+ opt.Rate.Rate = uint32(qdisc.Rate)
+ opt.Peakrate.Rate = uint32(qdisc.Peakrate)
+ opt.Limit = qdisc.Limit
+ opt.Buffer = qdisc.Buffer
nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize())
- if tbf.Rate >= uint64(1<<32) {
- nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(tbf.Rate))
+ if qdisc.Rate >= uint64(1<<32) {
+ nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate))
}
- if tbf.Peakrate >= uint64(1<<32) {
- nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(tbf.Peakrate))
+ if qdisc.Peakrate >= uint64(1<<32) {
+ nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate))
}
- if tbf.Peakrate > 0 {
- nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(tbf.Minburst))
+ if qdisc.Peakrate > 0 {
+ nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst))
}
- } else if htb, ok := qdisc.(*Htb); ok {
+ case *Htb:
opt := nl.TcHtbGlob{}
- opt.Version = htb.Version
- opt.Rate2Quantum = htb.Rate2Quantum
- opt.Defcls = htb.Defcls
+ opt.Version = qdisc.Version
+ opt.Rate2Quantum = qdisc.Rate2Quantum
+ opt.Defcls = qdisc.Defcls
// TODO: Handle Debug properly. For now default to 0
- opt.Debug = htb.Debug
- opt.DirectPkts = htb.DirectPkts
+ opt.Debug = qdisc.Debug
+ opt.DirectPkts = qdisc.DirectPkts
nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize())
// nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize())
- } else if netem, ok := qdisc.(*Netem); ok {
+ case *Netem:
opt := nl.TcNetemQopt{}
- opt.Latency = netem.Latency
- opt.Limit = netem.Limit
- opt.Loss = netem.Loss
- opt.Gap = netem.Gap
- opt.Duplicate = netem.Duplicate
- opt.Jitter = netem.Jitter
+ opt.Latency = qdisc.Latency
+ opt.Limit = qdisc.Limit
+ opt.Loss = qdisc.Loss
+ opt.Gap = qdisc.Gap
+ opt.Duplicate = qdisc.Duplicate
+ opt.Jitter = qdisc.Jitter
options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize())
// Correlation
corr := nl.TcNetemCorr{}
- corr.DelayCorr = netem.DelayCorr
- corr.LossCorr = netem.LossCorr
- corr.DupCorr = netem.DuplicateCorr
+ corr.DelayCorr = qdisc.DelayCorr
+ corr.LossCorr = qdisc.LossCorr
+ corr.DupCorr = qdisc.DuplicateCorr
if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 {
nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize())
}
// Corruption
corruption := nl.TcNetemCorrupt{}
- corruption.Probability = netem.CorruptProb
- corruption.Correlation = netem.CorruptCorr
+ corruption.Probability = qdisc.CorruptProb
+ corruption.Correlation = qdisc.CorruptCorr
if corruption.Probability > 0 {
nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize())
}
// Reorder
reorder := nl.TcNetemReorder{}
- reorder.Probability = netem.ReorderProb
- reorder.Correlation = netem.ReorderCorr
+ reorder.Probability = qdisc.ReorderProb
+ reorder.Correlation = qdisc.ReorderCorr
if reorder.Probability > 0 {
nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize())
}
- } else if _, ok := qdisc.(*Ingress); ok {
+ case *Ingress:
// ingress filters must use the proper handle
if qdisc.Attrs().Parent != HANDLE_INGRESS {
return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS")
diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go
index 03ac4b2..68c6a22 100644
--- a/vendor/github.com/vishvananda/netlink/route.go
+++ b/vendor/github.com/vishvananda/netlink/route.go
@@ -16,6 +16,7 @@
Decode([]byte) error
Encode() ([]byte, error)
String() string
+ Equal(Destination) bool
}
type Encap interface {
@@ -23,6 +24,7 @@
Decode([]byte) error
Encode() ([]byte, error)
String() string
+ Equal(Encap) bool
}
// Route represents a netlink route.
@@ -72,6 +74,25 @@
return fmt.Sprintf("{%s}", strings.Join(elems, " "))
}
+func (r Route) Equal(x Route) bool {
+ return r.LinkIndex == x.LinkIndex &&
+ r.ILinkIndex == x.ILinkIndex &&
+ r.Scope == x.Scope &&
+ ipNetEqual(r.Dst, x.Dst) &&
+ r.Src.Equal(x.Src) &&
+ r.Gw.Equal(x.Gw) &&
+ nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) &&
+ r.Protocol == x.Protocol &&
+ r.Priority == x.Priority &&
+ r.Table == x.Table &&
+ r.Type == x.Type &&
+ r.Tos == x.Tos &&
+ r.Flags == x.Flags &&
+ (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) &&
+ (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) &&
+ (r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap)))
+}
+
func (r *Route) SetFlag(flag NextHopFlag) {
r.Flags |= int(flag)
}
@@ -110,7 +131,46 @@
elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap))
}
elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1))
- elems = append(elems, fmt.Sprintf("Gw: %d", n.Gw))
+ elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw))
elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags()))
return fmt.Sprintf("{%s}", strings.Join(elems, " "))
}
+
+func (n NexthopInfo) Equal(x NexthopInfo) bool {
+ return n.LinkIndex == x.LinkIndex &&
+ n.Hops == x.Hops &&
+ n.Gw.Equal(x.Gw) &&
+ n.Flags == x.Flags &&
+ (n.NewDst == x.NewDst || (n.NewDst != nil && n.NewDst.Equal(x.NewDst))) &&
+ (n.Encap == x.Encap || (n.Encap != nil && n.Encap.Equal(x.Encap)))
+}
+
+type nexthopInfoSlice []*NexthopInfo
+
+func (n nexthopInfoSlice) Equal(x []*NexthopInfo) bool {
+ if len(n) != len(x) {
+ return false
+ }
+ for i := range n {
+ if n[i] == nil || x[i] == nil {
+ return false
+ }
+ if !n[i].Equal(*x[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// ipNetEqual returns true iff both IPNet are equal
+func ipNetEqual(ipn1 *net.IPNet, ipn2 *net.IPNet) bool {
+ if ipn1 == ipn2 {
+ return true
+ }
+ if ipn1 == nil || ipn2 == nil {
+ return false
+ }
+ m1, _ := ipn1.Mask.Size()
+ m2, _ := ipn2.Mask.Size()
+ return m1 == m2 && ipn1.IP.Equal(ipn2.IP)
+}
diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go
index cd739e7..9234c69 100644
--- a/vendor/github.com/vishvananda/netlink/route_linux.go
+++ b/vendor/github.com/vishvananda/netlink/route_linux.go
@@ -86,6 +86,34 @@
return strings.Join(s, "/")
}
+func (d *MPLSDestination) Equal(x Destination) bool {
+ o, ok := x.(*MPLSDestination)
+ if !ok {
+ return false
+ }
+ if d == nil && o == nil {
+ return true
+ }
+ if d == nil || o == nil {
+ return false
+ }
+ if d.Labels == nil && o.Labels == nil {
+ return true
+ }
+ if d.Labels == nil || o.Labels == nil {
+ return false
+ }
+ if len(d.Labels) != len(o.Labels) {
+ return false
+ }
+ for i := range d.Labels {
+ if d.Labels[i] != o.Labels[i] {
+ return false
+ }
+ }
+ return true
+}
+
type MPLSEncap struct {
Labels []int
}
@@ -129,6 +157,34 @@
return strings.Join(s, "/")
}
+func (e *MPLSEncap) Equal(x Encap) bool {
+ o, ok := x.(*MPLSEncap)
+ if !ok {
+ return false
+ }
+ if e == nil && o == nil {
+ return true
+ }
+ if e == nil || o == nil {
+ return false
+ }
+ if e.Labels == nil && o.Labels == nil {
+ return true
+ }
+ if e.Labels == nil || o.Labels == nil {
+ return false
+ }
+ if len(e.Labels) != len(o.Labels) {
+ return false
+ }
+ for i := range e.Labels {
+ if e.Labels[i] != o.Labels[i] {
+ return false
+ }
+ }
+ return true
+}
+
// RouteAdd will add a route to the system.
// Equivalent to: `ip route add $route`
func RouteAdd(route *Route) error {
@@ -421,19 +477,8 @@
continue
case filterMask&RT_FILTER_DST != 0:
if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) {
- if filter.Dst == nil {
- if route.Dst != nil {
- continue
- }
- } else {
- if route.Dst == nil {
- continue
- }
- aMaskLen, aMaskBits := route.Dst.Mask.Size()
- bMaskLen, bMaskBits := filter.Dst.Mask.Size()
- if !(route.Dst.IP.Equal(filter.Dst.IP) && aMaskLen == bMaskLen && aMaskBits == bMaskBits) {
- continue
- }
+ if !ipNetEqual(route.Dst, filter.Dst) {
+ continue
}
}
}
@@ -633,16 +678,34 @@
// RouteSubscribe takes a chan down which notifications will be sent
// when routes are added or deleted. Close the 'done' chan to stop subscription.
func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error {
- return routeSubscribeAt(netns.None(), netns.None(), ch, done)
+ return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil)
}
// RouteSubscribeAt works like RouteSubscribe plus it allows the caller
// to choose the network namespace in which to subscribe (ns).
func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
- return routeSubscribeAt(ns, netns.None(), ch, done)
+ return routeSubscribeAt(ns, netns.None(), ch, done, nil)
}
-func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
+// RouteSubscribeOptions contains a set of options to use with
+// RouteSubscribeWithOptions.
+type RouteSubscribeOptions struct {
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+}
+
+// RouteSubscribeWithOptions work like RouteSubscribe but enable to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback.
+func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error {
+ if options.Namespace == nil {
+ none := netns.None()
+ options.Namespace = &none
+ }
+ return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error {
s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE)
if err != nil {
return err
@@ -658,11 +721,17 @@
for {
msgs, err := s.Receive()
if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
return
}
for _, m := range msgs {
route, err := deserializeRoute(m.Data)
if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
return
}
ch <- RouteUpdate{Type: m.Header.Type, Route: route}
diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go
index f0243de..e4d9168 100644
--- a/vendor/github.com/vishvananda/netlink/rule.go
+++ b/vendor/github.com/vishvananda/netlink/rule.go
@@ -8,6 +8,7 @@
// Rule represents a netlink rule.
type Rule struct {
Priority int
+ Family int
Table int
Mark int
Mask int
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
index f9cdc85..cbd91a5 100644
--- a/vendor/github.com/vishvananda/netlink/rule_linux.go
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -37,6 +37,9 @@
func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
msg := nl.NewRtMsg()
msg.Family = syscall.AF_INET
+ if rule.Family != 0 {
+ msg.Family = uint8(rule.Family)
+ }
var dstFamily uint8
var rtAttrs []*nl.RtAttr
diff --git a/volume/store/store.go b/volume/store/store.go
index fd1ca61..9a511a5 100644
--- a/volume/store/store.go
+++ b/volume/store/store.go
@@ -145,8 +145,9 @@
s.globalLock.Lock()
v, exists := s.names[name]
if exists {
- if _, err := volumedrivers.ReleaseDriver(v.DriverName()); err != nil {
- logrus.Errorf("Error dereferencing volume driver: %v", err)
+ driverName := v.DriverName()
+ if _, err := volumedrivers.ReleaseDriver(driverName); err != nil {
+ logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver")
}
}
if err := s.removeMeta(name); err != nil {