Merge pull request #35376 from KingEmet/master
Permit a broader range of errors from mirror endpoints when determining whether to fall back
diff --git a/.DEREK.yml b/.DEREK.yml
new file mode 100644
index 0000000..3fd6789
--- /dev/null
+++ b/.DEREK.yml
@@ -0,0 +1,17 @@
+curators:
+ - aboch
+ - alexellis
+ - andrewhsu
+ - anonymuse
+ - chanwit
+ - ehazlett
+ - fntlnz
+ - gianarb
+ - mgoelzer
+ - programmerq
+ - rheinwein
+ - ripcurld0
+ - thajeztah
+
+features:
+ - comments
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index b7961e1..a38f54d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -311,6 +311,36 @@
will have time to make yourself available. You don't have to be a
maintainer to make a difference on the project!
+### Manage issues and pull requests using the Derek bot
+
+If you want to help label, assign, close or reopen issues or pull requests
+without commit rights, ask a maintainer to add your GitHub handle to the
+`.DEREK.yml` file. [Derek](https://github.com/alexellis/derek) is a bot that extends
+GitHub's user permissions to help non-committers to manage issues and pull requests simply by commenting.
+
+For example:
+
+* Labels
+
+```
+Derek add label: kind/question
+Derek remove label: status/claimed
+```
+
+* Assign work
+
+```
+Derek assign: username
+Derek unassign: me
+```
+
+* Manage issues and PRs
+
+```
+Derek close
+Derek reopen
+```
+
## Moby community guidelines
We want to keep the Moby community awesome, growing and collaborative. We need
diff --git a/Dockerfile b/Dockerfile
index e2c8770..5f78eda 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -52,6 +52,7 @@
libapparmor-dev \
libcap-dev \
libdevmapper-dev \
+ libnet-dev \
libnl-3-dev \
libprotobuf-c0-dev \
libprotobuf-dev \
@@ -94,11 +95,9 @@
ENV GOPATH /go
# Install CRIU for checkpoint/restore support
-ENV CRIU_VERSION 2.12.1
-# Install dependancy packages specific to criu
-RUN apt-get install libnet-dev -y && \
- mkdir -p /usr/src/criu \
- && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \
+ENV CRIU_VERSION 3.6
+RUN mkdir -p /usr/src/criu \
+ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \
&& cd /usr/src/criu \
&& make \
&& make install-criu
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 07ee067..96386e0 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -607,17 +607,7 @@
description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken
as a custom network's name to which this container should connect to."
PortBindings:
- type: "object"
- description: "A map of exposed container ports and the host port they should map to."
- additionalProperties:
- type: "object"
- properties:
- HostIp:
- type: "string"
- description: "The host IP address"
- HostPort:
- type: "string"
- description: "The host port number, as a string"
+ $ref: "#/definitions/PortMap"
RestartPolicy:
$ref: "#/definitions/RestartPolicy"
AutoRemove:
diff --git a/api/types/swarm/runtime/plugin.proto b/api/types/swarm/runtime/plugin.proto
index 06eb7ba..6d63b77 100644
--- a/api/types/swarm/runtime/plugin.proto
+++ b/api/types/swarm/runtime/plugin.proto
@@ -1,5 +1,7 @@
syntax = "proto3";
+option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";
+
// PluginSpec defines the base payload which clients can specify for creating
// a service with the plugin runtime.
message PluginSpec {
diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go
index 41e6b61..a65d8ed 100644
--- a/cmd/dockerd/daemon_unix.go
+++ b/cmd/dockerd/daemon_unix.go
@@ -14,7 +14,6 @@
"github.com/docker/docker/cmd/dockerd/hack"
"github.com/docker/docker/daemon"
"github.com/docker/docker/libcontainerd"
- "github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/libnetwork/portallocator"
"golang.org/x/sys/unix"
)
@@ -38,24 +37,13 @@
}
func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) {
- // On older kernel, letting putting the containerd-shim in its own
- // namespace will effectively prevent operations such as unlink, rename
- // and remove on mountpoints that were present at the time the shim
- // namespace was created. This would led to a famous EBUSY will trying to
- // remove shm mounts.
- var noNewNS bool
- if !kernel.CheckKernelVersion(3, 18, 0) {
- noNewNS = true
- }
-
opts := []libcontainerd.RemoteOption{
libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust),
libcontainerd.WithPlugin("linux", &linux.Config{
- Shim: daemon.DefaultShimBinary,
- Runtime: daemon.DefaultRuntimeBinary,
- RuntimeRoot: filepath.Join(cli.Config.Root, "runc"),
- ShimDebug: cli.Config.Debug,
- ShimNoMountNS: noNewNS,
+ Shim: daemon.DefaultShimBinary,
+ Runtime: daemon.DefaultRuntimeBinary,
+ RuntimeRoot: filepath.Join(cli.Config.Root, "runc"),
+ ShimDebug: cli.Config.Debug,
}),
}
if cli.Config.Debug {
diff --git a/daemon/graphdriver/copy/copy.go b/daemon/graphdriver/copy/copy.go
index 8ec458d..7a98bec 100644
--- a/daemon/graphdriver/copy/copy.go
+++ b/daemon/graphdriver/copy/copy.go
@@ -11,6 +11,7 @@
*/
import "C"
import (
+ "container/list"
"fmt"
"io"
"os"
@@ -65,7 +66,7 @@
// as the ioctl may not have been available (therefore EINVAL)
if err == unix.EXDEV || err == unix.ENOSYS {
*copyWithFileRange = false
- } else if err != nil {
+ } else {
return err
}
}
@@ -106,11 +107,28 @@
return nil
}
+type fileID struct {
+ dev uint64
+ ino uint64
+}
+
+type dirMtimeInfo struct {
+ dstPath *string
+ stat *syscall.Stat_t
+}
+
// DirCopy copies or hardlinks the contents of one directory to another,
// properly handling xattrs, and soft links
-func DirCopy(srcDir, dstDir string, copyMode Mode) error {
+//
+// Copying xattrs can be opted out of by passing false for copyXattrs.
+func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
copyWithFileRange := true
copyWithFileClone := true
+
+ // This is a map of source file inodes to dst file paths
+ copiedFiles := make(map[fileID]string)
+
+ dirsToSetMtimes := list.New()
err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
@@ -136,15 +154,21 @@
switch f.Mode() & os.ModeType {
case 0: // Regular file
+ id := fileID{dev: stat.Dev, ino: stat.Ino}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
return err2
}
+ } else if hardLinkDstPath, ok := copiedFiles[id]; ok {
+ if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
+ return err2
+ }
} else {
if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
return err2
}
+ copiedFiles[id] = dstPath
}
case os.ModeDir:
@@ -192,16 +216,10 @@
return err
}
- if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
- return err
- }
-
- // We need to copy this attribute if it appears in an overlay upper layer, as
- // this function is used to copy those. It is set by overlay if a directory
- // is removed and then re-created and should not inherit anything from the
- // same dir in the lower dir.
- if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
- return err
+ if copyXattrs {
+ if err := doCopyXattrs(srcPath, dstPath); err != nil {
+ return err
+ }
}
isSymlink := f.Mode()&os.ModeSymlink != 0
@@ -216,7 +234,9 @@
// system.Chtimes doesn't support a NOFOLLOW flag atm
// nolint: unconvert
- if !isSymlink {
+ if f.IsDir() {
+ dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
+ } else if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
@@ -230,5 +250,31 @@
}
return nil
})
- return err
+ if err != nil {
+ return err
+ }
+ for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
+ mtimeInfo := e.Value.(*dirMtimeInfo)
+ ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
+ if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func doCopyXattrs(srcPath, dstPath string) error {
+ if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+ return err
+ }
+
+ // We need to copy this attribute if it appears in an overlay upper layer, as
+ // this function is used to copy those. It is set by overlay if a directory
+ // is removed and then re-created and should not inherit anything from the
+ // same dir in the lower dir.
+ if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+ return err
+ }
+ return nil
}
diff --git a/daemon/graphdriver/copy/copy_test.go b/daemon/graphdriver/copy/copy_test.go
index 6976503..d216991 100644
--- a/daemon/graphdriver/copy/copy_test.go
+++ b/daemon/graphdriver/copy/copy_test.go
@@ -3,15 +3,20 @@
package copy
import (
+ "fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
+ "syscall"
"testing"
+ "time"
"github.com/docker/docker/pkg/parsers/kernel"
+ "github.com/docker/docker/pkg/system"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "golang.org/x/sys/unix"
)
func TestIsCopyFileRangeSyscallAvailable(t *testing.T) {
@@ -45,6 +50,84 @@
doCopyTest(t, &copyWithFileRange, &copyWithFileClone)
}
+func TestCopyDir(t *testing.T) {
+ srcDir, err := ioutil.TempDir("", "srcDir")
+ require.NoError(t, err)
+ populateSrcDir(t, srcDir, 3)
+
+ dstDir, err := ioutil.TempDir("", "testdst")
+ require.NoError(t, err)
+ defer os.RemoveAll(dstDir)
+
+ assert.NoError(t, DirCopy(srcDir, dstDir, Content, false))
+ require.NoError(t, filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Rebase path
+ relPath, err := filepath.Rel(srcDir, srcPath)
+ require.NoError(t, err)
+ if relPath == "." {
+ return nil
+ }
+
+ dstPath := filepath.Join(dstDir, relPath)
+ require.NoError(t, err)
+
+ // If we add non-regular dirs and files to the test
+ // then we need to add more checks here.
+ dstFileInfo, err := os.Lstat(dstPath)
+ require.NoError(t, err)
+
+ srcFileSys := f.Sys().(*syscall.Stat_t)
+ dstFileSys := dstFileInfo.Sys().(*syscall.Stat_t)
+
+ t.Log(relPath)
+ if srcFileSys.Dev == dstFileSys.Dev {
+ assert.NotEqual(t, srcFileSys.Ino, dstFileSys.Ino)
+ }
+ // Todo: check size, and ctim is not equal
+ /// on filesystems that have granular ctimes
+ assert.Equal(t, srcFileSys.Mode, dstFileSys.Mode)
+ assert.Equal(t, srcFileSys.Uid, dstFileSys.Uid)
+ assert.Equal(t, srcFileSys.Gid, dstFileSys.Gid)
+ assert.Equal(t, srcFileSys.Mtim, dstFileSys.Mtim)
+
+ return nil
+ }))
+}
+
+func randomMode(baseMode int) os.FileMode {
+ for i := 0; i < 7; i++ {
+ baseMode = baseMode | (1&rand.Intn(2))<<uint(i)
+ }
+ return os.FileMode(baseMode)
+}
+
+func populateSrcDir(t *testing.T, srcDir string, remainingDepth int) {
+ if remainingDepth == 0 {
+ return
+ }
+ aTime := time.Unix(rand.Int63(), 0)
+ mTime := time.Unix(rand.Int63(), 0)
+
+ for i := 0; i < 10; i++ {
+ dirName := filepath.Join(srcDir, fmt.Sprintf("srcdir-%d", i))
+ // Owner all bits set
+ require.NoError(t, os.Mkdir(dirName, randomMode(0700)))
+ populateSrcDir(t, dirName, remainingDepth-1)
+ require.NoError(t, system.Chtimes(dirName, aTime, mTime))
+ }
+
+ for i := 0; i < 10; i++ {
+ fileName := filepath.Join(srcDir, fmt.Sprintf("srcfile-%d", i))
+ // Owner read bit set
+ require.NoError(t, ioutil.WriteFile(fileName, []byte{}, randomMode(0400)))
+ require.NoError(t, system.Chtimes(fileName, aTime, mTime))
+ }
+}
+
func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) {
dir, err := ioutil.TempDir("", "docker-copy-check")
require.NoError(t, err)
@@ -65,3 +148,32 @@
require.NoError(t, err)
assert.Equal(t, buf, readBuf)
}
+
+func TestCopyHardlink(t *testing.T) {
+ var srcFile1FileInfo, srcFile2FileInfo, dstFile1FileInfo, dstFile2FileInfo unix.Stat_t
+
+ srcDir, err := ioutil.TempDir("", "srcDir")
+ require.NoError(t, err)
+ defer os.RemoveAll(srcDir)
+
+ dstDir, err := ioutil.TempDir("", "dstDir")
+ require.NoError(t, err)
+ defer os.RemoveAll(dstDir)
+
+ srcFile1 := filepath.Join(srcDir, "file1")
+ srcFile2 := filepath.Join(srcDir, "file2")
+ dstFile1 := filepath.Join(dstDir, "file1")
+ dstFile2 := filepath.Join(dstDir, "file2")
+ require.NoError(t, ioutil.WriteFile(srcFile1, []byte{}, 0777))
+ require.NoError(t, os.Link(srcFile1, srcFile2))
+
+ assert.NoError(t, DirCopy(srcDir, dstDir, Content, false))
+
+ require.NoError(t, unix.Stat(srcFile1, &srcFile1FileInfo))
+ require.NoError(t, unix.Stat(srcFile2, &srcFile2FileInfo))
+ require.Equal(t, srcFile1FileInfo.Ino, srcFile2FileInfo.Ino)
+
+ require.NoError(t, unix.Stat(dstFile1, &dstFile1FileInfo))
+ require.NoError(t, unix.Stat(dstFile2, &dstFile2FileInfo))
+ assert.Equal(t, dstFile1FileInfo.Ino, dstFile2FileInfo.Ino)
+}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index 83a1677..647a86d 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -327,7 +327,7 @@
return err
}
- return copy.DirCopy(parentUpperDir, upperDir, copy.Content)
+ return copy.DirCopy(parentUpperDir, upperDir, copy.Content, true)
}
func (d *Driver) dir(id string) string {
@@ -466,7 +466,7 @@
}
}()
- if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink); err != nil {
+ if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink, true); err != nil {
return 0, err
}
diff --git a/daemon/graphdriver/vfs/copy_linux.go b/daemon/graphdriver/vfs/copy_linux.go
new file mode 100644
index 0000000..a632d35
--- /dev/null
+++ b/daemon/graphdriver/vfs/copy_linux.go
@@ -0,0 +1,9 @@
+// +build linux
+
+package vfs
+
+import "github.com/docker/docker/daemon/graphdriver/copy"
+
+func dirCopy(srcDir, dstDir string) error {
+ return copy.DirCopy(srcDir, dstDir, copy.Content, false)
+}
diff --git a/daemon/graphdriver/vfs/copy_unsupported.go b/daemon/graphdriver/vfs/copy_unsupported.go
new file mode 100644
index 0000000..fcc4b69
--- /dev/null
+++ b/daemon/graphdriver/vfs/copy_unsupported.go
@@ -0,0 +1,9 @@
+// +build !linux
+
+package vfs
+
+import "github.com/docker/docker/pkg/chrootarchive"
+
+func dirCopy(srcDir, dstDir string) error {
+ return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
+}
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 610476f..a85d6a7 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -7,7 +7,6 @@
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/daemon/graphdriver/quota"
- "github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
@@ -16,8 +15,8 @@
)
var (
- // CopyWithTar defines the copy method to use.
- CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
+ // CopyDir defines the copy method to use.
+ CopyDir = dirCopy
)
func init() {
@@ -133,7 +132,7 @@
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
- return CopyWithTar(parentDir.Path(), dir)
+ return CopyDir(parentDir.Path(), dir)
}
func (d *Driver) dir(id string) string {
diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go
index 6a0653e..cde36d4 100644
--- a/daemon/logger/fluentd/fluentd.go
+++ b/daemon/logger/fluentd/fluentd.go
@@ -48,11 +48,12 @@
defaultRetryWait = 1000
defaultMaxRetries = math.MaxInt32
- addressKey = "fluentd-address"
- bufferLimitKey = "fluentd-buffer-limit"
- retryWaitKey = "fluentd-retry-wait"
- maxRetriesKey = "fluentd-max-retries"
- asyncConnectKey = "fluentd-async-connect"
+ addressKey = "fluentd-address"
+ bufferLimitKey = "fluentd-buffer-limit"
+ retryWaitKey = "fluentd-retry-wait"
+ maxRetriesKey = "fluentd-max-retries"
+ asyncConnectKey = "fluentd-async-connect"
+ subSecondPrecisionKey = "fluentd-sub-second-precision"
)
func init() {
@@ -117,15 +118,23 @@
}
}
+ subSecondPrecision := false
+ if info.Config[subSecondPrecisionKey] != "" {
+ if subSecondPrecision, err = strconv.ParseBool(info.Config[subSecondPrecisionKey]); err != nil {
+ return nil, err
+ }
+ }
+
fluentConfig := fluent.Config{
- FluentPort: loc.port,
- FluentHost: loc.host,
- FluentNetwork: loc.protocol,
- FluentSocketPath: loc.path,
- BufferLimit: bufferLimit,
- RetryWait: retryWait,
- MaxRetry: maxRetries,
- AsyncConnect: asyncConnect,
+ FluentPort: loc.port,
+ FluentHost: loc.host,
+ FluentNetwork: loc.protocol,
+ FluentSocketPath: loc.path,
+ BufferLimit: bufferLimit,
+ RetryWait: retryWait,
+ MaxRetry: maxRetries,
+ AsyncConnect: asyncConnect,
+ SubSecondPrecision: subSecondPrecision,
}
logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig).
@@ -183,6 +192,7 @@
case retryWaitKey:
case maxRetriesKey:
case asyncConnectKey:
+ case subSecondPrecisionKey:
// Accepted
default:
return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key)
diff --git a/daemon/network.go b/daemon/network.go
index 0420caa..573901e 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -3,6 +3,7 @@
import (
"fmt"
"net"
+ "runtime"
"sort"
"strings"
"sync"
@@ -183,21 +184,14 @@
// Otherwise continue down the call to create or recreate sandbox.
}
- n, err := daemon.GetNetworkByID(create.ID)
+ _, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
}
-
- if err = daemon.createLoadBalancerSandbox("ingress", create.ID, ip, n, libnetwork.OptionIngress()); err != nil {
- logrus.Errorf("Failed creating load balancer sandbox for ingress network: %v", err)
- }
}
func (daemon *Daemon) releaseIngress(id string) {
controller := daemon.netController
- if err := controller.SandboxDestroy("ingress-sbox"); err != nil {
- logrus.Errorf("Failed to delete ingress sandbox: %v", err)
- }
if id == "" {
return
@@ -209,13 +203,6 @@
return
}
- for _, ep := range n.Endpoints() {
- if err := ep.Delete(true); err != nil {
- logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
- return
- }
- }
-
if err := n.Delete(); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return
@@ -270,34 +257,6 @@
return resp, err
}
-func (daemon *Daemon) createLoadBalancerSandbox(prefix, id string, ip net.IP, n libnetwork.Network, options ...libnetwork.SandboxOption) error {
- c := daemon.netController
- sandboxName := prefix + "-sbox"
- sb, err := c.NewSandbox(sandboxName, options...)
- if err != nil {
- if _, ok := err.(networktypes.ForbiddenError); !ok {
- return errors.Wrapf(err, "Failed creating %s sandbox", sandboxName)
- }
- return nil
- }
-
- endpointName := prefix + "-endpoint"
- ep, err := n.CreateEndpoint(endpointName, libnetwork.CreateOptionIpam(ip, nil, nil, nil), libnetwork.CreateOptionLoadBalancer())
- if err != nil {
- return errors.Wrapf(err, "Failed creating %s in sandbox %s", endpointName, sandboxName)
- }
-
- if err := ep.Join(sb, nil); err != nil {
- return errors.Wrapf(err, "Failed joining %s to sandbox %s", endpointName, sandboxName)
- }
-
- if err := sb.EnableService(); err != nil {
- return errors.Wrapf(err, "Failed enabling service in %s sandbox", sandboxName)
- }
-
- return nil
-}
-
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
@@ -360,6 +319,15 @@
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
}
+ if agent && driver == "overlay" && (create.Ingress || runtime.GOOS == "windows") {
+ nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
+ if !exists {
+ return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
+ }
+
+ nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
+ }
+
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
@@ -375,18 +343,6 @@
}
daemon.LogNetworkEvent(n, "create")
- if agent && !n.Info().Ingress() && n.Type() == "overlay" {
- nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
- if !exists {
- return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
- }
-
- if err := daemon.createLoadBalancerSandbox(create.Name, id, nodeIP, n); err != nil {
- return nil, err
- }
-
- }
-
return &types.NetworkCreateResponse{
ID: n.ID(),
Warning: warning,
@@ -517,43 +473,16 @@
return daemon.deleteNetwork(networkID, false)
}
-func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) {
- controller := daemon.netController
-
- //The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint")
- endpoints := n.Endpoints()
- if len(endpoints) == 1 {
- sandboxName := n.Name() + "-sbox"
-
- info := endpoints[0].Info()
- if info != nil {
- sb := info.Sandbox()
- if sb != nil {
- if err := sb.DisableService(); err != nil {
- logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
- //Ignore error and attempt to delete the load balancer endpoint
- }
- }
- }
-
- if err := endpoints[0].Delete(true); err != nil {
- logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err)
- //Ignore error and attempt to delete the sandbox.
- }
-
- if err := controller.SandboxDestroy(sandboxName); err != nil {
- logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
- //Ignore error and attempt to delete the network.
- }
- }
-}
-
func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
nw, err := daemon.FindNetwork(networkID)
if err != nil {
return err
}
+ if nw.Info().Ingress() {
+ return nil
+ }
+
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return notAllowedError{err}
@@ -569,10 +498,6 @@
return notAllowedError{err}
}
- if !nw.Info().Ingress() && nw.Type() == "overlay" {
- daemon.deleteLoadBalancerSandbox(nw)
- }
-
if err := nw.Delete(); err != nil {
return err
}
diff --git a/docs/contributing/set-up-dev-env.md b/docs/contributing/set-up-dev-env.md
index acd6888..b28b8fb 100644
--- a/docs/contributing/set-up-dev-env.md
+++ b/docs/contributing/set-up-dev-env.md
@@ -132,14 +132,14 @@
```none
Successfully built 3d872560918e
docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/moby/moby/bundles" -t "docker-dev:dry-run-test" bash
- root@f31fa223770f:/go/src/github.com/moby/moby#
+ root@f31fa223770f:/go/src/github.com/docker/docker#
```
At this point, your prompt reflects the container's BASH shell.
5. List the contents of the current directory (`/go/src/github.com/moby/moby`).
- You should see the image's source from the `/go/src/github.com/moby/moby`
+ You should see the image's source from the `/go/src/github.com/docker/docker`
directory.
![List example](images/list_example.png)
@@ -147,7 +147,7 @@
6. Make a `dockerd` binary.
```none
- root@a8b2885ab900:/go/src/github.com/moby/moby# hack/make.sh binary
+ root@a8b2885ab900:/go/src/github.com/docker/docker# hack/make.sh binary
Removing bundles/
---> Making bundle: binary (in bundles/binary)
@@ -161,7 +161,7 @@
`/usr/local/bin/` directory.
```none
- root@a8b2885ab900:/go/src/github.com/moby/moby# make install
+ root@a8b2885ab900:/go/src/github.com/docker/docker# make install
```
8. Start the Engine daemon running in the background.
@@ -190,7 +190,7 @@
9. Inside your container, check your Docker version.
```none
- root@5f8630b873fe:/go/src/github.com/moby/moby# docker --version
+ root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version
Docker version 1.12.0-dev, build 6e728fb
```
@@ -201,13 +201,13 @@
10. Run the `hello-world` image.
```none
- root@5f8630b873fe:/go/src/github.com/moby/moby# docker run hello-world
+ root@5f8630b873fe:/go/src/github.com/docker/docker# docker run hello-world
```
11. List the image you just downloaded.
```none
- root@5f8630b873fe:/go/src/github.com/moby/moby# docker images
+ root@5f8630b873fe:/go/src/github.com/docker/docker# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hello-world latest c54a2cc56cbb 3 months ago 1.85 kB
```
@@ -296,7 +296,7 @@
10. To view your change, run the `dockerd --help` command in the docker development container shell.
```bash
- root@b0cb4f22715d:/go/src/github.com/moby/moby# dockerd --help
+ root@b0cb4f22715d:/go/src/github.com/docker/docker# dockerd --help
Usage: dockerd COMMAND
diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits
index 1b38076..abe8bfe 100644
--- a/hack/dockerfile/binaries-commits
+++ b/hack/dockerfile/binaries-commits
@@ -4,7 +4,7 @@
# When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
RUNC_COMMIT=b2567b37d7b75eb4cf325b77297b140ea686ce8f
-CONTAINERD_COMMIT=6bff39c643886dfa3d546e83a90a527b64ddeacf
+CONTAINERD_COMMIT=v1.0.0
TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
index e94185a..6cfb27e 100644
--- a/integration/service/create_test.go
+++ b/integration/service/create_test.go
@@ -11,12 +11,11 @@
"github.com/docker/docker/client"
"github.com/docker/docker/integration-cli/request"
"github.com/gotestyourself/gotestyourself/poll"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
)
-func TestCreateWithLBSandbox(t *testing.T) {
+func TestCreateServiceMultipleTimes(t *testing.T) {
defer setupTest(t)()
d := newSwarm(t)
defer d.Stop(t)
@@ -33,9 +32,8 @@
require.NoError(t, err)
overlayID := netResp.ID
- var instances uint64 = 1
+ var instances uint64 = 4
serviceSpec := swarmServiceSpec("TestService", instances)
-
serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: overlayName})
serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
@@ -56,14 +54,26 @@
_, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
require.NoError(t, err)
- network, err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{})
- require.NoError(t, err)
- assert.Contains(t, network.Containers, overlayName+"-sbox")
-
err = client.ServiceRemove(context.Background(), serviceID)
require.NoError(t, err)
poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings)
+ poll.WaitOn(t, noTasks(client), pollSettings)
+
+ serviceResp, err = client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
+ QueryRegistry: false,
+ })
+ require.NoError(t, err)
+
+ serviceID2 := serviceResp.ID
+ poll.WaitOn(t, serviceRunningTasksCount(client, serviceID2, instances), pollSettings)
+
+ err = client.ServiceRemove(context.Background(), serviceID2)
+ require.NoError(t, err)
+
+ poll.WaitOn(t, serviceIsRemoved(client, serviceID2), pollSettings)
+ poll.WaitOn(t, noTasks(client), pollSettings)
+
err = client.NetworkRemove(context.Background(), overlayID)
require.NoError(t, err)
@@ -112,6 +122,23 @@
}
}
+func noTasks(client client.ServiceAPIClient) func(log poll.LogT) poll.Result {
+ return func(log poll.LogT) poll.Result {
+ filter := filters.NewArgs()
+ tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
+ Filters: filter,
+ })
+ switch {
+ case err != nil:
+ return poll.Error(err)
+ case len(tasks) == 0:
+ return poll.Success()
+ default:
+ return poll.Continue("task count at %d waiting for 0", len(tasks))
+ }
+ }
+}
+
func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result {
return func(log poll.LogT) poll.Result {
filter := filters.NewArgs()
diff --git a/layer/layer_test.go b/layer/layer_test.go
index 6936fae..f632d44 100644
--- a/layer/layer_test.go
+++ b/layer/layer_test.go
@@ -23,7 +23,7 @@
func init() {
graphdriver.ApplyUncompressedLayer = archive.UnpackLayer
defaultArchiver := archive.NewDefaultArchiver()
- vfs.CopyWithTar = defaultArchiver.CopyWithTar
+ vfs.CopyDir = defaultArchiver.CopyWithTar
}
func newVFSGraphDriver(td string) (graphdriver.Driver, error) {
diff --git a/libcontainerd/client_daemon.go b/libcontainerd/client_daemon.go
index 5c26bc2..0a3502c 100644
--- a/libcontainerd/client_daemon.go
+++ b/libcontainerd/client_daemon.go
@@ -681,7 +681,10 @@
}()
eventStream, err = c.remote.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
- Filters: []string{"namespace==" + c.namespace + ",topic~=/tasks/.+"},
+ Filters: []string{
+ "namespace==" + c.namespace,
+ "topic~=/tasks/",
+ },
}, grpc.FailFast(false))
if err != nil {
return
diff --git a/plugin/manager_linux_test.go b/plugin/manager_linux_test.go
new file mode 100644
index 0000000..3259ca8
--- /dev/null
+++ b/plugin/manager_linux_test.go
@@ -0,0 +1,79 @@
+package plugin
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/docker/docker/api/types"
+ "github.com/docker/docker/pkg/mount"
+ "github.com/docker/docker/pkg/system"
+ "github.com/docker/docker/plugin/v2"
+)
+
+func TestManagerWithPluginMounts(t *testing.T) {
+ root, err := ioutil.TempDir("", "test-store-with-plugin-mounts")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer system.EnsureRemoveAll(root)
+
+ s := NewStore()
+ managerRoot := filepath.Join(root, "manager")
+ p1 := newTestPlugin(t, "test1", "testcap", managerRoot)
+
+ p2 := newTestPlugin(t, "test2", "testcap", managerRoot)
+ p2.PluginObj.Enabled = true
+
+ m, err := NewManager(
+ ManagerConfig{
+ Store: s,
+ Root: managerRoot,
+ ExecRoot: filepath.Join(root, "exec"),
+ CreateExecutor: func(*Manager) (Executor, error) { return nil, nil },
+ LogPluginEvent: func(_, _, _ string) {},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := s.Add(p1); err != nil {
+ t.Fatal(err)
+ }
+ if err := s.Add(p2); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a mount to simulate a plugin that has created its own mounts
+ p2Mount := filepath.Join(p2.Rootfs, "testmount")
+ if err := os.MkdirAll(p2Mount, 0755); err != nil {
+ t.Fatal(err)
+ }
+ if err := mount.Mount("tmpfs", p2Mount, "tmpfs", ""); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := m.Remove(p1.Name(), &types.PluginRmConfig{ForceRemove: true}); err != nil {
+ t.Fatal(err)
+ }
+ if mounted, err := mount.Mounted(p2Mount); !mounted || err != nil {
+ t.Fatalf("expected %s to be mounted, err: %v", p2Mount, err)
+ }
+}
+
+func newTestPlugin(t *testing.T, name, cap, root string) *v2.Plugin {
+ rootfs := filepath.Join(root, name)
+ if err := os.MkdirAll(rootfs, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ p := v2.Plugin{PluginObj: types.Plugin{Name: name}}
+ p.Rootfs = rootfs
+ iType := types.PluginInterfaceType{Capability: cap, Prefix: "docker", Version: "1.0"}
+ i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
+ p.PluginObj.Config.Interface = i
+ p.PluginObj.ID = name
+
+ return &p
+}
diff --git a/vendor.conf b/vendor.conf
index 84945ec..120dd9d 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -30,7 +30,7 @@
github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
#get libnetwork packages
-github.com/docker/libnetwork f7d21337cf1eb628ad54eecac0881fa23ec266df
+github.com/docker/libnetwork 64ae58878fc8f95e4a167499d654e13fa36abdc7
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -79,10 +79,10 @@
# gelf logging driver deps
github.com/Graylog2/go-gelf v2
-github.com/fluent/fluent-logger-golang v1.2.1
+github.com/fluent/fluent-logger-golang v1.3.0
# fluent-logger-golang deps
github.com/philhofer/fwd 98c11a7a6ec829d672b03833c3d69a7fae1ca972
-github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
+github.com/tinylib/msgp 3b556c64540842d4f82967be066a7f7fffc3adad
# fsnotify
github.com/fsnotify/fsnotify 4da3e2cfbabc9f751898f250b49f2439785783a1
@@ -103,7 +103,7 @@
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
-github.com/containerd/containerd 6bff39c643886dfa3d546e83a90a527b64ddeacf
+github.com/containerd/containerd v1.0.0
github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
github.com/containerd/cgroups 29da22c6171a4316169f9205ab6c49f59b5b852f
@@ -111,7 +111,7 @@
github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/dmcgowan/go-tar go1.10
-github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
+github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
# cluster
github.com/docker/swarmkit de950a7ed842c7b7e47e9451cde9bf8f96031894
diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md
index 6e59def..84d1eec 100644
--- a/vendor/github.com/containerd/containerd/README.md
+++ b/vendor/github.com/containerd/containerd/README.md
@@ -13,7 +13,37 @@
## Getting Started
-If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
+See our documentation on [containerd.io](containerd.io):
+* [for ops and admins](docs/ops.md)
+* [namespaces](docs/namespaces.md)
+* [client options](docs/client-opts.md)
+
+See how to build containerd from source at [BUILDING](BUILDING.md).
+
+If you are interested in trying out containerd see our example at [Getting Started](docs/getting-started.md).
+
+
+## Runtime Requirements
+
+Runtime requirements for containerd are very minimal. Most interactions with
+the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
+OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).
+
+There are specific features
+used by containerd core code and snapshotters that will require a minimum kernel
+version on Linux. With the understood caveat of distro kernel versioning, a
+reasonable starting point for Linux is a minimum 4.x kernel version.
+
+The overlay filesystem snapshotter, used by default, uses features that were
+finalized in the 4.x kernel series. If you choose to use btrfs, there may
+be more flexibility in kernel version (minimum recommended is 3.18), but will
+require the btrfs kernel module and btrfs tools to be installed on your Linux
+distribution.
+
+To use Linux checkpoint and restore features, you will need `criu` installed on
+your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
+
+Build requirements for developers are listed in [BUILDING](BUILDING.md).
## Features
@@ -23,7 +53,11 @@
```go
-import "github.com/containerd/containerd"
+import (
+ "github.com/containerd/containerd"
+ "github.com/containerd/containerd/cio"
+)
+
func main() {
client, err := containerd.New("/run/containerd/containerd.sock")
@@ -39,7 +73,7 @@
To set a namespace for requests to the API:
```go
-context = context.Background()
+context = context.Background()
// create a context for docker
docker = namespaces.WithNamespace(context, "docker")
@@ -93,7 +127,6 @@
redis, err := client.NewContainer(context, "redis-master",
containerd.WithNewSnapshot("redis-rootfs", image),
containerd.WithNewSpec(oci.WithImageConfig(image)),
-
)
// use a readonly filesystem with multiple containers
@@ -112,7 +145,7 @@
```go
// create a new task
-task, err := redis.NewTask(context, containerd.Stdio)
+task, err := redis.NewTask(context, cio.Stdio)
defer task.Delete(context)
// the task is now running and has a pid that can be use to setup networking
@@ -144,37 +177,12 @@
redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
defer container.Delete(context)
-task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
defer task.Delete(context)
err := task.Start(context)
```
-## Developer Quick-Start
-
-To build the daemon and `ctr` simple test client, the following build system dependencies are required:
-
-* Go 1.9.x or above
-* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
-* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
-
-For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:
-
-```
-$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
-$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
-```
-
-With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
-
-> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
-> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
-> Makefile target will disable the btrfs driver within the containerd Go build.
-
-Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
-
-Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
-
### Releases and API Stability
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go
index 5c20335..39547f5 100644
--- a/vendor/github.com/containerd/containerd/client.go
+++ b/vendor/github.com/containerd/containerd/client.go
@@ -7,6 +7,7 @@
"net/http"
"runtime"
"strconv"
+ "strings"
"sync"
"time"
@@ -29,7 +30,6 @@
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
- "github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
@@ -334,6 +334,14 @@
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
if err != nil {
+ // TODO(estesp): until we have a more complete method for index push, we need to report
+ // missing dependencies in an index/manifest list by sensing the "400 Bad Request"
+ // as a marker for this problem
+ if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
+ manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
+ errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
+ return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
+ }
return err
}
}
@@ -494,95 +502,27 @@
}, nil
}
-type imageFormat string
-
-const (
- ociImageFormat imageFormat = "oci"
-)
-
type importOpts struct {
- format imageFormat
- refObject string
- labels map[string]string
}
// ImportOpt allows the caller to specify import specific options
type ImportOpt func(c *importOpts) error
-// WithImportLabel sets a label to be associated with an imported image
-func WithImportLabel(key, value string) ImportOpt {
- return func(opts *importOpts) error {
- if opts.labels == nil {
- opts.labels = make(map[string]string)
- }
-
- opts.labels[key] = value
- return nil
- }
-}
-
-// WithImportLabels associates a set of labels to an imported image
-func WithImportLabels(labels map[string]string) ImportOpt {
- return func(opts *importOpts) error {
- if opts.labels == nil {
- opts.labels = make(map[string]string)
- }
-
- for k, v := range labels {
- opts.labels[k] = v
- }
- return nil
- }
-}
-
-// WithOCIImportFormat sets the import format for an OCI image format
-func WithOCIImportFormat() ImportOpt {
- return func(c *importOpts) error {
- if c.format != "" {
- return errors.New("format already set")
- }
- c.format = ociImageFormat
- return nil
- }
-}
-
-// WithRefObject specifies the ref object to import.
-// If refObject is empty, it is copied from the ref argument of Import().
-func WithRefObject(refObject string) ImportOpt {
- return func(c *importOpts) error {
- c.refObject = refObject
- return nil
- }
-}
-
-func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
var iopts importOpts
for _, o := range opts {
if err := o(&iopts); err != nil {
return iopts, err
}
}
- // use OCI as the default format
- if iopts.format == "" {
- iopts.format = ociImageFormat
- }
- // if refObject is not explicitly specified, use the one specified in ref
- if iopts.refObject == "" {
- refSpec, err := reference.Parse(ref)
- if err != nil {
- return iopts, err
- }
- iopts.refObject = refSpec.Object
- }
return iopts, nil
}
// Import imports an image from a Tar stream using reader.
-// OCI format is assumed by default.
-//
-// Note that unreferenced blobs are imported to the content store as well.
-func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
- iopts, err := resolveImportOpt(ref, opts...)
+// Caller needs to specify importer. Future version may use oci.v1 as the default.
+// Note that unreferrenced blobs may be imported to the content store as well.
+func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
+ _, err := resolveImportOpt(opts...) // unused now
if err != nil {
return nil, err
}
@@ -593,58 +533,66 @@
}
defer done()
- switch iopts.format {
- case ociImageFormat:
- return c.importFromOCITar(ctx, ref, reader, iopts)
- default:
- return nil, errors.Errorf("unsupported format: %s", iopts.format)
+ imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
+ if err != nil {
+ // is.Update() is not called on error
+ return nil, err
}
+
+ is := c.ImageService()
+ var images []Image
+ for _, imgrec := range imgrecs {
+ if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+ if !errdefs.IsNotFound(err) {
+ return nil, err
+ }
+
+ created, err := is.Create(ctx, imgrec)
+ if err != nil {
+ return nil, err
+ }
+
+ imgrec = created
+ } else {
+ imgrec = updated
+ }
+
+ images = append(images, &image{
+ client: c,
+ i: imgrec,
+ })
+ }
+ return images, nil
}
type exportOpts struct {
- format imageFormat
}
-// ExportOpt allows callers to set export options
+// ExportOpt allows the caller to specify export-specific options
type ExportOpt func(c *exportOpts) error
-// WithOCIExportFormat sets the OCI image format as the export target
-func WithOCIExportFormat() ExportOpt {
- return func(c *exportOpts) error {
- if c.format != "" {
- return errors.New("format already set")
+func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
+ var eopts exportOpts
+ for _, o := range opts {
+ if err := o(&eopts); err != nil {
+ return eopts, err
}
- c.format = ociImageFormat
- return nil
}
+ return eopts, nil
}
-// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
-// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
-// -> application/vnd.oci.image.layer.v1.tar+gzip
-
// Export exports an image to a Tar stream.
// OCI format is used by default.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
-func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
- var eopts exportOpts
- for _, o := range opts {
- if err := o(&eopts); err != nil {
- return nil, err
- }
- }
- // use OCI as the default format
- if eopts.format == "" {
- eopts.format = ociImageFormat
+// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
+func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+ _, err := resolveExportOpt(opts...) // unused now
+ if err != nil {
+ return nil, err
}
pr, pw := io.Pipe()
- switch eopts.format {
- case ociImageFormat:
- go func() {
- pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
- }()
- default:
- return nil, errors.Errorf("unsupported format: %s", eopts.format)
- }
+ go func() {
+ pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
+ }()
return pr, nil
}
diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go
index 2d5c9ae..716e045 100644
--- a/vendor/github.com/containerd/containerd/container.go
+++ b/vendor/github.com/containerd/containerd/container.go
@@ -162,11 +162,17 @@
}, nil
}
-func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (Task, error) {
+func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (_ Task, err error) {
i, err := ioCreate(c.id)
if err != nil {
return nil, err
}
+ defer func() {
+ if err != nil && i != nil {
+ i.Cancel()
+ i.Close()
+ }
+ }()
cfg := i.Config()
request := &tasks.CreateTaskRequest{
ContainerID: c.id,
diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go
index bb431e5..b678033 100644
--- a/vendor/github.com/containerd/containerd/container_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/container_opts_unix.go
@@ -24,7 +24,6 @@
"github.com/opencontainers/image-spec/identity"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "golang.org/x/sys/unix"
)
// WithCheckpoint allows a container to be created from the checkpointed information
@@ -193,14 +192,17 @@
if err != nil {
return err
}
- defer os.RemoveAll(root)
+ defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
- defer unix.Unmount(root, 0)
- return filepath.Walk(root, incrementFS(root, uid, gid))
+ err = filepath.Walk(root, incrementFS(root, uid, gid))
+ if uerr := mount.Unmount(root, 0); err == nil {
+ err = uerr
+ }
+ return err
}
func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
index 56f99bb..9ff95de 100644
--- a/vendor/github.com/containerd/containerd/content/local/store.go
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -62,7 +62,7 @@
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
- if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
+ if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
return nil, err
}
diff --git a/vendor/github.com/containerd/containerd/export.go b/vendor/github.com/containerd/containerd/export.go
deleted file mode 100644
index 76bebe3..0000000
--- a/vendor/github.com/containerd/containerd/export.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package containerd
-
-import (
- "archive/tar"
- "context"
- "encoding/json"
- "io"
- "sort"
-
- "github.com/containerd/containerd/content"
- "github.com/containerd/containerd/images"
- "github.com/containerd/containerd/platforms"
- ocispecs "github.com/opencontainers/image-spec/specs-go"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
- tw := tar.NewWriter(writer)
- defer tw.Close()
-
- records := []tarRecord{
- ociLayoutFile(""),
- ociIndexRecord(desc),
- }
-
- cs := c.ContentStore()
- algorithms := map[string]struct{}{}
- exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
- records = append(records, blobRecord(cs, desc))
- algorithms[desc.Digest.Algorithm().String()] = struct{}{}
- return nil, nil
- }
-
- handlers := images.Handlers(
- images.ChildrenHandler(cs, platforms.Default()),
- images.HandlerFunc(exportHandler),
- )
-
- // Walk sequentially since the number of fetchs is likely one and doing in
- // parallel requires locking the export handler
- if err := images.Walk(ctx, handlers, desc); err != nil {
- return err
- }
-
- if len(algorithms) > 0 {
- records = append(records, directoryRecord("blobs/", 0755))
- for alg := range algorithms {
- records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
- }
- }
-
- return writeTar(ctx, tw, records)
-}
-
-type tarRecord struct {
- Header *tar.Header
- CopyTo func(context.Context, io.Writer) (int64, error)
-}
-
-func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
- path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
- return tarRecord{
- Header: &tar.Header{
- Name: path,
- Mode: 0444,
- Size: desc.Size,
- Typeflag: tar.TypeReg,
- },
- CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
- r, err := cs.ReaderAt(ctx, desc.Digest)
- if err != nil {
- return 0, err
- }
- defer r.Close()
-
- // Verify digest
- dgstr := desc.Digest.Algorithm().Digester()
-
- n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
- if err != nil {
- return 0, err
- }
- if dgstr.Digest() != desc.Digest {
- return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
- }
- return n, nil
- },
- }
-}
-
-func directoryRecord(name string, mode int64) tarRecord {
- return tarRecord{
- Header: &tar.Header{
- Name: name,
- Mode: mode,
- Typeflag: tar.TypeDir,
- },
- }
-}
-
-func ociLayoutFile(version string) tarRecord {
- if version == "" {
- version = ocispec.ImageLayoutVersion
- }
- layout := ocispec.ImageLayout{
- Version: version,
- }
-
- b, err := json.Marshal(layout)
- if err != nil {
- panic(err)
- }
-
- return tarRecord{
- Header: &tar.Header{
- Name: ocispec.ImageLayoutFile,
- Mode: 0444,
- Size: int64(len(b)),
- Typeflag: tar.TypeReg,
- },
- CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
- n, err := w.Write(b)
- return int64(n), err
- },
- }
-
-}
-
-func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
- index := ocispec.Index{
- Versioned: ocispecs.Versioned{
- SchemaVersion: 2,
- },
- Manifests: manifests,
- }
-
- b, err := json.Marshal(index)
- if err != nil {
- panic(err)
- }
-
- return tarRecord{
- Header: &tar.Header{
- Name: "index.json",
- Mode: 0644,
- Size: int64(len(b)),
- Typeflag: tar.TypeReg,
- },
- CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
- n, err := w.Write(b)
- return int64(n), err
- },
- }
-}
-
-func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
- sort.Sort(tarRecordsByName(records))
-
- for _, record := range records {
- if err := tw.WriteHeader(record.Header); err != nil {
- return err
- }
- if record.CopyTo != nil {
- n, err := record.CopyTo(ctx, tw)
- if err != nil {
- return err
- }
- if n != record.Header.Size {
- return errors.Errorf("unexpected copy size for %s", record.Header.Name)
- }
- } else if record.Header.Size > 0 {
- return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
- }
- }
- return nil
-}
-
-type tarRecordsByName []tarRecord
-
-func (t tarRecordsByName) Len() int {
- return len(t)
-}
-func (t tarRecordsByName) Swap(i, j int) {
- t[i], t[j] = t[j], t[i]
-}
-func (t tarRecordsByName) Less(i, j int) bool {
- return t[i].Header.Name < t[j].Header.Name
-}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
index c9b0984..c765ea0 100644
--- a/vendor/github.com/containerd/containerd/filters/parser.go
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -3,7 +3,6 @@
import (
"fmt"
"io"
- "strconv"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
@@ -134,7 +133,12 @@
return selector{}, err
}
- value, err := p.value()
+ var allowAltQuotes bool
+ if op == operatorMatches {
+ allowAltQuotes = true
+ }
+
+ value, err := p.value(allowAltQuotes)
if err != nil {
if err == io.EOF {
return selector{}, io.ErrUnexpectedEOF
@@ -188,7 +192,7 @@
case tokenField:
return s, nil
case tokenQuoted:
- return p.unquote(pos, s)
+ return p.unquote(pos, s, false)
}
return "", p.mkerr(pos, "expected field or quoted")
@@ -213,21 +217,25 @@
return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
}
-func (p *parser) value() (string, error) {
+func (p *parser) value(allowAltQuotes bool) (string, error) {
pos, tok, s := p.scanner.scan()
switch tok {
case tokenValue, tokenField:
return s, nil
case tokenQuoted:
- return p.unquote(pos, s)
+ return p.unquote(pos, s, allowAltQuotes)
}
return "", p.mkerr(pos, "expected value or quoted")
}
-func (p *parser) unquote(pos int, s string) (string, error) {
- uq, err := strconv.Unquote(s)
+func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
+ if !allowAlts && s[0] != '\'' && s[0] != '"' {
+ return "", p.mkerr(pos, "invalid quote encountered")
+ }
+
+ uq, err := unquote(s)
if err != nil {
return "", p.mkerr(pos, "unquoting failed: %v", err)
}
diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go
new file mode 100644
index 0000000..08698e1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/quote.go
@@ -0,0 +1,237 @@
+package filters
+
+import (
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+)
+
+// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
+// strconv package and modified to be able to handle quoting with `/` and `|`
+// as delimiters. The copyright is held by the Go authors.
+
+var errQuoteSyntax = errors.New("quote syntax error")
+
+// UnquoteChar decodes the first character or byte in the escaped string
+// or character literal represented by the string s.
+// It returns four values:
+//
+// 1) value, the decoded Unicode code point or byte value;
+// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
+// 3) tail, the remainder of the string after the character; and
+// 4) an error that will be nil if the character is syntactically valid.
+//
+// The second argument, quote, specifies the type of literal being parsed
+// and therefore which escaped quote character is permitted.
+// If set to a single quote, it permits the sequence \' and disallows unescaped '.
+// If set to a double quote, it permits \" and disallows unescaped ".
+// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
+//
+// This is from Go strconv package, modified to support `|` and `/` as double
+// quotes for use with regular expressions.
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
+ err = errQuoteSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = errQuoteSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = errQuoteSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = errQuoteSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = errQuoteSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = errQuoteSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = errQuoteSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = errQuoteSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"', '|', '/':
+ if c != quote {
+ err = errQuoteSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = errQuoteSyntax
+ return
+ }
+ tail = s
+ return
+}
+
+// unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+//
+// This is modified from the standard library to support `|` and `/` as quote
+// characters for use with regular expressions.
+func unquote(s string) (string, error) {
+ n := len(s)
+ if n < 2 {
+ return "", errQuoteSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", errQuoteSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote == '`' {
+ if contains(s, '`') {
+ return "", errQuoteSyntax
+ }
+ if contains(s, '\r') {
+ // -1 because we know there is at least one \r to remove.
+ buf := make([]byte, 0, len(s)-1)
+ for i := 0; i < len(s); i++ {
+ if s[i] != '\r' {
+ buf = append(buf, s[i])
+ }
+ }
+ return string(buf), nil
+ }
+ return s, nil
+ }
+ if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
+ return "", errQuoteSyntax
+ }
+ if contains(s, '\n') {
+ return "", errQuoteSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) {
+ switch quote {
+ case '"', '/', '|': // pipe and slash are treated like double quote
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", errQuoteSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go
index 5a55e0a..3a8e723 100644
--- a/vendor/github.com/containerd/containerd/filters/scanner.go
+++ b/vendor/github.com/containerd/containerd/filters/scanner.go
@@ -87,7 +87,7 @@
return ch
}
-func (s *scanner) scan() (int, token, string) {
+func (s *scanner) scan() (nextp int, tk token, text string) {
var (
ch = s.next()
pos = s.pos
@@ -101,6 +101,7 @@
s.scanQuoted(ch)
return pos, tokenQuoted, s.input[pos:s.ppos]
case isSeparatorRune(ch):
+ s.value = false
return pos, tokenSeparator, s.input[pos:s.ppos]
case isOperatorRune(ch):
s.scanOperator()
@@ -241,7 +242,7 @@
func isQuoteRune(r rune) bool {
switch r {
- case '"': // maybe add single quoting?
+ case '/', '|', '"': // maybe add single quoting?
return true
}
diff --git a/vendor/github.com/containerd/containerd/fs/diff.go b/vendor/github.com/containerd/containerd/fs/diff.go
index 9073d0d..3a53f42 100644
--- a/vendor/github.com/containerd/containerd/fs/diff.go
+++ b/vendor/github.com/containerd/containerd/fs/diff.go
@@ -222,8 +222,10 @@
c1 = make(chan *currentPath)
c2 = make(chan *currentPath)
- f1, f2 *currentPath
- rmdir string
+ f1, f2 *currentPath
+ rmdir string
+ lastEmittedDir = string(filepath.Separator)
+ parents []os.FileInfo
)
g.Go(func() error {
defer close(c1)
@@ -258,7 +260,10 @@
continue
}
- var f os.FileInfo
+ var (
+ f os.FileInfo
+ emit = true
+ )
k, p := pathChange(f1, f2)
switch k {
case ChangeKindAdd:
@@ -294,13 +299,35 @@
f2 = nil
if same {
if !isLinked(f) {
- continue
+ emit = false
}
k = ChangeKindUnmodified
}
}
- if err := changeFn(k, p, f, nil); err != nil {
- return err
+ if emit {
+ emittedDir, emitParents := commonParents(lastEmittedDir, p, parents)
+ for _, pf := range emitParents {
+ p := filepath.Join(emittedDir, pf.Name())
+ if err := changeFn(ChangeKindUnmodified, p, pf, nil); err != nil {
+ return err
+ }
+ emittedDir = p
+ }
+
+ if err := changeFn(k, p, f, nil); err != nil {
+ return err
+ }
+
+ if f != nil && f.IsDir() {
+ lastEmittedDir = p
+ } else {
+ lastEmittedDir = emittedDir
+ }
+
+ parents = parents[:0]
+ } else if f.IsDir() {
+ lastEmittedDir, parents = commonParents(lastEmittedDir, p, parents)
+ parents = append(parents, f)
}
}
return nil
@@ -308,3 +335,47 @@
return g.Wait()
}
+
+func commonParents(base, updated string, dirs []os.FileInfo) (string, []os.FileInfo) {
+ if basePrefix := makePrefix(base); strings.HasPrefix(updated, basePrefix) {
+ var (
+ parents []os.FileInfo
+ last = base
+ )
+ for _, d := range dirs {
+ next := filepath.Join(last, d.Name())
+ if strings.HasPrefix(updated, makePrefix(last)) {
+ parents = append(parents, d)
+ last = next
+ } else {
+ break
+ }
+ }
+ return base, parents
+ }
+
+ baseS := strings.Split(base, string(filepath.Separator))
+ updatedS := strings.Split(updated, string(filepath.Separator))
+ commonS := []string{string(filepath.Separator)}
+
+ min := len(baseS)
+ if len(updatedS) < min {
+ min = len(updatedS)
+ }
+ for i := 0; i < min; i++ {
+ if baseS[i] == updatedS[i] {
+ commonS = append(commonS, baseS[i])
+ } else {
+ break
+ }
+ }
+
+ return filepath.Join(commonS...), []os.FileInfo{}
+}
+
+func makePrefix(d string) string {
+ if d == "" || d[len(d)-1] != filepath.Separator {
+ return d + string(filepath.Separator)
+ }
+ return d
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du.go b/vendor/github.com/containerd/containerd/fs/du.go
index 61f439d..26f5333 100644
--- a/vendor/github.com/containerd/containerd/fs/du.go
+++ b/vendor/github.com/containerd/containerd/fs/du.go
@@ -1,5 +1,7 @@
package fs
+import "context"
+
// Usage of disk information
type Usage struct {
Inodes int64
@@ -11,3 +13,10 @@
func DiskUsage(roots ...string) (Usage, error) {
return diskUsage(roots...)
}
+
+// DiffUsage counts the numbers of inodes and disk usage in the
+// diff between the 2 directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+ return diffUsage(ctx, a, b)
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_unix.go b/vendor/github.com/containerd/containerd/fs/du_unix.go
index d8654d3..6328e80 100644
--- a/vendor/github.com/containerd/containerd/fs/du_unix.go
+++ b/vendor/github.com/containerd/containerd/fs/du_unix.go
@@ -3,17 +3,19 @@
package fs
import (
+ "context"
"os"
"path/filepath"
"syscall"
)
+type inode struct {
+ // TODO(stevvooe): Can probably reduce memory usage by not tracking
+ // device, but we can leave this right for now.
+ dev, ino uint64
+}
+
func diskUsage(roots ...string) (Usage, error) {
- type inode struct {
- // TODO(stevvooe): Can probably reduce memory usage by not tracking
- // device, but we can leave this right for now.
- dev, ino uint64
- }
var (
size int64
@@ -45,3 +47,37 @@
Size: size,
}, nil
}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ inodes = map[inode]struct{}{} // expensive!
+ )
+
+ if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if kind == ChangeKindAdd || kind == ChangeKindModify {
+ stat := fi.Sys().(*syscall.Stat_t)
+
+ inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
+ if _, ok := inodes[inoKey]; !ok {
+ inodes[inoKey] = struct{}{}
+ size += fi.Size()
+ }
+
+ return nil
+
+ }
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+
+ return Usage{
+ Inodes: int64(len(inodes)),
+ Size: size,
+ }, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_windows.go b/vendor/github.com/containerd/containerd/fs/du_windows.go
index 4a0363c..3f852fc 100644
--- a/vendor/github.com/containerd/containerd/fs/du_windows.go
+++ b/vendor/github.com/containerd/containerd/fs/du_windows.go
@@ -3,6 +3,7 @@
package fs
import (
+ "context"
"os"
"path/filepath"
)
@@ -31,3 +32,29 @@
Size: size,
}, nil
}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+ var (
+ size int64
+ )
+
+ if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if kind == ChangeKindAdd || kind == ChangeKindModify {
+ size += fi.Size()
+
+ return nil
+
+ }
+ return nil
+ }); err != nil {
+ return Usage{}, err
+ }
+
+ return Usage{
+ Size: size,
+ }, nil
+}
diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go
index 202fc42..6e9f4bd 100644
--- a/vendor/github.com/containerd/containerd/image.go
+++ b/vendor/github.com/containerd/containerd/image.go
@@ -147,7 +147,7 @@
manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
if err != nil {
- return nil, errors.Wrap(err, "")
+ return nil, err
}
diffIDs, err := i.i.RootFS(ctx, cs, platform)
diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go
index e0d6990..7b4215f 100644
--- a/vendor/github.com/containerd/containerd/images/image.go
+++ b/vendor/github.com/containerd/containerd/images/image.go
@@ -187,13 +187,13 @@
return descs, nil
}
- return nil, errors.Wrap(errdefs.ErrNotFound, "could not resolve manifest")
+ return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
}), image); err != nil {
return ocispec.Manifest{}, err
}
if m == nil {
- return ocispec.Manifest{}, errors.Wrap(errdefs.ErrNotFound, "manifest not found")
+ return ocispec.Manifest{}, errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
}
return *m, nil
@@ -257,7 +257,7 @@
return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
}
- return false, nil, nil, nil, errors.Wrap(err, "image check failed")
+ return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
}
// TODO(stevvooe): It is possible that referenced conponents could have
@@ -272,7 +272,7 @@
missing = append(missing, desc)
continue
} else {
- return false, nil, nil, nil, err
+ return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
}
}
ra.Close()
diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go
new file mode 100644
index 0000000..f8cf742
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/importexport.go
@@ -0,0 +1,21 @@
+package images
+
+import (
+ "context"
+ "io"
+
+ "github.com/containerd/containerd/content"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Importer is the interface for image importer.
+type Importer interface {
+ // Import imports an image from a tar stream.
+ Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
+}
+
+// Exporter is the interface for image exporter.
+type Exporter interface {
+ // Export exports an image to a tar stream.
+ Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error
+}
diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go
deleted file mode 100644
index 9f8f9af..0000000
--- a/vendor/github.com/containerd/containerd/import.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package containerd
-
-import (
- "archive/tar"
- "context"
- "encoding/json"
- "io"
- "io/ioutil"
- "strings"
-
- "github.com/containerd/containerd/content"
- "github.com/containerd/containerd/errdefs"
- "github.com/containerd/containerd/images"
- "github.com/containerd/containerd/reference"
- digest "github.com/opencontainers/go-digest"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-func resolveOCIIndex(idx ocispec.Index, refObject string) (*ocispec.Descriptor, error) {
- tag, dgst := reference.SplitObject(refObject)
- if tag == "" && dgst == "" {
- return nil, errors.Errorf("unexpected object: %q", refObject)
- }
- for _, m := range idx.Manifests {
- if m.Digest == dgst {
- return &m, nil
- }
- annot, ok := m.Annotations[ocispec.AnnotationRefName]
- if ok && annot == tag && tag != "" {
- return &m, nil
- }
- }
- return nil, errors.Errorf("not found: %q", refObject)
-}
-
-func (c *Client) importFromOCITar(ctx context.Context, ref string, reader io.Reader, iopts importOpts) (Image, error) {
- tr := tar.NewReader(reader)
- store := c.ContentStore()
- var desc *ocispec.Descriptor
- for {
- hdr, err := tr.Next()
- if err == io.EOF {
- break
- }
- if err != nil {
- return nil, err
- }
- if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
- continue
- }
- if hdr.Name == "index.json" {
- desc, err = onUntarIndexJSON(tr, iopts.refObject)
- if err != nil {
- return nil, err
- }
- continue
- }
- if strings.HasPrefix(hdr.Name, "blobs/") {
- if err := onUntarBlob(ctx, tr, store, hdr.Name, hdr.Size); err != nil {
- return nil, err
- }
- }
- }
- if desc == nil {
- return nil, errors.Errorf("no descriptor found for reference object %q", iopts.refObject)
- }
- imgrec := images.Image{
- Name: ref,
- Target: *desc,
- Labels: iopts.labels,
- }
- is := c.ImageService()
- if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
- if !errdefs.IsNotFound(err) {
- return nil, err
- }
-
- created, err := is.Create(ctx, imgrec)
- if err != nil {
- return nil, err
- }
-
- imgrec = created
- } else {
- imgrec = updated
- }
-
- img := &image{
- client: c,
- i: imgrec,
- }
- return img, nil
-}
-
-func onUntarIndexJSON(r io.Reader, refObject string) (*ocispec.Descriptor, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
- var idx ocispec.Index
- if err := json.Unmarshal(b, &idx); err != nil {
- return nil, err
- }
- return resolveOCIIndex(idx, refObject)
-}
-
-func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
- // name is like "blobs/sha256/deadbeef"
- split := strings.Split(name, "/")
- if len(split) != 3 {
- return errors.Errorf("unexpected name: %q", name)
- }
- algo := digest.Algorithm(split[1])
- if !algo.Available() {
- return errors.Errorf("unsupported algorithm: %s", algo)
- }
- dgst := digest.NewDigestFromHex(algo.String(), split[2])
- return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
-}
diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go
index 136f2cc..629d7f5 100644
--- a/vendor/github.com/containerd/containerd/linux/bundle.go
+++ b/vendor/github.com/containerd/containerd/linux/bundle.go
@@ -75,10 +75,10 @@
type ShimOpt func(*bundle, string, *runctypes.RuncOptions) (shim.Config, client.Opt)
// ShimRemote is a ShimOpt for connecting and starting a remote shim
-func ShimRemote(shimBinary, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) ShimOpt {
+func ShimRemote(shimBinary, daemonAddress, cgroup string, debug bool, exitHandler func()) ShimOpt {
return func(b *bundle, ns string, ropts *runctypes.RuncOptions) (shim.Config, client.Opt) {
return b.shimConfig(ns, ropts),
- client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, nonewns, debug, exitHandler)
+ client.WithStart(shimBinary, b.shimAddress(ns), daemonAddress, cgroup, debug, exitHandler)
}
}
diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go
index 1ffaca1..82ed4f4 100644
--- a/vendor/github.com/containerd/containerd/linux/runtime.go
+++ b/vendor/github.com/containerd/containerd/linux/runtime.go
@@ -22,6 +22,7 @@
shim "github.com/containerd/containerd/linux/shim/v1"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/metadata"
+ "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/plugin"
@@ -78,17 +79,6 @@
NoShim bool `toml:"no_shim"`
// Debug enable debug on the shim
ShimDebug bool `toml:"shim_debug"`
- // ShimNoMountNS prevents the runtime from putting shims into their own mount namespace.
- //
- // Putting the shim in its own mount namespace ensure that any mounts made
- // by it in order to get the task rootfs ready will be undone regardless
- // on how the shim dies.
- //
- // NOTE: This should only be used in kernel older than 3.18 to avoid shims
- // from causing a DoS in their parent namespace due to having a copy of
- // mounts previously there which would prevent unlink, rename and remove
- // operations on those mountpoints.
- ShimNoMountNS bool `toml:"shim_no_newns"`
}
// New returns a configured runtime
@@ -226,8 +216,7 @@
}).Warn("failed to clen up after killed shim")
}
}
- shimopt = ShimRemote(r.config.Shim, r.address, cgroup,
- r.config.ShimNoMountNS, r.config.ShimDebug, exitHandler)
+ shimopt = ShimRemote(r.config.Shim, r.address, cgroup, r.config.ShimDebug, exitHandler)
}
s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts)
@@ -486,7 +475,7 @@
}); err != nil {
log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
}
- if err := unix.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
+ if err := mount.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
log.G(ctx).WithError(err).WithFields(logrus.Fields{
"path": bundle.path,
"id": id,
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client.go b/vendor/github.com/containerd/containerd/linux/shim/client/client.go
index db59e2c..1fb949e 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client.go
@@ -34,7 +34,7 @@
type Opt func(context.Context, shim.Config) (shimapi.ShimService, io.Closer, error)
// WithStart executes a new shim process
-func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) Opt {
+func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHandler func()) Opt {
return func(ctx context.Context, config shim.Config) (_ shimapi.ShimService, _ io.Closer, err error) {
socket, err := newSocket(address)
if err != nil {
@@ -47,7 +47,7 @@
}
defer f.Close()
- cmd := newCommand(binary, daemonAddress, nonewns, debug, config, f)
+ cmd := newCommand(binary, daemonAddress, debug, config, f)
ec, err := reaper.Default.Start(cmd)
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to start shim")
@@ -87,7 +87,7 @@
}
}
-func newCommand(binary, daemonAddress string, nonewns, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
+func newCommand(binary, daemonAddress string, debug bool, config shim.Config, socket *os.File) *exec.Cmd {
selfExe, err := os.Executable()
if err != nil {
panic(err)
@@ -117,7 +117,7 @@
// make sure the shim can be re-parented to system init
// and is cloned in a new mount namespace because the overlay/filesystems
// will be mounted by the shim
- cmd.SysProcAttr = getSysProcAttr(nonewns)
+ cmd.SysProcAttr = getSysProcAttr()
cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
if debug {
cmd.Stdout = os.Stdout
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
index 03ebba0..3125541 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_linux.go
@@ -10,14 +10,10 @@
"github.com/pkg/errors"
)
-func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
- attr := syscall.SysProcAttr{
+func getSysProcAttr() *syscall.SysProcAttr {
+ return &syscall.SysProcAttr{
Setpgid: true,
}
- if !nonewns {
- attr.Cloneflags = syscall.CLONE_NEWNS
- }
- return &attr
}
func setCgroup(cgroupPath string, cmd *exec.Cmd) error {
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
index b34cf4d..0a24ce4 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/client/client_unix.go
@@ -7,7 +7,7 @@
"syscall"
)
-func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
+func getSysProcAttr() *syscall.SysProcAttr {
return &syscall.SysProcAttr{
Setpgid: true,
}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/local.go b/vendor/github.com/containerd/containerd/linux/shim/local.go
index 4264977..6e21926 100644
--- a/vendor/github.com/containerd/containerd/linux/shim/local.go
+++ b/vendor/github.com/containerd/containerd/linux/shim/local.go
@@ -7,8 +7,8 @@
"path/filepath"
shimapi "github.com/containerd/containerd/linux/shim/v1"
+ "github.com/containerd/containerd/mount"
ptypes "github.com/gogo/protobuf/types"
- "golang.org/x/sys/unix"
)
// NewLocal returns a shim client implementation for issue commands to a shim
@@ -32,7 +32,7 @@
func (c *local) Delete(ctx context.Context, in *ptypes.Empty) (*shimapi.DeleteResponse, error) {
// make sure we unmount the containers rootfs for this local
- if err := unix.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
+ if err := mount.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
return nil, err
}
return c.s.Delete(ctx, in)
diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go
index 4cca5f6..32f339a 100644
--- a/vendor/github.com/containerd/containerd/metadata/containers.go
+++ b/vendor/github.com/containerd/containerd/metadata/containers.go
@@ -37,12 +37,12 @@
bkt := getContainerBucket(s.tx, namespace, id)
if bkt == nil {
- return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "bucket name %q:%q", namespace, id)
+ return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
}
container := containers.Container{ID: id}
if err := readContainer(&container, bkt); err != nil {
- return containers.Container{}, errors.Wrapf(err, "failed to read container %v", id)
+ return containers.Container{}, errors.Wrapf(err, "failed to read container %q", id)
}
return container, nil
@@ -61,7 +61,7 @@
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
- return nil, nil
+ return nil, nil // empty store
}
var m []containers.Container
@@ -73,7 +73,7 @@
container := containers.Container{ID: string(k)}
if err := readContainer(&container, cbkt); err != nil {
- return errors.Wrap(err, "failed to read container")
+ return errors.Wrapf(err, "failed to read container %q", string(k))
}
if filter.Match(adaptContainer(container)) {
@@ -113,7 +113,7 @@
container.CreatedAt = time.Now().UTC()
container.UpdatedAt = container.CreatedAt
if err := writeContainer(cbkt, &container); err != nil {
- return containers.Container{}, errors.Wrap(err, "failed to write container")
+ return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
}
return container, nil
@@ -131,7 +131,7 @@
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
- return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
+ return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
}
cbkt := bkt.Bucket([]byte(container.ID))
@@ -141,7 +141,7 @@
var updated containers.Container
if err := readContainer(&updated, cbkt); err != nil {
- return updated, errors.Wrapf(err, "failed to read container from bucket")
+ return updated, errors.Wrapf(err, "failed to read container %q", container.ID)
}
createdat := updated.CreatedAt
updated.ID = container.ID
@@ -211,7 +211,7 @@
updated.CreatedAt = createdat
updated.UpdatedAt = time.Now().UTC()
if err := writeContainer(cbkt, &updated); err != nil {
- return containers.Container{}, errors.Wrap(err, "failed to write container")
+ return containers.Container{}, errors.Wrapf(err, "failed to write container %q", container.ID)
}
return updated, nil
@@ -225,7 +225,7 @@
bkt := getContainersBucket(s.tx, namespace)
if bkt == nil {
- return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %v, bucket not present", id)
+ return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
}
if err := bkt.DeleteBucket([]byte(id)); err == bolt.ErrBucketNotFound {
@@ -236,7 +236,7 @@
func validateContainer(container *containers.Container) error {
if err := identifiers.Validate(container.ID); err != nil {
- return errors.Wrapf(err, "container.ID validation error")
+ return errors.Wrap(err, "container.ID")
}
for k := range container.Extensions {
diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go
index 1744321..8be62a9 100644
--- a/vendor/github.com/containerd/containerd/metadata/db.go
+++ b/vendor/github.com/containerd/containerd/metadata/db.go
@@ -138,7 +138,7 @@
if err := m.migrate(tx); err != nil {
return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
}
- log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("database migration to %s.%d finished", m.schema, m.version)
+ log.G(ctx).WithField("d", time.Now().Sub(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version)
}
}
@@ -269,7 +269,7 @@
stats.SnapshotD = map[string]time.Duration{}
wg.Add(len(m.dirtySS))
for snapshotterName := range m.dirtySS {
- log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup")
+ log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup")
go func(snapshotterName string) {
st1 := time.Now()
m.cleanupSnapshotter(snapshotterName)
@@ -286,7 +286,7 @@
if m.dirtyCS {
wg.Add(1)
- log.G(ctx).Debug("scheduling content cleanup")
+ log.G(ctx).Debug("schedule content cleanup")
go func() {
ct1 := time.Now()
m.cleanupContent()
diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go
index 7fe6f7d..186f350 100644
--- a/vendor/github.com/containerd/containerd/metadata/gc.go
+++ b/vendor/github.com/containerd/containerd/metadata/gc.go
@@ -301,7 +301,7 @@
cbkt = cbkt.Bucket(bucketKeyObjectBlob)
}
if cbkt != nil {
- log.G(ctx).WithField("key", node.Key).Debug("delete content")
+ log.G(ctx).WithField("key", node.Key).Debug("remove content")
return cbkt.DeleteBucket([]byte(node.Key))
}
case ResourceSnapshot:
@@ -313,7 +313,7 @@
}
ssbkt := sbkt.Bucket([]byte(parts[0]))
if ssbkt != nil {
- log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("delete snapshot")
+ log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot")
return ssbkt.DeleteBucket([]byte(parts[1]))
}
}
diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go
index 3e501c5..6c34e49 100644
--- a/vendor/github.com/containerd/containerd/metadata/snapshot.go
+++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go
@@ -359,7 +359,8 @@
return update(ctx, s.db, func(tx *bolt.Tx) error {
bkt := getSnapshotterBucket(tx, ns, s.name)
if bkt == nil {
- return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+ return errors.Wrapf(errdefs.ErrNotFound,
+ "can not find snapshotter %q", s.name)
}
bbkt, err := bkt.CreateBucket([]byte(name))
@@ -722,7 +723,7 @@
if !errdefs.IsFailedPrecondition(err) {
return err
}
- logger.WithError(err).WithField("key", node.info.Name).Warnf("snapshot removal failed")
+ logger.WithError(err).WithField("key", node.info.Name).Warnf("failed to remove snapshot")
} else {
logger.WithField("key", node.info.Name).Debug("removed snapshot")
}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go
index 474792d..de2e8bb 100644
--- a/vendor/github.com/containerd/containerd/mount/mount_linux.go
+++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go
@@ -2,7 +2,9 @@
import (
"strings"
+ "time"
+ "github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@@ -42,8 +44,27 @@
}
// Unmount the provided mount path with the flags
-func Unmount(mount string, flags int) error {
- return unix.Unmount(mount, flags)
+func Unmount(target string, flags int) error {
+ if err := unmount(target, flags); err != nil && err != unix.EINVAL {
+ return err
+ }
+ return nil
+}
+
+func unmount(target string, flags int) error {
+ for i := 0; i < 50; i++ {
+ if err := unix.Unmount(target, flags); err != nil {
+ switch err {
+ case unix.EBUSY:
+ time.Sleep(50 * time.Millisecond)
+ continue
+ default:
+ return err
+ }
+ }
+ return nil
+ }
+ return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target)
}
// UnmountAll repeatedly unmounts the given mount point until there
@@ -51,7 +72,7 @@
// useful for undoing a stack of mounts on the same mount point.
func UnmountAll(mount string, flags int) error {
for {
- if err := Unmount(mount, flags); err != nil {
+ if err := unmount(mount, flags); err != nil {
// EINVAL is returned if the target is not a
// mount point, indicating that we are
// done. It can also indicate a few other
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
index b17ca32..865aff2 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
@@ -12,12 +12,11 @@
"strconv"
"strings"
- "golang.org/x/sys/unix"
-
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/fs"
"github.com/containerd/containerd/images"
+ "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/namespaces"
"github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/user"
@@ -101,7 +100,7 @@
parts := strings.Split(config.User, ":")
switch len(parts) {
case 1:
- v, err := strconv.ParseUint(parts[0], 0, 10)
+ v, err := strconv.Atoi(parts[0])
if err != nil {
// if we cannot parse as a uint they try to see if it is a username
if err := WithUsername(config.User)(ctx, client, c, s); err != nil {
@@ -113,13 +112,13 @@
return err
}
case 2:
- v, err := strconv.ParseUint(parts[0], 0, 10)
+ v, err := strconv.Atoi(parts[0])
if err != nil {
- return err
+ return errors.Wrapf(err, "parse uid %s", parts[0])
}
uid := uint32(v)
- if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
- return err
+ if v, err = strconv.Atoi(parts[1]); err != nil {
+ return errors.Wrapf(err, "parse gid %s", parts[1])
}
gid := uint32(v)
s.Process.User.UID, s.Process.User.GID = uid, gid
@@ -260,7 +259,7 @@
// or uid is not found in /etc/passwd, it sets gid to be the same with
// uid, and not returns error.
func WithUserID(uid uint32) SpecOpts {
- return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+ return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
if c.Snapshotter == "" {
return errors.Errorf("no snapshotter set for container")
}
@@ -276,13 +275,19 @@
if err != nil {
return err
}
- defer os.RemoveAll(root)
+ defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
- defer unix.Unmount(root, 0)
+ defer func() {
+ if uerr := mount.Unmount(root, 0); uerr != nil {
+ if err == nil {
+ err = uerr
+ }
+ }
+ }()
ppath, err := fs.RootPath(root, "/etc/passwd")
if err != nil {
return err
@@ -317,7 +322,7 @@
// does not exist, or the username is not found in /etc/passwd,
// it returns error.
func WithUsername(username string) SpecOpts {
- return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+ return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) (err error) {
if c.Snapshotter == "" {
return errors.Errorf("no snapshotter set for container")
}
@@ -333,13 +338,19 @@
if err != nil {
return err
}
- defer os.RemoveAll(root)
+ defer os.Remove(root)
for _, m := range mounts {
if err := m.Mount(root); err != nil {
return err
}
}
- defer unix.Unmount(root, 0)
+ defer func() {
+ if uerr := mount.Unmount(root, 0); uerr != nil {
+ if err == nil {
+ err = uerr
+ }
+ }
+ }()
ppath, err := fs.RootPath(root, "/etc/passwd")
if err != nil {
return err
diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
index 3605f8e..796ad55 100644
--- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
+++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
@@ -60,3 +60,11 @@
return nil
}
}
+
+// WithUsername sets the username on the process
+func WithUsername(username string) SpecOpts {
+ return func(ctx context.Context, client Client, c *containers.Container, s *specs.Spec) error {
+ s.Process.User.Username = username
+ return nil
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go
index e583391..ad4cd9f 100644
--- a/vendor/github.com/containerd/containerd/remotes/handlers.go
+++ b/vendor/github.com/containerd/containerd/remotes/handlers.go
@@ -114,6 +114,7 @@
func commitOpts(desc ocispec.Descriptor, r io.Reader) (io.Reader, []content.Opt) {
var childrenF func(r io.Reader) ([]ocispec.Descriptor, error)
+ // TODO(AkihiroSuda): use images/oci.GetChildrenDescriptors?
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {
diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go
index e6d2be6..4051295 100644
--- a/vendor/github.com/containerd/containerd/rootfs/apply.go
+++ b/vendor/github.com/containerd/containerd/rootfs/apply.go
@@ -55,10 +55,10 @@
_, err := sn.Stat(ctx, chainID.String())
if err == nil {
- log.G(ctx).Debugf("Extraction not needed, layer snapshot exists")
+ log.G(ctx).Debugf("Extraction not needed, layer snapshot %s exists", chainID)
return false, nil
} else if !errdefs.IsNotFound(err) {
- return false, errors.Wrap(err, "failed to stat snapshot")
+ return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
}
key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
@@ -67,7 +67,7 @@
mounts, err := sn.Prepare(ctx, key, parent.String(), opts...)
if err != nil {
//TODO: If is snapshot exists error, retry
- return false, errors.Wrap(err, "failed to prepare extraction layer")
+ return false, errors.Wrapf(err, "failed to prepare extraction snapshot %q", key)
}
defer func() {
if err != nil {
@@ -89,7 +89,7 @@
if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
- return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
+ return false, errors.Wrapf(err, "failed to commit snapshot %s", key)
}
// Destination already exists, cleanup key and return without error
diff --git a/vendor/github.com/containerd/containerd/runtime/task_list.go b/vendor/github.com/containerd/containerd/runtime/task_list.go
index 7c52265..05f34c3 100644
--- a/vendor/github.com/containerd/containerd/runtime/task_list.go
+++ b/vendor/github.com/containerd/containerd/runtime/task_list.go
@@ -49,6 +49,8 @@
// GetAll tasks under a namespace
func (l *TaskList) GetAll(ctx context.Context) ([]Task, error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
namespace, err := namespaces.NamespaceRequired(ctx)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go
index 2cbcbaf..8d25683 100644
--- a/vendor/github.com/containerd/containerd/task.go
+++ b/vendor/github.com/containerd/containerd/task.go
@@ -277,7 +277,7 @@
return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
}
-func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (Process, error) {
+func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creation) (_ Process, err error) {
if id == "" {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
}
@@ -285,6 +285,12 @@
if err != nil {
return nil, err
}
+ defer func() {
+ if err != nil && i != nil {
+ i.Cancel()
+ i.Close()
+ }
+ }()
any, err := typeurl.MarshalAny(spec)
if err != nil {
return nil, err
diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf
index 382aaa6..fea47e4 100644
--- a/vendor/github.com/containerd/containerd/vendor.conf
+++ b/vendor/github.com/containerd/containerd/vendor.conf
@@ -41,4 +41,4 @@
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
github.com/dmcgowan/go-tar go1.10
-github.com/stevvooe/ttrpc 8c92e22ce0c492875ccaac3ab06143a77d8ed0c1
+github.com/stevvooe/ttrpc 76e68349ad9ab4d03d764c713826d31216715e4f
diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go
index 236095c..24494ae 100644
--- a/vendor/github.com/docker/libnetwork/controller.go
+++ b/vendor/github.com/docker/libnetwork/controller.go
@@ -837,11 +837,34 @@
if err = c.updateToStore(network); err != nil {
return nil, err
}
+ defer func() {
+ if err != nil {
+ if e := c.deleteFromStore(network); e != nil {
+ logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e)
+ }
+ }
+ }()
+
if network.configOnly {
return network, nil
}
joinCluster(network)
+ defer func() {
+ if err != nil {
+ network.cancelDriverWatches()
+ if e := network.leaveCluster(); e != nil {
+ logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e)
+ }
+ }
+ }()
+
+ if len(network.loadBalancerIP) != 0 {
+ if err = network.createLoadBalancerSandbox(); err != nil {
+ return nil, err
+ }
+ }
+
if !c.isDistributedControl() {
c.Lock()
arrangeIngressFilterRule()
diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go
index 1ad4706..318c395 100644
--- a/vendor/github.com/docker/libnetwork/network.go
+++ b/vendor/github.com/docker/libnetwork/network.go
@@ -199,39 +199,40 @@
}
type network struct {
- ctrlr *controller
- name string
- networkType string
- id string
- created time.Time
- scope string // network data scope
- labels map[string]string
- ipamType string
- ipamOptions map[string]string
- addrSpace string
- ipamV4Config []*IpamConf
- ipamV6Config []*IpamConf
- ipamV4Info []*IpamInfo
- ipamV6Info []*IpamInfo
- enableIPv6 bool
- postIPv6 bool
- epCnt *endpointCnt
- generic options.Generic
- dbIndex uint64
- dbExists bool
- persist bool
- stopWatchCh chan struct{}
- drvOnce *sync.Once
- resolverOnce sync.Once
- resolver []Resolver
- internal bool
- attachable bool
- inDelete bool
- ingress bool
- driverTables []networkDBTable
- dynamic bool
- configOnly bool
- configFrom string
+ ctrlr *controller
+ name string
+ networkType string
+ id string
+ created time.Time
+ scope string // network data scope
+ labels map[string]string
+ ipamType string
+ ipamOptions map[string]string
+ addrSpace string
+ ipamV4Config []*IpamConf
+ ipamV6Config []*IpamConf
+ ipamV4Info []*IpamInfo
+ ipamV6Info []*IpamInfo
+ enableIPv6 bool
+ postIPv6 bool
+ epCnt *endpointCnt
+ generic options.Generic
+ dbIndex uint64
+ dbExists bool
+ persist bool
+ stopWatchCh chan struct{}
+ drvOnce *sync.Once
+ resolverOnce sync.Once
+ resolver []Resolver
+ internal bool
+ attachable bool
+ inDelete bool
+ ingress bool
+ driverTables []networkDBTable
+ dynamic bool
+ configOnly bool
+ configFrom string
+ loadBalancerIP net.IP
sync.Mutex
}
@@ -473,6 +474,7 @@
dstN.ingress = n.ingress
dstN.configOnly = n.configOnly
dstN.configFrom = n.configFrom
+ dstN.loadBalancerIP = n.loadBalancerIP
// copy labels
if dstN.labels == nil {
@@ -589,6 +591,7 @@
netMap["ingress"] = n.ingress
netMap["configOnly"] = n.configOnly
netMap["configFrom"] = n.configFrom
+ netMap["loadBalancerIP"] = n.loadBalancerIP
return json.Marshal(netMap)
}
@@ -699,6 +702,9 @@
if v, ok := netMap["configFrom"]; ok {
n.configFrom = v.(string)
}
+ if v, ok := netMap["loadBalancerIP"]; ok {
+ n.loadBalancerIP = net.ParseIP(v.(string))
+ }
// Reconcile old networks with the recently added `--ipv6` flag
if !n.enableIPv6 {
n.enableIPv6 = len(n.ipamV6Info) > 0
@@ -799,6 +805,13 @@
}
}
+// NetworkOptionLBEndpoint function returns an option setter for the configuration of the load balancer endpoint for this network
+func NetworkOptionLBEndpoint(ip net.IP) NetworkOption {
+ return func(n *network) {
+ n.loadBalancerIP = ip
+ }
+}
+
// NetworkOptionDriverOpts function returns an option setter for any driver parameter described by a map
func NetworkOptionDriverOpts(opts map[string]string) NetworkOption {
return func(n *network) {
@@ -944,6 +957,18 @@
return &UnknownNetworkError{name: name, id: id}
}
+ if len(n.loadBalancerIP) != 0 {
+ endpoints := n.Endpoints()
+ if force || len(endpoints) == 1 {
+ n.deleteLoadBalancerSandbox()
+ }
+ // Reload the network from the store to update the epcnt.
+ n, err = c.getNetworkFromStore(id)
+ if err != nil {
+ return &UnknownNetworkError{name: name, id: id}
+ }
+ }
+
if !force && n.getEpCnt().EndpointCnt() != 0 {
if n.configOnly {
return types.ForbiddenErrorf("configuration network %q is in use", n.Name())
@@ -1071,12 +1096,19 @@
return nil, types.ForbiddenErrorf("endpoint with name %s already exists in network %s", name, n.Name())
}
- ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
- ep.id = stringid.GenerateRandomID()
-
n.ctrlr.networkLocker.Lock(n.id)
defer n.ctrlr.networkLocker.Unlock(n.id)
+ return n.createEndpoint(name, options...)
+
+}
+
+func (n *network) createEndpoint(name string, options ...EndpointOption) (Endpoint, error) {
+ var err error
+
+ ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}}
+ ep.id = stringid.GenerateRandomID()
+
// Initialize ep.network with a possibly stale copy of n. We need this to get network from
// store. But once we get it from store we will have the most uptodate copy possibly.
ep.network = n
@@ -2021,3 +2053,80 @@
return n.(*network), nil
}
+
+func (n *network) createLoadBalancerSandbox() error {
+ sandboxName := n.name + "-sbox"
+ sbOptions := []SandboxOption{}
+ if n.ingress {
+ sbOptions = append(sbOptions, OptionIngress())
+ }
+ sb, err := n.ctrlr.NewSandbox(sandboxName, sbOptions...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if e := n.ctrlr.SandboxDestroy(sandboxName); e != nil {
+ logrus.Warnf("could not delete sandbox %s on failure (%v): %v", sandboxName, err, e)
+ }
+ }
+ }()
+
+ endpointName := n.name + "-endpoint"
+ epOptions := []EndpointOption{
+ CreateOptionIpam(n.loadBalancerIP, nil, nil, nil),
+ CreateOptionLoadBalancer(),
+ }
+ ep, err := n.createEndpoint(endpointName, epOptions...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if e := ep.Delete(true); e != nil {
+ logrus.Warnf("could not delete endpoint %s on failure (%v): %v", endpointName, err, e)
+ }
+ }
+ }()
+
+ if err := ep.Join(sb, nil); err != nil {
+ return err
+ }
+ return sb.EnableService()
+}
+
+func (n *network) deleteLoadBalancerSandbox() {
+ n.Lock()
+ c := n.ctrlr
+ name := n.name
+ n.Unlock()
+
+ endpointName := name + "-endpoint"
+ sandboxName := name + "-sbox"
+
+ endpoint, err := n.EndpointByName(endpointName)
+ if err != nil {
+ logrus.Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err)
+ } else {
+
+ info := endpoint.Info()
+ if info != nil {
+ sb := info.Sandbox()
+ if sb != nil {
+ if err := sb.DisableService(); err != nil {
+ logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
+ // Ignore error and attempt to delete the load balancer endpoint
+ }
+ }
+ }
+
+ if err := endpoint.Delete(true); err != nil {
+ logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoint.Name(), endpoint.ID(), sandboxName, err)
+ // Ignore error and attempt to delete the sandbox.
+ }
+ }
+
+ if err := c.SandboxDestroy(sandboxName); err != nil {
+ logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
+ }
+}
diff --git a/vendor/github.com/docker/libnetwork/store.go b/vendor/github.com/docker/libnetwork/store.go
index da7ac1d..95943f6 100644
--- a/vendor/github.com/docker/libnetwork/store.go
+++ b/vendor/github.com/docker/libnetwork/store.go
@@ -256,7 +256,7 @@
if err := cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil {
return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err)
}
- logrus.Errorf("Error (%v) deleting object %v, retrying....", err, kvObject.Key())
+ logrus.Warnf("Error (%v) deleting object %v, retrying....", err, kvObject.Key())
goto retry
}
return err
diff --git a/vendor/github.com/fluent/fluent-logger-golang/README.md b/vendor/github.com/fluent/fluent-logger-golang/README.md
index a6b9902..cbb8bdc 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/README.md
+++ b/vendor/github.com/fluent/fluent-logger-golang/README.md
@@ -21,7 +21,7 @@
GoDoc: http://godoc.org/github.com/fluent/fluent-logger-golang/fluent
-##Example
+## Example
```go
package main
@@ -44,14 +44,14 @@
"hoge": "hoge",
}
error := logger.Post(tag, data)
- // error := logger.Post(tag, time.Time.Now(), data)
+ // error := logger.PostWithTime(tag, time.Now(), data)
if error != nil {
panic(error)
}
}
```
-`data` must be a value like `map[string]literal`, `map[string]interface{}` or `struct`. Logger refers tags `msg` or `codec` of each fields of structs.
+`data` must be a value like `map[string]literal`, `map[string]interface{}`, `struct` or [`msgp.Marshaler`](http://godoc.org/github.com/tinylib/msgp/msgp#Marshaler). Logger refers tags `msg` or `codec` of each fields of structs.
## Setting config values
@@ -59,6 +59,11 @@
f := fluent.New(fluent.Config{FluentPort: 80, FluentHost: "example.com"})
```
+### WriteTimeout
+
+Sets the timeout for Write call of logger.Post.
+Since the default is zero value, Write will not time out.
+
## Tests
```
go test
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
index 655f623..4693c5c 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go
@@ -4,13 +4,14 @@
"encoding/json"
"errors"
"fmt"
- "io"
"math"
"net"
"reflect"
"strconv"
"sync"
"time"
+
+ "github.com/tinylib/msgp/msgp"
)
const (
@@ -19,10 +20,14 @@
defaultSocketPath = ""
defaultPort = 24224
defaultTimeout = 3 * time.Second
+ defaultWriteTimeout = time.Duration(0) // Write() will not time out
defaultBufferLimit = 8 * 1024 * 1024
defaultRetryWait = 500
defaultMaxRetry = 13
defaultReconnectWaitIncreRate = 1.5
+ // Default sub-second precision value to false since it is only compatible
+ // with fluentd versions v0.14 and above.
+ defaultSubSecondPrecision = false
)
type Config struct {
@@ -31,12 +36,17 @@
FluentNetwork string `json:"fluent_network"`
FluentSocketPath string `json:"fluent_socket_path"`
Timeout time.Duration `json:"timeout"`
+ WriteTimeout time.Duration `json:"write_timeout"`
BufferLimit int `json:"buffer_limit"`
RetryWait int `json:"retry_wait"`
MaxRetry int `json:"max_retry"`
TagPrefix string `json:"tag_prefix"`
AsyncConnect bool `json:"async_connect"`
MarshalAsJSON bool `json:"marshal_as_json"`
+
+ // Sub-second precision timestamps are only possible for those using fluentd
+ // v0.14+ and serializing their messages with msgpack.
+ SubSecondPrecision bool `json:"sub_second_precision"`
}
type Fluent struct {
@@ -46,7 +56,7 @@
pending []byte
muconn sync.Mutex
- conn io.WriteCloser
+ conn net.Conn
reconnecting bool
}
@@ -67,6 +77,9 @@
if config.Timeout == 0 {
config.Timeout = defaultTimeout
}
+ if config.WriteTimeout == 0 {
+ config.WriteTimeout = defaultWriteTimeout
+ }
if config.BufferLimit == 0 {
config.BufferLimit = defaultBufferLimit
}
@@ -90,9 +103,6 @@
//
// Examples:
//
-// // send string
-// f.Post("tag_name", "data")
-//
// // send map[string]
// mapStringData := map[string]string{
// "foo": "bar",
@@ -124,6 +134,10 @@
tag = f.TagPrefix + "." + tag
}
+ if m, ok := message.(msgp.Marshaler); ok {
+ return f.EncodeAndPostData(tag, tm, m)
+ }
+
msg := reflect.ValueOf(message)
msgtype := msg.Type()
@@ -203,6 +217,9 @@
msg := Message{Tag: tag, Time: timeUnix, Record: message}
chunk := &MessageChunk{message: msg}
data, err = json.Marshal(chunk)
+ } else if f.Config.SubSecondPrecision {
+ msg := &MessageExt{Tag: tag, Time: EventTime(tm), Record: message}
+ data, err = msg.MarshalMsg(nil)
} else {
msg := &Message{Tag: tag, Time: timeUnix, Record: message}
data, err = msg.MarshalMsg(nil)
@@ -297,6 +314,12 @@
var err error
if len(f.pending) > 0 {
+ t := f.Config.WriteTimeout
+ if time.Duration(0) < t {
+ f.conn.SetWriteDeadline(time.Now().Add(t))
+ } else {
+ f.conn.SetWriteDeadline(time.Time{})
+ }
_, err = f.conn.Write(f.pending)
if err != nil {
f.conn.Close()
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
index 268d614..158e22d 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go
@@ -2,6 +2,12 @@
package fluent
+import (
+ "time"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
//msgp:tuple Entry
type Entry struct {
Time int64 `msg:"time"`
@@ -22,3 +28,69 @@
Record interface{} `msg:"record"`
Option interface{} `msg:"option"`
}
+
+//msgp:tuple MessageExt
+type MessageExt struct {
+ Tag string `msg:"tag"`
+ Time EventTime `msg:"time,extension"`
+ Record interface{} `msg:"record"`
+ Option interface{} `msg:"option"`
+}
+
+// EventTime is an extension to the serialized time value. It builds in support
+// for sub-second (nanosecond) precision in serialized timestamps.
+//
+// You can find the full specification for the msgpack message payload here:
+// https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1.
+//
+// You can find more information on msgpack extension types here:
+// https://github.com/tinylib/msgp/wiki/Using-Extensions.
+type EventTime time.Time
+
+const (
+ extensionType = 0
+ length = 8
+)
+
+func init() {
+ msgp.RegisterExtension(extensionType, func() msgp.Extension { return new(EventTime) })
+}
+
+func (t *EventTime) ExtensionType() int8 { return extensionType }
+
+func (t *EventTime) Len() int { return length }
+
+func (t *EventTime) MarshalBinaryTo(b []byte) error {
+ // Unwrap to Golang time
+ goTime := time.Time(*t)
+
+ // There's no support for timezones in fluentd's protocol for EventTime.
+ // Convert to UTC.
+ utc := goTime.UTC()
+
+ // Warning! Converting seconds to an int32 is a lossy operation. This code
+ // will hit the "Year 2038" problem.
+ sec := int32(utc.Unix())
+ nsec := utc.Nanosecond()
+
+ // Fill the buffer with 4 bytes for the second component of the timestamp.
+ b[0] = byte(sec >> 24)
+ b[1] = byte(sec >> 16)
+ b[2] = byte(sec >> 8)
+ b[3] = byte(sec)
+
+ // Fill the buffer with 4 bytes for the nanosecond component of the
+ // timestamp.
+ b[4] = byte(nsec >> 24)
+ b[5] = byte(nsec >> 16)
+ b[6] = byte(nsec >> 8)
+ b[7] = byte(nsec)
+
+ return nil
+}
+
+// UnmarshalBinary is not implemented since decoding messages is not supported
+// by this library.
+func (t *EventTime) UnmarshalBinary(b []byte) error {
+ return nil
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
index afb9d6d..5b88a68 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
@@ -10,13 +10,13 @@
// DecodeMsg implements msgp.Decodable
func (z *Entry) DecodeMsg(dc *msgp.Reader) (err error) {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ var zxvk uint32
+ zxvk, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
+ if zxvk != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zxvk}
return
}
z.Time, err = dc.ReadInt64()
@@ -32,9 +32,10 @@
// EncodeMsg implements msgp.Encodable
func (z Entry) EncodeMsg(en *msgp.Writer) (err error) {
- err = en.WriteArrayHeader(2)
+ // array header, size 2
+ err = en.Append(0x92)
if err != nil {
- return
+ return err
}
err = en.WriteInt64(z.Time)
if err != nil {
@@ -50,7 +51,8 @@
// MarshalMsg implements msgp.Marshaler
func (z Entry) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendArrayHeader(o, 2)
+ // array header, size 2
+ o = append(o, 0x92)
o = msgp.AppendInt64(o, z.Time)
o, err = msgp.AppendIntf(o, z.Record)
if err != nil {
@@ -61,16 +63,14 @@
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Entry) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
- return
- }
+ var zbzg uint32
+ zbzg, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zbzg != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zbzg}
+ return
}
z.Time, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
@@ -84,51 +84,52 @@
return
}
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Entry) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Record)
+ s = 1 + msgp.Int64Size + msgp.GuessSize(z.Record)
return
}
// DecodeMsg implements msgp.Decodable
func (z *Forward) DecodeMsg(dc *msgp.Reader) (err error) {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ var zcmr uint32
+ zcmr, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 3 {
- err = msgp.ArrayError{Wanted: 3, Got: ssz}
+ if zcmr != 3 {
+ err = msgp.ArrayError{Wanted: 3, Got: zcmr}
return
}
z.Tag, err = dc.ReadString()
if err != nil {
return
}
- var xsz uint32
- xsz, err = dc.ReadArrayHeader()
+ var zajw uint32
+ zajw, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if cap(z.Entries) >= int(xsz) {
- z.Entries = z.Entries[:xsz]
+ if cap(z.Entries) >= int(zajw) {
+ z.Entries = (z.Entries)[:zajw]
} else {
- z.Entries = make([]Entry, xsz)
+ z.Entries = make([]Entry, zajw)
}
- for xvk := range z.Entries {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ for zbai := range z.Entries {
+ var zwht uint32
+ zwht, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
+ if zwht != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zwht}
return
}
- z.Entries[xvk].Time, err = dc.ReadInt64()
+ z.Entries[zbai].Time, err = dc.ReadInt64()
if err != nil {
return
}
- z.Entries[xvk].Record, err = dc.ReadIntf()
+ z.Entries[zbai].Record, err = dc.ReadIntf()
if err != nil {
return
}
@@ -142,9 +143,10 @@
// EncodeMsg implements msgp.Encodable
func (z *Forward) EncodeMsg(en *msgp.Writer) (err error) {
- err = en.WriteArrayHeader(3)
+ // array header, size 3
+ err = en.Append(0x93)
if err != nil {
- return
+ return err
}
err = en.WriteString(z.Tag)
if err != nil {
@@ -154,16 +156,17 @@
if err != nil {
return
}
- for xvk := range z.Entries {
- err = en.WriteArrayHeader(2)
+ for zbai := range z.Entries {
+ // array header, size 2
+ err = en.Append(0x92)
+ if err != nil {
+ return err
+ }
+ err = en.WriteInt64(z.Entries[zbai].Time)
if err != nil {
return
}
- err = en.WriteInt64(z.Entries[xvk].Time)
- if err != nil {
- return
- }
- err = en.WriteIntf(z.Entries[xvk].Record)
+ err = en.WriteIntf(z.Entries[zbai].Record)
if err != nil {
return
}
@@ -178,13 +181,15 @@
// MarshalMsg implements msgp.Marshaler
func (z *Forward) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendArrayHeader(o, 3)
+ // array header, size 3
+ o = append(o, 0x93)
o = msgp.AppendString(o, z.Tag)
o = msgp.AppendArrayHeader(o, uint32(len(z.Entries)))
- for xvk := range z.Entries {
- o = msgp.AppendArrayHeader(o, 2)
- o = msgp.AppendInt64(o, z.Entries[xvk].Time)
- o, err = msgp.AppendIntf(o, z.Entries[xvk].Record)
+ for zbai := range z.Entries {
+ // array header, size 2
+ o = append(o, 0x92)
+ o = msgp.AppendInt64(o, z.Entries[zbai].Time)
+ o, err = msgp.AppendIntf(o, z.Entries[zbai].Record)
if err != nil {
return
}
@@ -198,48 +203,44 @@
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Forward) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 3 {
- err = msgp.ArrayError{Wanted: 3, Got: ssz}
- return
- }
+ var zhct uint32
+ zhct, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zhct != 3 {
+ err = msgp.ArrayError{Wanted: 3, Got: zhct}
+ return
}
z.Tag, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
return
}
- var xsz uint32
- xsz, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ var zcua uint32
+ zcua, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
- if cap(z.Entries) >= int(xsz) {
- z.Entries = z.Entries[:xsz]
+ if cap(z.Entries) >= int(zcua) {
+ z.Entries = (z.Entries)[:zcua]
} else {
- z.Entries = make([]Entry, xsz)
+ z.Entries = make([]Entry, zcua)
}
- for xvk := range z.Entries {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 2 {
- err = msgp.ArrayError{Wanted: 2, Got: ssz}
- return
- }
- }
- z.Entries[xvk].Time, bts, err = msgp.ReadInt64Bytes(bts)
+ for zbai := range z.Entries {
+ var zxhx uint32
+ zxhx, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
return
}
- z.Entries[xvk].Record, bts, err = msgp.ReadIntfBytes(bts)
+ if zxhx != 2 {
+ err = msgp.ArrayError{Wanted: 2, Got: zxhx}
+ return
+ }
+ z.Entries[zbai].Time, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ return
+ }
+ z.Entries[zbai].Record, bts, err = msgp.ReadIntfBytes(bts)
if err != nil {
return
}
@@ -252,10 +253,11 @@
return
}
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Forward) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize
- for xvk := range z.Entries {
- s += msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Entries[xvk].Record)
+ s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize
+ for zbai := range z.Entries {
+ s += 1 + msgp.Int64Size + msgp.GuessSize(z.Entries[zbai].Record)
}
s += msgp.GuessSize(z.Option)
return
@@ -263,13 +265,13 @@
// DecodeMsg implements msgp.Decodable
func (z *Message) DecodeMsg(dc *msgp.Reader) (err error) {
- var ssz uint32
- ssz, err = dc.ReadArrayHeader()
+ var zlqf uint32
+ zlqf, err = dc.ReadArrayHeader()
if err != nil {
return
}
- if ssz != 4 {
- err = msgp.ArrayError{Wanted: 4, Got: ssz}
+ if zlqf != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zlqf}
return
}
z.Tag, err = dc.ReadString()
@@ -293,9 +295,10 @@
// EncodeMsg implements msgp.Encodable
func (z *Message) EncodeMsg(en *msgp.Writer) (err error) {
- err = en.WriteArrayHeader(4)
+ // array header, size 4
+ err = en.Append(0x94)
if err != nil {
- return
+ return err
}
err = en.WriteString(z.Tag)
if err != nil {
@@ -319,7 +322,8 @@
// MarshalMsg implements msgp.Marshaler
func (z *Message) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
- o = msgp.AppendArrayHeader(o, 4)
+ // array header, size 4
+ o = append(o, 0x94)
o = msgp.AppendString(o, z.Tag)
o = msgp.AppendInt64(o, z.Time)
o, err = msgp.AppendIntf(o, z.Record)
@@ -335,16 +339,14 @@
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) {
- {
- var ssz uint32
- ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
- if err != nil {
- return
- }
- if ssz != 4 {
- err = msgp.ArrayError{Wanted: 4, Got: ssz}
- return
- }
+ var zdaf uint32
+ zdaf, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zdaf != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zdaf}
+ return
}
z.Tag, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
@@ -366,7 +368,122 @@
return
}
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Message) Msgsize() (s int) {
- s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
+ s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *MessageExt) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zpks uint32
+ zpks, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if zpks != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zpks}
+ return
+ }
+ z.Tag, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ err = dc.ReadExtension(&z.Time)
+ if err != nil {
+ return
+ }
+ z.Record, err = dc.ReadIntf()
+ if err != nil {
+ return
+ }
+ z.Option, err = dc.ReadIntf()
+ if err != nil {
+ return
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *MessageExt) EncodeMsg(en *msgp.Writer) (err error) {
+ // array header, size 4
+ err = en.Append(0x94)
+ if err != nil {
+ return err
+ }
+ err = en.WriteString(z.Tag)
+ if err != nil {
+ return
+ }
+ err = en.WriteExtension(&z.Time)
+ if err != nil {
+ return
+ }
+ err = en.WriteIntf(z.Record)
+ if err != nil {
+ return
+ }
+ err = en.WriteIntf(z.Option)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *MessageExt) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // array header, size 4
+ o = append(o, 0x94)
+ o = msgp.AppendString(o, z.Tag)
+ o, err = msgp.AppendExtension(o, &z.Time)
+ if err != nil {
+ return
+ }
+ o, err = msgp.AppendIntf(o, z.Record)
+ if err != nil {
+ return
+ }
+ o, err = msgp.AppendIntf(o, z.Option)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *MessageExt) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var zjfb uint32
+ zjfb, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ if zjfb != 4 {
+ err = msgp.ArrayError{Wanted: 4, Got: zjfb}
+ return
+ }
+ z.Tag, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ bts, err = msgp.ReadExtensionBytes(bts, &z.Time)
+ if err != nil {
+ return
+ }
+ z.Record, bts, err = msgp.ReadIntfBytes(bts)
+ if err != nil {
+ return
+ }
+ z.Option, bts, err = msgp.ReadIntfBytes(bts)
+ if err != nil {
+ return
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *MessageExt) Msgsize() (s int) {
+ s = 1 + msgp.StringPrefixSize + len(z.Tag) + msgp.ExtensionPrefixSize + z.Time.Len() + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option)
return
}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go
new file mode 100644
index 0000000..dcf5baa
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message.go
@@ -0,0 +1,7 @@
+package fluent
+
+//go:generate msgp
+type TestMessage struct {
+ Foo string `msg:"foo" json:"foo,omitempty"`
+ Hoge string `msg:"hoge" json:"hoge,omitempty"`
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go
new file mode 100644
index 0000000..17a45e2
--- /dev/null
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/test_message_gen.go
@@ -0,0 +1,125 @@
+package fluent
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *TestMessage) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zxvk uint32
+ zxvk, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zxvk > 0 {
+ zxvk--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "foo":
+ z.Foo, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "hoge":
+ z.Hoge, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z TestMessage) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 2
+ // write "foo"
+ err = en.Append(0x82, 0xa3, 0x66, 0x6f, 0x6f)
+ if err != nil {
+ return err
+ }
+ err = en.WriteString(z.Foo)
+ if err != nil {
+ return
+ }
+ // write "hoge"
+ err = en.Append(0xa4, 0x68, 0x6f, 0x67, 0x65)
+ if err != nil {
+ return err
+ }
+ err = en.WriteString(z.Hoge)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z TestMessage) MarshalMsg(b []byte) (o []byte, err error) {
+ o = msgp.Require(b, z.Msgsize())
+ // map header, size 2
+ // string "foo"
+ o = append(o, 0x82, 0xa3, 0x66, 0x6f, 0x6f)
+ o = msgp.AppendString(o, z.Foo)
+ // string "hoge"
+ o = append(o, 0xa4, 0x68, 0x6f, 0x67, 0x65)
+ o = msgp.AppendString(o, z.Hoge)
+ return
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *TestMessage) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zbzg uint32
+ zbzg, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if err != nil {
+ return
+ }
+ for zbzg > 0 {
+ zbzg--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "foo":
+ z.Foo, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ case "hoge":
+ z.Hoge, bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ return
+ }
+ default:
+ bts, err = msgp.Skip(bts)
+ if err != nil {
+ return
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z TestMessage) Msgsize() (s int) {
+ s = 1 + 4 + msgp.StringPrefixSize + len(z.Foo) + 5 + msgp.StringPrefixSize + len(z.Hoge)
+ return
+}
diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
index 8904726..c6ec7e4 100644
--- a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
+++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go
@@ -1,3 +1,3 @@
package fluent
-const Version = "1.2.1"
+const Version = "1.3.0"
diff --git a/vendor/github.com/stevvooe/ttrpc/config.go b/vendor/github.com/stevvooe/ttrpc/config.go
new file mode 100644
index 0000000..23bc603
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/config.go
@@ -0,0 +1,23 @@
+package ttrpc
+
+import "github.com/pkg/errors"
+
+type serverConfig struct {
+ handshaker Handshaker
+}
+
+type ServerOpt func(*serverConfig) error
+
+// WithServerHandshaker can be passed to NewServer to ensure that the
+// handshaker is called before every connection attempt.
+//
+// Only one handshaker is allowed per server.
+func WithServerHandshaker(handshaker Handshaker) ServerOpt {
+ return func(c *serverConfig) error {
+ if c.handshaker != nil {
+ return errors.New("only one handshaker allowed per server")
+ }
+ c.handshaker = handshaker
+ return nil
+ }
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/handshake.go b/vendor/github.com/stevvooe/ttrpc/handshake.go
new file mode 100644
index 0000000..a08ae8e
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/handshake.go
@@ -0,0 +1,34 @@
+package ttrpc
+
+import (
+ "context"
+ "net"
+)
+
+// Handshaker defines the interface for connection handshakes performed on the
+// server or client when first connecting.
+type Handshaker interface {
+ // Handshake should confirm or decorate a connection that may be incoming
+ // to a server or outgoing from a client.
+ //
+ // If this returns without an error, the caller should use the connection
+ // in place of the original connection.
+ //
+ // The second return value can contain credential specific data, such as
+ // unix socket credentials or TLS information.
+ //
+ // While we currently only have implementations on the server-side, this
+ // interface should be sufficient to implement similar handshakes on the
+ // client-side.
+ Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+}
+
+type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+
+func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+ return fn(ctx, conn)
+}
+
+func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+ return conn, nil, nil
+}
diff --git a/vendor/github.com/stevvooe/ttrpc/server.go b/vendor/github.com/stevvooe/ttrpc/server.go
index ed2d14c..edfca0c 100644
--- a/vendor/github.com/stevvooe/ttrpc/server.go
+++ b/vendor/github.com/stevvooe/ttrpc/server.go
@@ -2,6 +2,7 @@
import (
"context"
+ "io"
"math/rand"
"net"
"sync"
@@ -19,6 +20,7 @@
)
type Server struct {
+ config *serverConfig
services *serviceSet
codec codec
@@ -28,13 +30,21 @@
done chan struct{} // marks point at which we stop serving requests
}
-func NewServer() *Server {
+func NewServer(opts ...ServerOpt) (*Server, error) {
+ config := &serverConfig{}
+ for _, opt := range opts {
+ if err := opt(config); err != nil {
+ return nil, err
+ }
+ }
+
return &Server{
+ config: config,
services: newServiceSet(),
done: make(chan struct{}),
listeners: make(map[net.Listener]struct{}),
connections: make(map[*serverConn]struct{}),
- }
+ }, nil
}
func (s *Server) Register(name string, methods map[string]Method) {
@@ -46,10 +56,15 @@
defer s.closeListener(l)
var (
- ctx = context.Background()
- backoff time.Duration
+ ctx = context.Background()
+ backoff time.Duration
+ handshaker = s.config.handshaker
)
+ if handshaker == nil {
+ handshaker = handshakerFunc(noopHandshake)
+ }
+
for {
conn, err := l.Accept()
if err != nil {
@@ -82,7 +97,15 @@
}
backoff = 0
- sc := s.newConn(conn)
+
+ approved, handshake, err := handshaker.Handshake(ctx, conn)
+ if err != nil {
+ log.L.WithError(err).Errorf("ttrpc: refusing connection after handshake")
+ conn.Close()
+ continue
+ }
+
+ sc := s.newConn(approved, handshake)
go sc.run(ctx)
}
}
@@ -205,11 +228,12 @@
}
}
-func (s *Server) newConn(conn net.Conn) *serverConn {
+func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
c := &serverConn{
- server: s,
- conn: conn,
- shutdown: make(chan struct{}),
+ server: s,
+ conn: conn,
+ handshake: handshake,
+ shutdown: make(chan struct{}),
}
c.setState(connStateIdle)
s.addConnection(c)
@@ -217,9 +241,10 @@
}
type serverConn struct {
- server *Server
- conn net.Conn
- state atomic.Value
+ server *Server
+ conn net.Conn
+ handshake interface{} // data from handshake, not used for now
+ state atomic.Value
shutdownOnce sync.Once
shutdown chan struct{} // forced shutdown, used by close
@@ -406,7 +431,7 @@
// branch. Basically, it means that we are no longer receiving
// requests due to a terminal error.
recvErr = nil // connection is now "closing"
- if err != nil {
+ if err != nil && err != io.EOF {
log.L.WithError(err).Error("error receiving message")
}
case <-shutdown:
diff --git a/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go
new file mode 100644
index 0000000..812d927
--- /dev/null
+++ b/vendor/github.com/stevvooe/ttrpc/unixcreds_linux.go
@@ -0,0 +1,92 @@
+package ttrpc
+
+import (
+ "context"
+ "net"
+ "os"
+ "syscall"
+
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+type UnixCredentialsFunc func(*unix.Ucred) error
+
+func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+ uc, err := requireUnixSocket(conn)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket")
+ }
+
+ rs, err := uc.SyscallConn()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed")
+ }
+ var (
+ ucred *unix.Ucred
+ ucredErr error
+ )
+ if err := rs.Control(func(fd uintptr) {
+ ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED)
+ }); err != nil {
+ return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed")
+ }
+
+ if ucredErr != nil {
+		return nil, nil, errors.Wrapf(ucredErr, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials")
+ }
+
+ if err := fn(ucred); err != nil {
+ return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed")
+ }
+
+ return uc, ucred, nil
+}
+
+// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID.
+//
+// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an
+// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001.
+// So calling this function with uid=0 allows a connection from effective UID 0 but rejects
+// a connection from effective UID 1001.
+//
+// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)."
+func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc {
+ return func(ucred *unix.Ucred) error {
+ return requireUidGid(ucred, uid, gid)
+ }
+}
+
+func UnixSocketRequireRoot() UnixCredentialsFunc {
+ return UnixSocketRequireUidGid(0, 0)
+}
+
+// UnixSocketRequireSameUser resolves the current effective unix user and returns a
+// UnixCredentialsFunc that will validate incoming unix connections against the
+// current credentials.
+//
+// This is useful when using abstract sockets that are accessible by all users.
+func UnixSocketRequireSameUser() UnixCredentialsFunc {
+ euid, egid := os.Geteuid(), os.Getegid()
+ return UnixSocketRequireUidGid(euid, egid)
+}
+
+func requireRoot(ucred *unix.Ucred) error {
+ return requireUidGid(ucred, 0, 0)
+}
+
+func requireUidGid(ucred *unix.Ucred, uid, gid int) error {
+ if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) {
+ return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials")
+ }
+ return nil
+}
+
+func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) {
+ uc, ok := conn.(*net.UnixConn)
+ if !ok {
+ return nil, errors.New("a unix socket connection is required")
+ }
+
+ return uc, nil
+}
diff --git a/vendor/github.com/tinylib/msgp/README.md b/vendor/github.com/tinylib/msgp/README.md
index a7cc849..1328cca 100644
--- a/vendor/github.com/tinylib/msgp/README.md
+++ b/vendor/github.com/tinylib/msgp/README.md
@@ -1,15 +1,12 @@
MessagePack Code Generator [![Build Status](https://travis-ci.org/tinylib/msgp.svg?branch=master)](https://travis-ci.org/tinylib/msgp)
=======
-[![forthebadge](http://forthebadge.com/badges/uses-badges.svg)](http://forthebadge.com)
-[![forthebadge](http://forthebadge.com/badges/ages-12.svg)](http://forthebadge.com)
-
-This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). It is targeted at the `go generate` [tool](http://tip.golang.org/cmd/go/#hdr-Generate_Go_files_by_processing_source). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
+This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
### Why?
- Use Go as your schema language
-- Speeeeeed (400MB/s on modern hardware)
+- Performance
- [JSON interop](http://godoc.org/github.com/tinylib/msgp/msgp#CopyToJSON)
- [User-defined extensions](http://github.com/tinylib/msgp/wiki/Using-Extensions)
- Type safety
@@ -17,8 +14,6 @@
### Quickstart
-Note: you need at least go 1.3 to compile this package, and at least go 1.4 to use `go generate`.
-
In a source file, include the following directive:
```go
@@ -45,7 +40,7 @@
By default, the code generator will satisfy `msgp.Sizer`, `msgp.Encodable`, `msgp.Decodable`,
`msgp.Marshaler`, and `msgp.Unmarshaler`. Carefully-designed applications can use these methods to do
-marshalling/unmarshalling with zero allocations.
+marshalling/unmarshalling with zero heap allocations.
While `msgp.Marshaler` and `msgp.Unmarshaler` are quite similar to the standard library's
`json.Marshaler` and `json.Unmarshaler`, `msgp.Encodable` and `msgp.Decodable` are useful for
@@ -62,6 +57,7 @@
- Generation of both `[]byte`-oriented and `io.Reader/io.Writer`-oriented methods
- Support for arbitrary type system extensions
- [Preprocessor directives](http://github.com/tinylib/msgp/wiki/Preprocessor-Directives)
+ - File-based dependency model means fast codegen regardless of source tree size.
Consider the following:
```go
@@ -84,21 +80,23 @@
### Status
-Alpha. I _will_ break stuff. There is an open milestone for Beta stability (targeted for January.) Only the `/msgp` sub-directory will have a stability guarantee.
+Mostly stable, in that no breaking changes have been made to the `/msgp` library in more than a year. Newer versions
+of the code may generate different code than older versions for performance reasons. I (@philhofer) am aware of a
+number of stability-critical commercial applications that use this code with good results. But, caveat emptor.
You can read more about how `msgp` maps MessagePack types onto Go types [in the wiki](http://github.com/tinylib/msgp/wiki).
Here some of the known limitations/restrictions:
- - Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
- - Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
- - Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
- - _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
+- Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
+- Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
+- Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
+- _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
If the output compiles, then there's a pretty good chance things are fine. (Plus, we generate tests for you.) *Please, please, please* file an issue if you think the generator is writing broken code.
### Performance
-If you like benchmarks, see [here.](https://github.com/alecthomas/go_serialization_benchmarks)
+If you like benchmarks, see [here](http://bravenewgeek.com/so-you-wanna-go-fast/) and [here](https://github.com/alecthomas/go_serialization_benchmarks).
-As one might expect, the generated methods that deal with `[]byte` are faster, but the `io.Reader/Writer` methods are generally more memory-efficient for large (> 2KB) objects.
+As one might expect, the generated methods that deal with `[]byte` are faster for small objects, but the `io.Reader/Writer` methods are generally more memory-efficient (and, at some point, faster) for large (> 2KB) objects.
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
new file mode 100644
index 0000000..6c6bb37
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
@@ -0,0 +1,24 @@
+// +build linux,!appengine
+
+package msgp
+
+import (
+ "os"
+ "syscall"
+)
+
+func adviseRead(mem []byte) {
+ syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
+}
+
+func adviseWrite(mem []byte) {
+ syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
+}
+
+func fallocate(f *os.File, sz int64) error {
+ err := syscall.Fallocate(int(f.Fd()), 0, 0, sz)
+ if err == syscall.ENOTSUP {
+ return f.Truncate(sz)
+ }
+ return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
new file mode 100644
index 0000000..da65ea5
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
@@ -0,0 +1,17 @@
+// +build !linux appengine
+
+package msgp
+
+import (
+ "os"
+)
+
+// TODO: darwin, BSD support
+
+func adviseRead(mem []byte) {}
+
+func adviseWrite(mem []byte) {}
+
+func fallocate(f *os.File, sz int64) error {
+ return f.Truncate(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/appengine.go b/vendor/github.com/tinylib/msgp/msgp/appengine.go
new file mode 100644
index 0000000..bff9e76
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package msgp
+
+// let's just assume appengine
+// uses 64-bit hardware...
+const smallint = false
+
+func UnsafeString(b []byte) string {
+ return string(b)
+}
+
+func UnsafeBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go
index 35583ba..a0434c7 100644
--- a/vendor/github.com/tinylib/msgp/msgp/circular.go
+++ b/vendor/github.com/tinylib/msgp/msgp/circular.go
@@ -1,20 +1,21 @@
package msgp
-import (
- "testing"
-)
+type timer interface {
+ StartTimer()
+ StopTimer()
+}
// EndlessReader is an io.Reader
// that loops over the same data
// endlessly. It is used for benchmarking.
type EndlessReader struct {
- tb *testing.B
+ tb timer
data []byte
offset int
}
// NewEndlessReader returns a new endless reader
-func NewEndlessReader(b []byte, tb *testing.B) *EndlessReader {
+func NewEndlessReader(b []byte, tb timer) *EndlessReader {
return &EndlessReader{tb: tb, data: b, offset: 0}
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
index 32a0ada..588b18f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/extension.go
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -226,7 +226,7 @@
// peek at the extension type, assuming the next
// kind to be read is Extension
func (m *Reader) peekExtensionType() (int8, error) {
- p, err := m.r.Peek(2)
+ p, err := m.R.Peek(2)
if err != nil {
return 0, err
}
@@ -238,7 +238,7 @@
return int8(p[1]), nil
}
size := spec.size
- p, err = m.r.Peek(int(size))
+ p, err = m.R.Peek(int(size))
if err != nil {
return 0, err
}
@@ -273,7 +273,7 @@
// e.Type() is not the same as the wire type.
func (m *Reader) ReadExtension(e Extension) (err error) {
var p []byte
- p, err = m.r.Peek(2)
+ p, err = m.R.Peek(2)
if err != nil {
return
}
@@ -286,13 +286,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(3)
+ p, err = m.R.Peek(3)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(3)
+ _, err = m.R.Skip(3)
}
return
@@ -301,13 +301,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(4)
+ p, err = m.R.Peek(4)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(4)
+ _, err = m.R.Skip(4)
}
return
@@ -316,13 +316,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(6)
+ p, err = m.R.Peek(6)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(6)
+ _, err = m.R.Skip(6)
}
return
@@ -331,13 +331,13 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(10)
+ p, err = m.R.Peek(10)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(10)
+ _, err = m.R.Skip(10)
}
return
@@ -346,18 +346,18 @@
err = errExt(int8(p[1]), e.ExtensionType())
return
}
- p, err = m.r.Peek(18)
+ p, err = m.R.Peek(18)
if err != nil {
return
}
err = e.UnmarshalBinary(p[2:])
if err == nil {
- _, err = m.r.Skip(18)
+ _, err = m.R.Skip(18)
}
return
case mext8:
- p, err = m.r.Peek(3)
+ p, err = m.R.Peek(3)
if err != nil {
return
}
@@ -369,7 +369,7 @@
off = 3
case mext16:
- p, err = m.r.Peek(4)
+ p, err = m.R.Peek(4)
if err != nil {
return
}
@@ -381,7 +381,7 @@
off = 4
case mext32:
- p, err = m.r.Peek(6)
+ p, err = m.R.Peek(6)
if err != nil {
return
}
@@ -397,13 +397,13 @@
return
}
- p, err = m.r.Peek(read + off)
+ p, err = m.R.Peek(read + off)
if err != nil {
return
}
err = e.UnmarshalBinary(p[off:])
if err == nil {
- _, err = m.r.Skip(read + off)
+ _, err = m.R.Skip(read + off)
}
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
new file mode 100644
index 0000000..8e7370e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -0,0 +1,92 @@
+// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build !appengine
+
+package msgp
+
+import (
+ "os"
+ "syscall"
+)
+
+// ReadFile reads a file into 'dst' using
+// a read-only memory mapping. Consequently,
+// the file must be mmap-able, and the
+// Unmarshaler should never write to
+// the source memory. (Methods generated
+// by the msgp tool obey that constraint, but
+// user-defined implementations may not.)
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+func ReadFile(dst Unmarshaler, file *os.File) error {
+ stat, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+ adviseRead(data)
+ _, err = dst.UnmarshalMsg(data)
+ uerr := syscall.Munmap(data)
+ if err == nil {
+ err = uerr
+ }
+ return err
+}
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
+// WriteFile writes a file from 'src' using
+// memory mapping. It overwrites the entire
+// contents of the previous file.
+// The mapping size is calculated
+// using the `Msgsize()` method
+// of 'src', so it must produce a result
+// equal to or greater than the actual encoded
+// size of the object. Otherwise,
+// a fault (SIGBUS) will occur.
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+// NOTE: The performance of this call
+// is highly OS- and filesystem-dependent.
+// Users should take care to test that this
+// performs as expected in a production environment.
+// (Linux users should run a kernel and filesystem
+// that support fallocate(2) for the best results.)
+func WriteFile(src MarshalSizer, file *os.File) error {
+ sz := src.Msgsize()
+ err := fallocate(file, int64(sz))
+ if err != nil {
+ return err
+ }
+ data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+ adviseWrite(data)
+ chunk := data[:0]
+ chunk, err = src.MarshalMsg(chunk)
+ if err != nil {
+ return err
+ }
+ uerr := syscall.Munmap(data)
+ if uerr != nil {
+ return uerr
+ }
+ return file.Truncate(int64(len(chunk)))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go
new file mode 100644
index 0000000..6e654db
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go
@@ -0,0 +1,47 @@
+// +build windows appengine
+
+package msgp
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
+func ReadFile(dst Unmarshaler, file *os.File) error {
+ if u, ok := dst.(Decodable); ok {
+ return u.DecodeMsg(NewReader(file))
+ }
+
+ data, err := ioutil.ReadAll(file)
+ if err != nil {
+ return err
+ }
+ _, err = dst.UnmarshalMsg(data)
+ return err
+}
+
+func WriteFile(src MarshalSizer, file *os.File) error {
+ if e, ok := src.(Encodable); ok {
+ w := NewWriter(file)
+ err := e.EncodeMsg(w)
+ if err == nil {
+ err = w.Flush()
+ }
+ return err
+ }
+
+ raw, err := src.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ _, err = file.Write(raw)
+ return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go
index 5c799ff..4325860 100644
--- a/vendor/github.com/tinylib/msgp/msgp/json.go
+++ b/vendor/github.com/tinylib/msgp/msgp/json.go
@@ -66,7 +66,7 @@
if jsw, ok := w.(jsWriter); ok {
j = jsw
} else {
- bf = bufio.NewWriterSize(w, 512)
+ bf = bufio.NewWriter(w)
j = bf
}
var nn int
@@ -333,7 +333,7 @@
func rwString(dst jsWriter, src *Reader) (n int, err error) {
var p []byte
- p, err = src.r.Peek(1)
+ p, err = src.R.Peek(1)
if err != nil {
return
}
@@ -342,25 +342,25 @@
if isfixstr(lead) {
read = int(rfixstr(lead))
- src.r.Skip(1)
+ src.R.Skip(1)
goto write
}
switch lead {
case mstr8:
- p, err = src.r.Next(2)
+ p, err = src.R.Next(2)
if err != nil {
return
}
read = int(uint8(p[1]))
case mstr16:
- p, err = src.r.Next(3)
+ p, err = src.R.Next(3)
if err != nil {
return
}
read = int(big.Uint16(p[1:]))
case mstr32:
- p, err = src.r.Next(5)
+ p, err = src.R.Next(5)
if err != nil {
return
}
@@ -370,7 +370,7 @@
return
}
write:
- p, err = src.r.Next(read)
+ p, err = src.R.Next(read)
if err != nil {
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go
index 69afc8a..ad07ef9 100644
--- a/vendor/github.com/tinylib/msgp/msgp/number.go
+++ b/vendor/github.com/tinylib/msgp/msgp/number.go
@@ -1,11 +1,105 @@
package msgp
import (
+ "math"
"strconv"
)
// The portable parts of the Number implementation
+// Number can be
+// an int64, uint64, float32,
+// or float64 internally.
+// It can decode itself
+// from any of the native
+// messagepack number types.
+// The zero-value of Number
+// is Int(0). Using the equality
+// operator with Number compares
+// both the type and the value
+// of the number.
+type Number struct {
+ // internally, this
+ // is just a tagged union.
+ // the raw bits of the number
+ // are stored the same way regardless.
+ bits uint64
+ typ Type
+}
+
+// AsInt sets the number to an int64.
+func (n *Number) AsInt(i int64) {
+
+ // we always store int(0)
+ // as {0, InvalidType} in
+ // order to preserve
+ // the behavior of the == operator
+ if i == 0 {
+ n.typ = InvalidType
+ n.bits = 0
+ return
+ }
+
+ n.typ = IntType
+ n.bits = uint64(i)
+}
+
+// AsUint sets the number to a uint64.
+func (n *Number) AsUint(u uint64) {
+ n.typ = UintType
+ n.bits = u
+}
+
+// AsFloat32 sets the value of the number
+// to a float32.
+func (n *Number) AsFloat32(f float32) {
+ n.typ = Float32Type
+ n.bits = uint64(math.Float32bits(f))
+}
+
+// AsFloat64 sets the value of the
+// number to a float64.
+func (n *Number) AsFloat64(f float64) {
+ n.typ = Float64Type
+ n.bits = math.Float64bits(f)
+}
+
+// Int casts the number as an int64, and
+// returns whether or not that was the
+// underlying type.
+func (n *Number) Int() (int64, bool) {
+ return int64(n.bits), n.typ == IntType || n.typ == InvalidType
+}
+
+// Uint casts the number as a uint64, and returns
+// whether or not that was the underlying type.
+func (n *Number) Uint() (uint64, bool) {
+ return n.bits, n.typ == UintType
+}
+
+// Float casts the number to a float64, and
+// returns whether or not that was the underlying
+// type (either a float64 or a float32).
+func (n *Number) Float() (float64, bool) {
+ switch n.typ {
+ case Float32Type:
+ return float64(math.Float32frombits(uint32(n.bits))), true
+ case Float64Type:
+ return math.Float64frombits(n.bits), true
+ default:
+ return 0.0, false
+ }
+}
+
+// Type will return one of:
+// Float64Type, Float32Type, UintType, or IntType.
+func (n *Number) Type() Type {
+ if n.typ == InvalidType {
+ return IntType
+ }
+ return n.typ
+}
+
// DecodeMsg implements msgp.Decodable
func (n *Number) DecodeMsg(r *Reader) error {
typ, err := r.NextType()
@@ -83,6 +177,38 @@
}
}
+// MarshalMsg implements msgp.Marshaler
+func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
+ switch n.typ {
+ case IntType:
+ return AppendInt64(b, int64(n.bits)), nil
+ case UintType:
+ return AppendUint64(b, uint64(n.bits)), nil
+ case Float64Type:
+ return AppendFloat64(b, math.Float64frombits(n.bits)), nil
+ case Float32Type:
+ return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
+ default:
+ return AppendInt64(b, 0), nil
+ }
+}
+
+// EncodeMsg implements msgp.Encodable
+func (n *Number) EncodeMsg(w *Writer) error {
+ switch n.typ {
+ case IntType:
+ return w.WriteInt64(int64(n.bits))
+ case UintType:
+ return w.WriteUint64(n.bits)
+ case Float64Type:
+ return w.WriteFloat64(math.Float64frombits(n.bits))
+ case Float32Type:
+ return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
+ default:
+ return w.WriteInt64(0)
+ }
+}
+
// Msgsize implements msgp.Sizer
func (n *Number) Msgsize() int {
switch n.typ {
@@ -121,6 +247,7 @@
}
}
+// String implements fmt.Stringer
func (n *Number) String() string {
switch n.typ {
case InvalidType:
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_appengine.go b/vendor/github.com/tinylib/msgp/msgp/number_appengine.go
deleted file mode 100644
index c94140d..0000000
--- a/vendor/github.com/tinylib/msgp/msgp/number_appengine.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// +build appengine
-
-package msgp
-
-// let's just assume appengine
-// uses 64-bit hardware...
-const smallint = false
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Number struct {
- ibits uint64 // zero or bits
- fbits float64 // zero or bits
- typ Type // zero or type
-}
-
-func (n *Number) AsFloat64(f float64) {
- n.typ = Float64Type
- n.fbits = f
- n.ibits = 0
-}
-
-func (n *Number) AsFloat32(f float32) {
- n.typ = Float32Type
- n.fbits = float64(f)
- n.ibits = 0
-}
-
-func (n *Number) AsInt(i int64) {
- n.fbits = 0
- if i == 0 {
- n.typ = InvalidType
- n.ibits = 0
- return
- }
- n.ibits = uint64(i)
- n.typ = IntType
-}
-
-func (n *Number) AsUint(u uint64) {
- n.ibits = u
- n.fbits = 0
- n.typ = UintType
-}
-
-func (n *Number) Float() (float64, bool) {
- return n.fbits, n.typ == Float64Type || n.typ == Float32Type
-}
-
-func (n *Number) Int() (int64, bool) {
- return int64(n.ibits), n.typ == IntType
-}
-
-func (n *Number) Uint() (uint64, bool) {
- return n.ibits, n.typ == UintType
-}
-
-func (n *Number) Type() Type {
- if n.typ == InvalidType {
- return IntType
- }
- return n.typ
-}
-
-func (n *Number) MarshalMsg(o []byte) ([]byte, error) {
- switch n.typ {
- case InvalidType:
- return AppendInt64(o, 0), nil
- case IntType:
- return AppendInt64(o, int64(n.ibits)), nil
- case UintType:
- return AppendUint64(o, n.ibits), nil
- case Float32Type:
- return AppendFloat32(o, float32(n.fbits)), nil
- case Float64Type:
- return AppendFloat64(o, n.fbits), nil
- }
- panic("unreachable code!")
-}
-
-func (n *Number) EncodeMsg(w *Writer) error {
- switch n.typ {
- case InvalidType:
- return w.WriteInt64(0)
- case IntType:
- return w.WriteInt64(int64(n.ibits))
- case UintType:
- return w.WriteUint64(n.ibits)
- case Float32Type:
- return w.WriteFloat32(float32(n.fbits))
- case Float64Type:
- return w.WriteFloat64(n.fbits)
- }
- panic("unreachable code!")
-}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go b/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go
deleted file mode 100644
index 8ea0462..0000000
--- a/vendor/github.com/tinylib/msgp/msgp/number_unsafe.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// +build !appengine
-
-package msgp
-
-import (
- "reflect"
- "unsafe"
-)
-
-const (
- // spec says int and uint are always
- // the same size, but that int/uint
- // size may not be machine word size
- smallint = unsafe.Sizeof(int(0)) == 4
-)
-
-// UnsafeString returns the byte slice as a volatile string
-// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
-// THIS IS EVIL CODE.
-// YOU HAVE BEEN WARNED.
-func UnsafeString(b []byte) string {
- return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)}))
-}
-
-// UnsafeBytes returns the string as a byte slice
-// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
-// THIS IS EVIL CODE.
-// YOU HAVE BEEN WARNED.
-func UnsafeBytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Len: len(s),
- Cap: len(s),
- Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
- }))
-}
-
-// Number can be
-// an int64, uint64, float32,
-// or float64 internally.
-// It can decode itself
-// from any of the native
-// messagepack number types.
-// The zero-value of Number
-// is Int(0). Using the equality
-// operator with Number compares
-// both the type and the value
-// of the number.
-type Number struct {
- // internally, this
- // is just a tagged union.
- // the raw bits of the number
- // are stored the same way regardless.
- bits uint64
- typ Type
-}
-
-// AsFloat64 sets the number to
-// a float64.
-func (n *Number) AsFloat64(f float64) {
- n.typ = Float64Type
- n.bits = *(*uint64)(unsafe.Pointer(&f))
-}
-
-// AsInt sets the number to an int64.
-func (n *Number) AsInt(i int64) {
-
- // we always store int(0)
- // as {0, InvalidType} in
- // order to preserve
- // the behavior of the == operator
- if i == 0 {
- n.typ = InvalidType
- n.bits = 0
- return
- }
-
- n.typ = IntType
- n.bits = uint64(i)
-}
-
-// AsUint sets the number to a uint64.
-func (n *Number) AsUint(u uint64) {
- n.typ = UintType
- n.bits = u
-}
-
-// AsFloat32 sets the number to a float32.
-func (n *Number) AsFloat32(f float32) {
- n.typ = Float32Type
- g := float64(f)
- n.bits = *(*uint64)(unsafe.Pointer(&g))
-}
-
-// Type will return one of:
-// Float64Type, Float32Type, UintType, or IntType.
-func (n *Number) Type() Type {
- if n.typ == InvalidType {
- return IntType
- }
- return n.typ
-}
-
-// Float casts the number of the float,
-// and returns whether or not that was
-// the underlying type. (This is legal
-// for both float32 and float64 types.)
-func (n *Number) Float() (float64, bool) {
- return *(*float64)(unsafe.Pointer(&n.bits)), n.typ == Float64Type || n.typ == Float32Type
-}
-
-// Int casts the number as an int64, and
-// returns whether or not that was the
-// underlying type.
-func (n *Number) Int() (int64, bool) {
- return int64(n.bits), n.typ == IntType || n.typ == InvalidType
-}
-
-// Uint casts the number as a uint64, and returns
-// whether or not that was the underlying type.
-func (n *Number) Uint() (uint64, bool) {
- return n.bits, n.typ == UintType
-}
-
-// EncodeMsg implements msgp.Encodable
-func (n *Number) EncodeMsg(w *Writer) error {
- switch n.typ {
- case InvalidType:
- return w.WriteInt(0)
- case IntType:
- return w.WriteInt64(int64(n.bits))
- case UintType:
- return w.WriteUint64(n.bits)
- case Float64Type:
- return w.WriteFloat64(*(*float64)(unsafe.Pointer(&n.bits)))
- case Float32Type:
- return w.WriteFloat32(float32(*(*float64)(unsafe.Pointer(&n.bits))))
- default:
- // this should never ever happen
- panic("(*Number).typ is invalid")
- }
-}
-
-// MarshalMsg implements msgp.Marshaler
-func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
- switch n.typ {
- case InvalidType:
- return AppendInt(b, 0), nil
- case IntType:
- return AppendInt64(b, int64(n.bits)), nil
- case UintType:
- return AppendUint64(b, n.bits), nil
- case Float64Type:
- return AppendFloat64(b, *(*float64)(unsafe.Pointer(&n.bits))), nil
- case Float32Type:
- return AppendFloat32(b, float32(*(*float64)(unsafe.Pointer(&n.bits)))), nil
- default:
- panic("(*Number).typ is invalid")
- }
-}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go
index c34482e..20cd1ef 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read.go
@@ -1,11 +1,12 @@
package msgp
import (
- "github.com/philhofer/fwd"
"io"
"math"
"sync"
"time"
+
+ "github.com/philhofer/fwd"
)
// where we keep old *Readers
@@ -111,10 +112,10 @@
// reader will be buffered.
func NewReader(r io.Reader) *Reader {
p := readerPool.Get().(*Reader)
- if p.r == nil {
- p.r = fwd.NewReader(r)
+ if p.R == nil {
+ p.R = fwd.NewReader(r)
} else {
- p.r.Reset(r)
+ p.R.Reset(r)
}
return p
}
@@ -122,39 +123,96 @@
// NewReaderSize returns a *Reader with a buffer of the given size.
// (This is vastly preferable to passing the decoder a reader that is already buffered.)
func NewReaderSize(r io.Reader, sz int) *Reader {
- return &Reader{r: fwd.NewReaderSize(r, sz)}
+ return &Reader{R: fwd.NewReaderSize(r, sz)}
}
// Reader wraps an io.Reader and provides
// methods to read MessagePack-encoded values
// from it. Readers are buffered.
type Reader struct {
- r *fwd.Reader
+ // R is the buffered reader
+ // that the Reader uses
+ // to decode MessagePack.
+ // The Reader itself
+ // is stateless; all the
+ // buffering is done
+ // within R.
+ R *fwd.Reader
scratch []byte
}
// Read implements `io.Reader`
func (m *Reader) Read(p []byte) (int, error) {
- return m.r.Read(p)
+ return m.R.Read(p)
+}
+
+// CopyNext reads the next object from m without decoding it and writes it to w.
+// It avoids unnecessary copies internally.
+func (m *Reader) CopyNext(w io.Writer) (int64, error) {
+ sz, o, err := getNextSize(m.R)
+ if err != nil {
+ return 0, err
+ }
+
+ var n int64
+ // Opportunistic optimization: if we can fit the whole thing in the m.R
+ // buffer, then just get a pointer to that, and pass it to w.Write,
+ // avoiding an allocation.
+ if int(sz) <= m.R.BufferSize() {
+ var nn int
+ var buf []byte
+ buf, err = m.R.Next(int(sz))
+ if err != nil {
+ if err == io.ErrUnexpectedEOF {
+ err = ErrShortBytes
+ }
+ return 0, err
+ }
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ } else {
+ // Fall back to io.CopyN.
+ // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer)
+ n, err = io.CopyN(w, m.R, int64(sz))
+ if err == io.ErrUnexpectedEOF {
+ err = ErrShortBytes
+ }
+ }
+ if err != nil {
+ return n, err
+ } else if n < int64(sz) {
+ return n, io.ErrShortWrite
+ }
+
+ // for maps and slices, read elements
+ for x := uintptr(0); x < o; x++ {
+ var n2 int64
+ n2, err = m.CopyNext(w)
+ if err != nil {
+ return n, err
+ }
+ n += n2
+ }
+ return n, nil
}
// ReadFull implements `io.ReadFull`
func (m *Reader) ReadFull(p []byte) (int, error) {
- return m.r.ReadFull(p)
+ return m.R.ReadFull(p)
}
// Reset resets the underlying reader.
-func (m *Reader) Reset(r io.Reader) { m.r.Reset(r) }
+func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) }
// Buffered returns the number of bytes currently in the read buffer.
-func (m *Reader) Buffered() int { return m.r.Buffered() }
+func (m *Reader) Buffered() int { return m.R.Buffered() }
// BufferSize returns the capacity of the read buffer.
-func (m *Reader) BufferSize() int { return m.r.BufferSize() }
+func (m *Reader) BufferSize() int { return m.R.BufferSize() }
// NextType returns the next object type to be decoded.
func (m *Reader) NextType() (Type, error) {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
if err != nil {
return InvalidType, err
}
@@ -182,12 +240,14 @@
// IsNil returns whether or not
// the next byte is a null messagepack byte
func (m *Reader) IsNil() bool {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
return err == nil && p[0] == mnil
}
+// getNextSize returns the size of the next object on the wire.
// returns (obj size, obj elements, error)
// only maps and arrays have non-zero obj elements
+// for maps and arrays, obj size does not include elements
//
// use uintptr b/c it's guaranteed to be large enough
// to hold whatever we can fit in memory.
@@ -243,8 +303,8 @@
// we can use the faster
// method if we have enough
// buffered data
- if m.r.Buffered() >= 5 {
- p, err = m.r.Peek(5)
+ if m.R.Buffered() >= 5 {
+ p, err = m.R.Peek(5)
if err != nil {
return err
}
@@ -253,7 +313,7 @@
return err
}
} else {
- v, o, err = getNextSize(m.r)
+ v, o, err = getNextSize(m.R)
if err != nil {
return err
}
@@ -261,7 +321,7 @@
// 'v' is always non-zero
// if err == nil
- _, err = m.r.Skip(int(v))
+ _, err = m.R.Skip(int(v))
if err != nil {
return err
}
@@ -284,26 +344,26 @@
func (m *Reader) ReadMapHeader() (sz uint32, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
lead = p[0]
if isfixmap(lead) {
sz = uint32(rfixmap(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case mmap16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
sz = uint32(big.Uint16(p[1:]))
return
case mmap32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -338,7 +398,7 @@
// method; writing into the returned slice may
// corrupt future reads.
func (m *Reader) ReadMapKeyPtr() ([]byte, error) {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
if err != nil {
return nil, err
}
@@ -346,24 +406,24 @@
var read int
if isfixstr(lead) {
read = int(rfixstr(lead))
- m.r.Skip(1)
+ m.R.Skip(1)
goto fill
}
switch lead {
case mstr8, mbin8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return nil, err
}
read = int(p[1])
case mstr16, mbin16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return nil, err
}
read = int(big.Uint16(p[1:]))
case mstr32, mbin32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return nil, err
}
@@ -375,7 +435,7 @@
if read == 0 {
return nil, ErrShortBytes
}
- return m.r.Next(read)
+ return m.R.Next(read)
}
// ReadArrayHeader reads the next object as an
@@ -384,19 +444,19 @@
func (m *Reader) ReadArrayHeader() (sz uint32, err error) {
var lead byte
var p []byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
lead = p[0]
if isfixarray(lead) {
sz = uint32(rfixarray(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case marray16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
@@ -404,7 +464,7 @@
return
case marray32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -419,14 +479,14 @@
// ReadNil reads a 'nil' MessagePack byte from the reader
func (m *Reader) ReadNil() error {
- p, err := m.r.Peek(1)
+ p, err := m.R.Peek(1)
if err != nil {
return err
}
if p[0] != mnil {
return badPrefix(NilType, p[0])
}
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return err
}
@@ -435,7 +495,7 @@
// it will be up-cast to a float64.)
func (m *Reader) ReadFloat64() (f float64, err error) {
var p []byte
- p, err = m.r.Peek(9)
+ p, err = m.R.Peek(9)
if err != nil {
// we'll allow a coversion from float32 to float64,
// since we don't lose any precision
@@ -455,14 +515,14 @@
return
}
f = math.Float64frombits(getMuint64(p))
- _, err = m.r.Skip(9)
+ _, err = m.R.Skip(9)
return
}
// ReadFloat32 reads a float32 from the reader
func (m *Reader) ReadFloat32() (f float32, err error) {
var p []byte
- p, err = m.r.Peek(5)
+ p, err = m.R.Peek(5)
if err != nil {
return
}
@@ -471,14 +531,14 @@
return
}
f = math.Float32frombits(getMuint32(p))
- _, err = m.r.Skip(5)
+ _, err = m.R.Skip(5)
return
}
// ReadBool reads a bool from the reader
func (m *Reader) ReadBool() (b bool, err error) {
var p []byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -490,7 +550,7 @@
err = badPrefix(BoolType, p[0])
return
}
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
@@ -498,7 +558,7 @@
func (m *Reader) ReadInt64() (i int64, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -506,17 +566,17 @@
if isfixint(lead) {
i = int64(rfixint(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
} else if isnfixint(lead) {
i = int64(rnfixint(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case mint8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
@@ -524,7 +584,7 @@
return
case mint16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
@@ -532,7 +592,7 @@
return
case mint32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -540,7 +600,7 @@
return
case mint64:
- p, err = m.r.Next(9)
+ p, err = m.R.Next(9)
if err != nil {
return
}
@@ -607,19 +667,19 @@
func (m *Reader) ReadUint64() (u uint64, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
lead = p[0]
if isfixint(lead) {
u = uint64(rfixint(lead))
- _, err = m.r.Skip(1)
+ _, err = m.R.Skip(1)
return
}
switch lead {
case muint8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
@@ -627,7 +687,7 @@
return
case muint16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
@@ -635,7 +695,7 @@
return
case muint32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -643,7 +703,7 @@
return
case muint64:
- p, err = m.r.Next(9)
+ p, err = m.R.Next(9)
if err != nil {
return
}
@@ -707,6 +767,10 @@
return
}
+// ReadByte is analogous to ReadUint8.
+//
+// NOTE: this is *not* an implementation
+// of io.ByteReader.
func (m *Reader) ReadByte() (b byte, err error) {
var in uint64
in, err = m.ReadUint64()
@@ -724,7 +788,7 @@
func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(2)
+ p, err = m.R.Peek(2)
if err != nil {
return
}
@@ -733,15 +797,15 @@
switch lead {
case mbin8:
read = int64(p[1])
- m.r.Skip(2)
+ m.R.Skip(2)
case mbin16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
read = int64(big.Uint16(p[1:]))
case mbin32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -755,16 +819,55 @@
} else {
b = scratch[0:read]
}
- _, err = m.r.ReadFull(b)
+ _, err = m.R.ReadFull(b)
return
}
+// ReadBytesHeader reads the size header
+// of a MessagePack 'bin' object. The user
+// is responsible for dealing with the next
+// 'sz' bytes from the reader in an application-specific
+// way.
+func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ switch p[0] {
+ case mbin8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ sz = uint32(p[1])
+ return
+ case mbin16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mbin32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint32(p[1:]))
+ return
+ default:
+ err = badPrefix(BinType, p[0])
+ return
+ }
+}
+
// ReadExactBytes reads a MessagePack 'bin'-encoded
// object off of the wire into the provided slice. An
// ArrayError will be returned if the object is not
// exactly the length of the input slice.
func (m *Reader) ReadExactBytes(into []byte) error {
- p, err := m.r.Peek(2)
+ p, err := m.R.Peek(2)
if err != nil {
return err
}
@@ -776,14 +879,14 @@
read = int64(p[1])
skip = 2
case mbin16:
- p, err = m.r.Peek(3)
+ p, err = m.R.Peek(3)
if err != nil {
return err
}
read = int64(big.Uint16(p[1:]))
skip = 3
case mbin32:
- p, err = m.r.Peek(5)
+ p, err = m.R.Peek(5)
if err != nil {
return err
}
@@ -795,8 +898,8 @@
if read != int64(len(into)) {
return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)}
}
- m.r.Skip(skip)
- _, err = m.r.ReadFull(into)
+ m.R.Skip(skip)
+ _, err = m.R.ReadFull(into)
return err
}
@@ -806,7 +909,7 @@
func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
var p []byte
var lead byte
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -815,25 +918,25 @@
if isfixstr(lead) {
read = int64(rfixstr(lead))
- m.r.Skip(1)
+ m.R.Skip(1)
goto fill
}
switch lead {
case mstr8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
read = int64(uint8(p[1]))
case mstr16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
read = int64(big.Uint16(p[1:]))
case mstr32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -848,16 +951,60 @@
} else {
b = scratch[0:read]
}
- _, err = m.r.ReadFull(b)
+ _, err = m.R.ReadFull(b)
return
}
+// ReadStringHeader reads a string header
+// off of the wire. The user is then responsible
+// for dealing with the next 'sz' bytes from
+// the reader in an application-specific manner.
+func (m *Reader) ReadStringHeader() (sz uint32, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead := p[0]
+ if isfixstr(lead) {
+ sz = uint32(rfixstr(lead))
+ m.R.Skip(1)
+ return
+ }
+ switch lead {
+ case mstr8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ sz = uint32(p[1])
+ return
+ case mstr16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mstr32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = big.Uint32(p[1:])
+ return
+ default:
+ err = badPrefix(StrType, lead)
+ return
+ }
+}
+
// ReadString reads a utf-8 string from the reader
func (m *Reader) ReadString() (s string, err error) {
var p []byte
var lead byte
var read int64
- p, err = m.r.Peek(1)
+ p, err = m.R.Peek(1)
if err != nil {
return
}
@@ -865,25 +1012,25 @@
if isfixstr(lead) {
read = int64(rfixstr(lead))
- m.r.Skip(1)
+ m.R.Skip(1)
goto fill
}
switch lead {
case mstr8:
- p, err = m.r.Next(2)
+ p, err = m.R.Next(2)
if err != nil {
return
}
read = int64(uint8(p[1]))
case mstr16:
- p, err = m.r.Next(3)
+ p, err = m.R.Next(3)
if err != nil {
return
}
read = int64(big.Uint16(p[1:]))
case mstr32:
- p, err = m.r.Next(5)
+ p, err = m.R.Next(5)
if err != nil {
return
}
@@ -915,7 +1062,7 @@
// thus escape analysis *must* conclude that
// 'out' escapes.
out := make([]byte, read)
- _, err = m.r.ReadFull(out)
+ _, err = m.R.ReadFull(out)
if err != nil {
return
}
@@ -926,7 +1073,7 @@
// ReadComplex64 reads a complex64 from the reader
func (m *Reader) ReadComplex64() (f complex64, err error) {
var p []byte
- p, err = m.r.Peek(10)
+ p, err = m.R.Peek(10)
if err != nil {
return
}
@@ -940,14 +1087,14 @@
}
f = complex(math.Float32frombits(big.Uint32(p[2:])),
math.Float32frombits(big.Uint32(p[6:])))
- _, err = m.r.Skip(10)
+ _, err = m.R.Skip(10)
return
}
// ReadComplex128 reads a complex128 from the reader
func (m *Reader) ReadComplex128() (f complex128, err error) {
var p []byte
- p, err = m.r.Peek(18)
+ p, err = m.R.Peek(18)
if err != nil {
return
}
@@ -961,7 +1108,7 @@
}
f = complex(math.Float64frombits(big.Uint64(p[2:])),
math.Float64frombits(big.Uint64(p[10:])))
- _, err = m.r.Skip(18)
+ _, err = m.R.Skip(18)
return
}
@@ -996,7 +1143,7 @@
// The returned time's location will be set to time.Local.
func (m *Reader) ReadTime() (t time.Time, err error) {
var p []byte
- p, err = m.r.Peek(15)
+ p, err = m.R.Peek(15)
if err != nil {
return
}
@@ -1010,7 +1157,7 @@
}
sec, nsec := getUnix(p[3:])
t = time.Unix(sec, int64(nsec)).Local()
- _, err = m.r.Skip(15)
+ _, err = m.R.Skip(15)
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
index 732fa68..78e466f 100644
--- a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
@@ -117,13 +117,13 @@
}
func appendNext(f *Reader, d *[]byte) error {
- amt, o, err := getNextSize(f.r)
+ amt, o, err := getNextSize(f.R)
if err != nil {
return err
}
var i int
*d, i = ensure(*d, int(amt))
- _, err = f.r.ReadFull((*d)[i:])
+ _, err = f.R.ReadFull((*d)[i:])
if err != nil {
return err
}
@@ -576,7 +576,7 @@
return uint(u), b, err
}
-// ReadByteBytes is analagous to ReadUint8Bytes
+// ReadByteBytes is analogous to ReadUint8Bytes
func ReadByteBytes(b []byte) (byte, []byte, error) {
return ReadUint8Bytes(b)
}
@@ -784,6 +784,22 @@
return string(v), o, err
}
+// ReadStringAsBytes reads a 'str' object
+// into a slice of bytes. 'v' is the value of
+// the 'str' object, which may reside in memory
+// pointed to by 'scratch'. 'o' is the remaining bytes
+// in 'b'.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (not 'str' type)
+// - InvalidPrefixError (unknown type marker)
+func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+ var tmp []byte
+ tmp, o, err = ReadStringZC(b)
+ v = append(scratch[:0], tmp...)
+ return
+}
+
// ReadComplex128Bytes reads a complex128
// extension object from 'b' and returns the
// remaining bytes.
@@ -922,14 +938,14 @@
case ArrayType:
var sz uint32
- sz, b, err = ReadArrayHeaderBytes(b)
+ sz, o, err = ReadArrayHeaderBytes(b)
if err != nil {
return
}
j := make([]interface{}, int(sz))
i = j
for d := range j {
- j[d], b, err = ReadIntfBytes(b)
+ j[d], o, err = ReadIntfBytes(o)
if err != nil {
return
}
diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
new file mode 100644
index 0000000..4bcf321
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
@@ -0,0 +1,41 @@
+// +build !appengine
+
+package msgp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE:
+// all of the definitions in this file
+// should be repeated in appengine.go,
+// but without using unsafe
+
+const (
+ // spec says int and uint are always
+ // the same size, but that int/uint
+ // size may not be machine word size
+ smallint = unsafe.Sizeof(int(0)) == 4
+)
+
+// UnsafeString returns the byte slice as a volatile string
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeString(b []byte) string {
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: sh.Data, Len: sh.Len}))
+}
+
+// UnsafeBytes returns the string as a byte slice
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Len: len(s),
+ Cap: len(s),
+ Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
+ }))
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go
index 216697f..da9099c 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write.go
@@ -10,13 +10,6 @@
"time"
)
-func abs(i int64) int64 {
- if i < 0 {
- return -i
- }
- return i
-}
-
// Sizer is an interface implemented
// by types that can estimate their
// size when MessagePack encoded.
@@ -59,15 +52,26 @@
// it will cause undefined behavior.
func freeW(w *Writer) { pushWriter(w) }
-// Require ensures that cap(old)-len(old) >= extra
+// Require ensures that cap(old)-len(old) >= extra.
func Require(old []byte, extra int) []byte {
- if cap(old)-len(old) >= extra {
+ l := len(old)
+ c := cap(old)
+ r := l + extra
+ if c >= r {
return old
- }
- if len(old) == 0 {
+ } else if l == 0 {
return make([]byte, 0, extra)
}
- n := make([]byte, len(old), cap(old)-len(old)+extra)
+ // the new size is the greater
+ // of double the old capacity
+ // and the sum of the old length
+ // and the number of new bytes
+ // necessary.
+ c <<= 1
+ if c < r {
+ c = r
+ }
+ n := make([]byte, l, c)
copy(n, old)
return n
}
@@ -184,6 +188,17 @@
return wl, nil
}
+func (mw *Writer) Append(b ...byte) error {
+ if mw.avail() < len(b) {
+ err := mw.flush()
+ if err != nil {
+ return err
+ }
+ }
+ mw.wloc += copy(mw.buf[mw.wloc:], b)
+ return nil
+}
+
// push one byte onto the buffer
//
// NOTE: this is a hot code path
@@ -289,9 +304,9 @@
// size to the writer
func (mw *Writer) WriteMapHeader(sz uint32) error {
switch {
- case sz < 16:
+ case sz <= 15:
return mw.push(wfixmap(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
return mw.prefix16(mmap16, uint16(sz))
default:
return mw.prefix32(mmap32, sz)
@@ -302,9 +317,9 @@
// given size to the writer
func (mw *Writer) WriteArrayHeader(sz uint32) error {
switch {
- case sz < 16:
+ case sz <= 15:
return mw.push(wfixarray(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
return mw.prefix16(marray16, uint16(sz))
default:
return mw.prefix32(marray32, sz)
@@ -328,17 +343,26 @@
// WriteInt64 writes an int64 to the writer
func (mw *Writer) WriteInt64(i int64) error {
- a := abs(i)
+ if i >= 0 {
+ switch {
+ case i <= math.MaxInt8:
+ return mw.push(wfixint(uint8(i)))
+ case i <= math.MaxInt16:
+ return mw.prefix16(mint16, uint16(i))
+ case i <= math.MaxInt32:
+ return mw.prefix32(mint32, uint32(i))
+ default:
+ return mw.prefix64(mint64, uint64(i))
+ }
+ }
switch {
- case i < 0 && i > -32:
+ case i >= -32:
return mw.push(wnfixint(int8(i)))
- case i >= 0 && i < 128:
- return mw.push(wfixint(uint8(i)))
- case a < math.MaxInt8:
+ case i >= math.MinInt8:
return mw.prefix8(mint8, uint8(i))
- case a < math.MaxInt16:
+ case i >= math.MinInt16:
return mw.prefix16(mint16, uint16(i))
- case a < math.MaxInt32:
+ case i >= math.MinInt32:
return mw.prefix32(mint32, uint32(i))
default:
return mw.prefix64(mint64, uint64(i))
@@ -360,20 +384,20 @@
// WriteUint64 writes a uint64 to the writer
func (mw *Writer) WriteUint64(u uint64) error {
switch {
- case u < (1 << 7):
+ case u <= (1<<7)-1:
return mw.push(wfixint(uint8(u)))
- case u < math.MaxUint8:
+ case u <= math.MaxUint8:
return mw.prefix8(muint8, uint8(u))
- case u < math.MaxUint16:
+ case u <= math.MaxUint16:
return mw.prefix16(muint16, uint16(u))
- case u < math.MaxUint32:
+ case u <= math.MaxUint32:
return mw.prefix32(muint32, uint32(u))
default:
return mw.prefix64(muint64, u)
}
}
-// WriteByte is analagous to WriteUint8
+// WriteByte is analogous to WriteUint8
func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
// WriteUint8 writes a uint8 to the writer
@@ -393,9 +417,9 @@
sz := uint32(len(b))
var err error
switch {
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
err = mw.prefix8(mbin8, uint8(sz))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
err = mw.prefix16(mbin16, uint16(sz))
default:
err = mw.prefix32(mbin32, sz)
@@ -407,6 +431,20 @@
return err
}
+// WriteBytesHeader writes just the size header
+// of a MessagePack 'bin' object. The user is responsible
+// for then writing 'sz' more bytes into the stream.
+func (mw *Writer) WriteBytesHeader(sz uint32) error {
+ switch {
+ case sz <= math.MaxUint8:
+ return mw.prefix8(mbin8, uint8(sz))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mbin16, uint16(sz))
+ default:
+ return mw.prefix32(mbin32, sz)
+ }
+}
+
// WriteBool writes a bool to the writer
func (mw *Writer) WriteBool(b bool) error {
if b {
@@ -421,11 +459,11 @@
sz := uint32(len(s))
var err error
switch {
- case sz < 32:
+ case sz <= 31:
err = mw.push(wfixstr(uint8(sz)))
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
err = mw.prefix8(mstr8, uint8(sz))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
err = mw.prefix16(mstr16, uint16(sz))
default:
err = mw.prefix32(mstr32, sz)
@@ -436,6 +474,45 @@
return mw.writeString(s)
}
+// WriteStringHeader writes just the string size
+// header of a MessagePack 'str' object. The user
+// is responsible for writing 'sz' more valid UTF-8
+// bytes to the stream.
+func (mw *Writer) WriteStringHeader(sz uint32) error {
+ switch {
+ case sz <= 31:
+ return mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ return mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mstr16, uint16(sz))
+ default:
+ return mw.prefix32(mstr32, sz)
+ }
+}
+
+// WriteStringFromBytes writes a 'str' object
+// from a []byte.
+func (mw *Writer) WriteStringFromBytes(str []byte) error {
+ sz := uint32(len(str))
+ var err error
+ switch {
+ case sz <= 31:
+ err = mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ err = mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ err = mw.prefix16(mstr16, uint16(sz))
+ default:
+ err = mw.prefix32(mstr32, sz)
+ }
+ if err != nil {
+ return err
+ }
+ _, err = mw.Write(str)
+ return err
+}
+
// WriteComplex64 writes a complex64 to the writer
func (mw *Writer) WriteComplex64(f complex64) error {
o, err := mw.require(10)
@@ -509,7 +586,7 @@
// elapsed since "zero" Unix time, followed by 4 bytes
// for a big-endian 32-bit signed integer denoting
// the nanosecond offset of the time. This encoding
-// is intended to ease portability accross languages.
+// is intended to ease portability across languages.
// (Note that this is *not* the standard time.Time
// binary encoding, because its implementation relies
// heavily on the internal representation used by the
@@ -612,7 +689,7 @@
}
func (mw *Writer) writeMap(v reflect.Value) (err error) {
- if v.Elem().Kind() != reflect.String {
+ if v.Type().Key().Kind() != reflect.String {
return errors.New("msgp: map keys must be strings")
}
ks := v.MapKeys()
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
index 658102e..eaa03c4 100644
--- a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -22,10 +22,10 @@
// given size to the slice
func AppendMapHeader(b []byte, sz uint32) []byte {
switch {
- case sz < 16:
+ case sz <= 15:
return append(b, wfixmap(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n := ensure(b, 3)
prefixu16(o[n:], mmap16, uint16(sz))
return o
@@ -41,10 +41,10 @@
// the given size to the slice
func AppendArrayHeader(b []byte, sz uint32) []byte {
switch {
- case sz < 16:
+ case sz <= 15:
return append(b, wfixarray(uint8(sz)))
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n := ensure(b, 3)
prefixu16(o[n:], marray16, uint16(sz))
return o
@@ -75,29 +75,39 @@
// AppendInt64 appends an int64 to the slice
func AppendInt64(b []byte, i int64) []byte {
- a := abs(i)
+ if i >= 0 {
+ switch {
+ case i <= math.MaxInt8:
+ return append(b, wfixint(uint8(i)))
+ case i <= math.MaxInt16:
+ o, n := ensure(b, 3)
+ putMint16(o[n:], int16(i))
+ return o
+ case i <= math.MaxInt32:
+ o, n := ensure(b, 5)
+ putMint32(o[n:], int32(i))
+ return o
+ default:
+ o, n := ensure(b, 9)
+ putMint64(o[n:], i)
+ return o
+ }
+ }
switch {
- case i < 0 && i > -32:
+ case i >= -32:
return append(b, wnfixint(int8(i)))
-
- case i >= 0 && i < 128:
- return append(b, wfixint(uint8(i)))
-
- case a < math.MaxInt8:
+ case i >= math.MinInt8:
o, n := ensure(b, 2)
putMint8(o[n:], int8(i))
return o
-
- case a < math.MaxInt16:
+ case i >= math.MinInt16:
o, n := ensure(b, 3)
putMint16(o[n:], int16(i))
return o
-
- case a < math.MaxInt32:
+ case i >= math.MinInt32:
o, n := ensure(b, 5)
putMint32(o[n:], int32(i))
return o
-
default:
o, n := ensure(b, 9)
putMint64(o[n:], i)
@@ -120,20 +130,20 @@
// AppendUint64 appends a uint64 to the slice
func AppendUint64(b []byte, u uint64) []byte {
switch {
- case u < (1 << 7):
+ case u <= (1<<7)-1:
return append(b, wfixint(uint8(u)))
- case u < math.MaxUint8:
+ case u <= math.MaxUint8:
o, n := ensure(b, 2)
putMuint8(o[n:], uint8(u))
return o
- case u < math.MaxUint16:
+ case u <= math.MaxUint16:
o, n := ensure(b, 3)
putMuint16(o[n:], uint16(u))
return o
- case u < math.MaxUint32:
+ case u <= math.MaxUint32:
o, n := ensure(b, 5)
putMuint32(o[n:], uint32(u))
return o
@@ -152,7 +162,7 @@
// AppendUint8 appends a uint8 to the slice
func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
-// AppendByte is analagous to AppendUint8
+// AppendByte is analogous to AppendUint8
func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
// AppendUint16 appends a uint16 to the slice
@@ -167,11 +177,11 @@
var o []byte
var n int
switch {
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mbin8, uint8(sz))
n += 2
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mbin16, uint16(sz))
n += 3
@@ -197,15 +207,15 @@
var n int
var o []byte
switch {
- case sz < 32:
+ case sz <= 31:
o, n = ensure(b, 1+sz)
o[n] = wfixstr(uint8(sz))
n++
- case sz < math.MaxUint8:
+ case sz <= math.MaxUint8:
o, n = ensure(b, 2+sz)
prefixu8(o[n:], mstr8, uint8(sz))
n += 2
- case sz < math.MaxUint16:
+ case sz <= math.MaxUint16:
o, n = ensure(b, 3+sz)
prefixu16(o[n:], mstr16, uint16(sz))
n += 3
@@ -217,6 +227,33 @@
return o[:n+copy(o[n:], s)]
}
+// AppendStringFromBytes appends a []byte
+// as a MessagePack 'str' to the slice 'b.'
+func AppendStringFromBytes(b []byte, str []byte) []byte {
+ sz := len(str)
+ var n int
+ var o []byte
+ switch {
+ case sz <= 31:
+ o, n = ensure(b, 1+sz)
+ o[n] = wfixstr(uint8(sz))
+ n++
+ case sz <= math.MaxUint8:
+ o, n = ensure(b, 2+sz)
+ prefixu8(o[n:], mstr8, uint8(sz))
+ n += 2
+ case sz <= math.MaxUint16:
+ o, n = ensure(b, 3+sz)
+ prefixu16(o[n:], mstr16, uint16(sz))
+ n += 3
+ default:
+ o, n = ensure(b, 5+sz)
+ prefixu32(o[n:], mstr32, uint32(sz))
+ n += 5
+ }
+ return o[:n+copy(o[n:], str)]
+}
+
// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
func AppendComplex64(b []byte, c complex64) []byte {
o, n := ensure(b, Complex64Size)
@@ -362,7 +399,12 @@
}
}
return b, nil
-
+ case reflect.Ptr:
+ if v.IsNil() {
+ return AppendNil(b), err
+ }
+ b, err = AppendIntf(b, v.Elem().Interface())
+ return b, err
default:
return b, &ErrUnsupportedType{T: v.Type()}
}