[vendor] Delete unused deps
A few old tools have been deleted recently (e.g. fxrev.dev/670663), so
we no longer need many of these dependencies.
Generated by running:
1. `go mod vendor` (deletes files from the vendor/ directory)
2. `go mod tidy` (deletes unused entries from go.mod and go.sum)
Change-Id: I388f209dbefd41e374bd46589be180e3eddfd471
Reviewed-on: https://fuchsia-review.googlesource.com/c/infra/infra/+/679006
Commit-Queue: Auto-Submit <auto-submit@fuchsia-infra.iam.gserviceaccount.com>
Fuchsia-Auto-Submit: Oliver Newman <olivernewman@google.com>
Reviewed-by: Ina Huh <ihuh@google.com>
diff --git a/go.mod b/go.mod
index 8b0c844..e575673 100644
--- a/go.mod
+++ b/go.mod
@@ -7,18 +7,17 @@
cloud.google.com/go/logging v1.4.2
cloud.google.com/go/storage v1.18.2
github.com/bazelbuild/remote-apis-sdks v0.0.0-20220119194911-052cf871811d
- github.com/docker/docker v20.10.12+incompatible
github.com/fsnotify/fsnotify v1.5.1
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/golang/mock v1.6.0
github.com/google/go-cmp v0.5.6
github.com/google/subcommands v1.2.0
github.com/maruel/subcommands v1.1.1
- github.com/opencontainers/image-spec v1.0.2
github.com/pkg/errors v0.9.1
github.com/texttheater/golang-levenshtein v1.0.1
go.chromium.org/luci v0.0.0-20220214203325-112ac40a7268
go.skia.org/infra v0.0.0-20211215210855-9546fb8fb93a
+ golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
@@ -38,15 +37,10 @@
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
github.com/cncf/xds/go v0.0.0-20211215212155-112fc4fa679d // indirect
- github.com/containerd/containerd v1.5.8 // indirect
github.com/danjacques/gofslock v0.0.0-20220131014315-6e321f4509c8 // indirect
- github.com/docker/distribution v2.7.1+incompatible // indirect
- github.com/docker/go-connections v0.4.0 // indirect
- github.com/docker/go-units v0.4.0 // indirect
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
github.com/fiorix/go-web v1.0.1-0.20150221144011-5b593f1e8966 // indirect
- github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
@@ -62,10 +56,8 @@
github.com/klauspost/compress v1.13.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/mostynb/zstdpool-syncpool v0.0.11 // indirect
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 // indirect
- github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/pborman/uuid v1.2.1 // indirect
github.com/pkg/xattr v0.4.4 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
@@ -73,12 +65,10 @@
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
- github.com/sirupsen/logrus v1.8.1 // indirect
github.com/smartystreets/assertions v1.2.1 // indirect
github.com/zeebo/bencode v1.0.0 // indirect
go.opencensus.io v0.23.0 // indirect
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
- golang.org/x/exp v0.0.0-20220414153411-bcd21879b8fd // indirect
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
diff --git a/go.sum b/go.sum
index d4bc051..4624549 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,3 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -89,18 +88,11 @@
contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
@@ -117,32 +109,15 @@
github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
-github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
-github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
-github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
-github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
-github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.6.0/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
@@ -161,7 +136,6 @@
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis/v2 v2.15.1/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
@@ -181,7 +155,6 @@
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
@@ -202,60 +175,38 @@
github.com/bazelbuild/remote-apis v0.0.0-20211004185116-636121a32fa7 h1:2GUS7QocpkOOone/q7st8Jo0QE2Q9fkw1mMisD7EJl0=
github.com/bazelbuild/remote-apis v0.0.0-20211004185116-636121a32fa7/go.mod h1:ry8Y6CkQqCVcYsjPOlLXDX2iRVjOnjogdNwhvHmRcz8=
github.com/bazelbuild/remote-apis-sdks v0.0.0-20201110004117-e776219c9bb7/go.mod h1:SvHtabqK9r562BLgmQfd+s5hzd9UUS903b0OmQHvzrI=
-github.com/bazelbuild/remote-apis-sdks v0.0.0-20211112060257-af570c2721ed/go.mod h1:50ZasLDlgIh6V79vGr/vS7K8LXZC9xjJFJX8QCrxNhE=
-github.com/bazelbuild/remote-apis-sdks v0.0.0-20211209180244-db7d098b4a23 h1:Qy62wQBeELiwBtcTyKnjBCqzwsfMtDY1VgVXd+9kWKk=
-github.com/bazelbuild/remote-apis-sdks v0.0.0-20211209180244-db7d098b4a23/go.mod h1:XzcNCw8akq6qEXdeyeET8y74NX/JtD0RiFjk9I1Pqx0=
github.com/bazelbuild/remote-apis-sdks v0.0.0-20220119194911-052cf871811d h1:R/ovzS+mb/9G5MQ9PQEK1z16ICxlwoZzRJhcsQO+0gw=
github.com/bazelbuild/remote-apis-sdks v0.0.0-20220119194911-052cf871811d/go.mod h1:XzcNCw8akq6qEXdeyeET8y74NX/JtD0RiFjk9I1Pqx0=
github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M=
-github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
-github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
-github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bmatcuk/doublestar v1.2.2/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA=
-github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
-github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
-github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
-github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
-github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
-github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
-github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
-github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
-github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
@@ -276,108 +227,19 @@
github.com/cockroachdb/cockroach-go/v2 v2.1.0/go.mod h1:ilhrLnPDDwGHL+iK2UxQhp1UzUhst8sfItSAgCYwAyg=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
-github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
-github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
-github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
-github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
-github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
-github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
-github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
-github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
-github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
-github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
-github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
-github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
-github.com/containerd/containerd v1.5.8 h1:NmkCC1/QxyZFBny8JogwLpOy2f+VEbO/f6bV2Mqtwuw=
-github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
-github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
-github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
-github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
-github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
-github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
-github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
-github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
-github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
-github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
-github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
-github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
-github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
-github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
-github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
-github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
-github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
-github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
-github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
-github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
-github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
-github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
-github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
@@ -386,7 +248,6 @@
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/cznic/cc v0.0.0-20181122101902-d673e9b70d4d/go.mod h1:m3fD/V+XTB35Kh9zw6dzjMY+We0Q7PMf6LLIC4vuG9k=
github.com/cznic/fileutil v0.0.0-20181122101858-4d67cfea8c87/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
@@ -398,10 +259,6 @@
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/cznic/xc v0.0.0-20181122101856-45b06973881e/go.mod h1:3oFoiOvCDBYH+swwf5+k/woVmWy7h1Fcyu8Qig/jjX0=
-github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
-github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
-github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
-github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/danjacques/gofslock v0.0.0-20200623023034-5d0bd0fa6ef0/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U=
github.com/danjacques/gofslock v0.0.0-20220131014315-6e321f4509c8 h1:+4P40F8AqFAW4/ft2WXiZXrgtRbS8RLb61D8e6NcMw0=
github.com/danjacques/gofslock v0.0.0-20220131014315-6e321f4509c8/go.mod h1:VT5Ecrx/r1oHkQbiEBwkLiuQ51igUBmxXuiw9tnSLqY=
@@ -410,35 +267,19 @@
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/denisenkom/go-mssqldb v0.0.0-20200620013148-b91950f658ec/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
-github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E=
github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
-github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dhui/dktest v0.3.2/go.mod h1:l1/ib23a/CmxAe7yixtrYPc8Iy90Zy2udyaHINM5p58=
github.com/disintegration/gift v1.2.1/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20200213202729-31a86c4ab209/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U=
-github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
-github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
-github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -480,17 +321,13 @@
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
-github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
@@ -500,7 +337,6 @@
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@@ -562,24 +398,15 @@
github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
-github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
@@ -699,10 +526,8 @@
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -720,10 +545,8 @@
github.com/gopherjs/gopherwasm v1.0.0/go.mod h1:SkZ8z7CWBz5VXbhJel8TxCmAcsQqzgWGR/8nMhyhZSI=
github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
@@ -745,7 +568,6 @@
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -755,7 +577,6 @@
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
@@ -792,13 +613,9 @@
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
@@ -854,8 +671,6 @@
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
@@ -883,8 +698,6 @@
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
@@ -926,7 +739,6 @@
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -935,7 +747,6 @@
github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/maruel/subcommands v1.1.0/go.mod h1:b25AG9Eho2Rs1NUPAPAYBFy1B5y63QMxw/2WmLGO8m8=
github.com/maruel/subcommands v1.1.1 h1:+063/UDFVMvzZcyo8qlfpPhmjeLsT9yLUq+IKgqBWHI=
github.com/maruel/subcommands v1.1.1/go.mod h1:b25AG9Eho2Rs1NUPAPAYBFy1B5y63QMxw/2WmLGO8m8=
@@ -960,7 +771,6 @@
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@@ -971,8 +781,6 @@
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -987,33 +795,21 @@
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mostynb/zstdpool-syncpool v0.0.7/go.mod h1:YpzqIpN8xvRZZvemem7CMLPWkjuaKR37MnkQruSj6aw=
github.com/mostynb/zstdpool-syncpool v0.0.10/go.mod h1:BmhpjzZxG8KCduFi0N/Do6j9w+JYt6vR2cM8J9AwujI=
github.com/mostynb/zstdpool-syncpool v0.0.11 h1:mc1yt5PO+PloYLzLRO3Sb9DksZ8qgcpAk+HdQrZiaLY=
github.com/mostynb/zstdpool-syncpool v0.0.11/go.mod h1:E1YI76RAWRxXqicBa0HXnj9V/MH0RZ4CKpZzhorQkfc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -1027,7 +823,6 @@
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
@@ -1041,17 +836,13 @@
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/olivere/elastic/v7 v7.0.12/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -1061,32 +852,9 @@
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -1110,7 +878,6 @@
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/peterh/liner v1.1.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
@@ -1120,7 +887,6 @@
github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1131,19 +897,15 @@
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1151,26 +913,20 @@
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -1201,11 +957,9 @@
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -1217,15 +971,11 @@
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skia-dev/go2ts v1.5.0/go.mod h1:pE59J8wf5IHSMtPhH8PmWq5ZTLt6p1Ih+4ODd9fkGXk=
github.com/skia-dev/google-api-go-client v0.10.1-0.20200109184256-16c3d6f408b2/go.mod h1:N0iTzjbw95ZVfOL7wVngmVXE2kG0NP+LYlq3QMwf9Qo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -1234,7 +984,6 @@
github.com/smartystreets/assertions v1.2.1 h1:bKNHfEv7tSIjZ8JbKaFjzFINljxG4lzZvmHUnElzOIg=
github.com/smartystreets/assertions v1.2.1/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8=
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
@@ -1252,17 +1001,14 @@
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -1270,19 +1016,15 @@
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -1294,12 +1036,8 @@
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/texttheater/golang-levenshtein v1.0.1 h1:+cRNoVrfiwufQPhoMzB6N0Yf/Mqajr6t1lOv8GyGE2U=
github.com/texttheater/golang-levenshtein v1.0.1/go.mod h1:PYAKrbF5sAiq9wd+H82hs7gNaen0CplQ9uvm6+enD/8=
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -1311,20 +1049,11 @@
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/unrolled/secure v1.0.8/go.mod h1:fO+mEan+FLB0CdEnHf6Q4ZZVNqG+5fuLFnP8p0BXDPI=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
@@ -1332,7 +1061,6 @@
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
@@ -1346,32 +1074,21 @@
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
github.com/zeebo/bencode v1.0.0 h1:zgop0Wu1nu4IexAZeCZ5qbsjU4O1vMrfCrVgUjbHVuA=
github.com/zeebo/bencode v1.0.0/go.mod h1:Ct7CkrWIQuLWAy9M3atFHYq4kG9Ao/SsY5cdtCXmp9Y=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
go.chromium.org/luci v0.0.0-20201029184154-594d11850ebf/go.mod h1:MIQewVTLvOvc0UioV0JNqTNO/RspKFS0XEeoKrOxsdM=
-go.chromium.org/luci v0.0.0-20211215195530-b69fde9e0c44 h1:fvPj4BCwu5ZNxap829UZBEKo+EYBHiXFvUZl82oIXX0=
-go.chromium.org/luci v0.0.0-20211215195530-b69fde9e0c44/go.mod h1:EmoCxnMjcckPMwjmLDLgAH4ZR1Vr16+VUG2Net3dc+o=
go.chromium.org/luci v0.0.0-20220214203325-112ac40a7268 h1:jA2VooTx5LzlXIzxAxzdT1SxI82fHY5+KMNA0X2MCZ8=
go.chromium.org/luci v0.0.0-20220214203325-112ac40a7268/go.mod h1:5QcjvBYAd2SlkTQL/TiZ0Nuy6LeORJ0NwuTaeMt5L1c=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.larrymyers.com/protoc-gen-twirp_typescript v0.0.0-20201012232926-5c91a3223921/go.mod h1:51F13nJvsTFin0RTIOZL4z8RxGitAz4ww1TD21okMZs=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1393,21 +1110,16 @@
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/build v0.0.0-20191031202223-0706ea4fce0c/go.mod h1:Nl5grlQor/lxfX9FfGLe+g2cVSCiURG36KQgsg/ODs4=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1429,12 +1141,10 @@
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -1486,7 +1196,6 @@
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1505,14 +1214,12 @@
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1548,7 +1255,6 @@
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 h1:kmreh1vGI63l2FxOAYS3Yv6ATsi7lSTuwNSVbGfJV9I=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1565,7 +1271,6 @@
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
@@ -1610,42 +1315,29 @@
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1656,25 +1348,17 @@
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1683,11 +1367,9 @@
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210507014357-30e306a8bba5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1702,13 +1384,10 @@
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211215211219-4abf325e0275 h1:QrMf/wK/gIZUuD4s06oYPfuSxBzj4mL/rjA7D++U14o=
-golang.org/x/sys v0.0.0-20211215211219-4abf325e0275/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1831,7 +1510,6 @@
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@@ -1860,7 +1538,6 @@
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
@@ -1886,14 +1563,12 @@
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
@@ -1903,7 +1578,6 @@
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1935,7 +1609,6 @@
google.golang.org/genproto v0.0.0-20201021134325-0d71844de594/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201029200359-8ce4113da6f7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1979,7 +1652,6 @@
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1991,7 +1663,6 @@
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -2032,11 +1703,9 @@
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -2045,18 +1714,14 @@
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8=
@@ -2072,6 +1737,7 @@
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -2080,10 +1746,7 @@
gorm.io/driver/postgres v1.0.5/go.mod h1:qrD92UurYzNctBMVCJ8C3VQEjffEuphycXtxOudXNCA=
gorm.io/gorm v1.20.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gorm.io/gorm v1.20.6/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -2094,42 +1757,20 @@
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
-k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
-k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
-k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
-k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
-k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
-k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
-k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo=
-k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
-k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
-k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q=
-k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
-k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
-k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw=
k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U=
-k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
-k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
-k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
@@ -2147,14 +1788,11 @@
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY=
sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0=
sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk=
sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE
deleted file mode 100644
index 584149b..0000000
--- a/vendor/github.com/containerd/containerd/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright The containerd Authors
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE
deleted file mode 100644
index 8915f02..0000000
--- a/vendor/github.com/containerd/containerd/NOTICE
+++ /dev/null
@@ -1,16 +0,0 @@
-Docker
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go
deleted file mode 100644
index 05a3522..0000000
--- a/vendor/github.com/containerd/containerd/errdefs/errors.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-// Package errdefs defines the common errors used throughout containerd
-// packages.
-//
-// Use with errors.Wrap and error.Wrapf to add context to an error.
-//
-// To detect an error class, use the IsXXX functions to tell whether an error
-// is of a certain type.
-//
-// The functions ToGRPC and FromGRPC can be used to map server-side and
-// client-side errors to the correct types.
-package errdefs
-
-import (
- "context"
-
- "github.com/pkg/errors"
-)
-
-// Definitions of common error types used throughout containerd. All containerd
-// errors returned by most packages will map into one of these errors classes.
-// Packages should return errors of these types when they want to instruct a
-// client to take a particular action.
-//
-// For the most part, we just try to provide local grpc errors. Most conditions
-// map very well to those defined by grpc.
-var (
- ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
- ErrInvalidArgument = errors.New("invalid argument")
- ErrNotFound = errors.New("not found")
- ErrAlreadyExists = errors.New("already exists")
- ErrFailedPrecondition = errors.New("failed precondition")
- ErrUnavailable = errors.New("unavailable")
- ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
-)
-
-// IsInvalidArgument returns true if the error is due to an invalid argument
-func IsInvalidArgument(err error) bool {
- return errors.Is(err, ErrInvalidArgument)
-}
-
-// IsNotFound returns true if the error is due to a missing object
-func IsNotFound(err error) bool {
- return errors.Is(err, ErrNotFound)
-}
-
-// IsAlreadyExists returns true if the error is due to an already existing
-// metadata item
-func IsAlreadyExists(err error) bool {
- return errors.Is(err, ErrAlreadyExists)
-}
-
-// IsFailedPrecondition returns true if an operation could not proceed to the
-// lack of a particular condition
-func IsFailedPrecondition(err error) bool {
- return errors.Is(err, ErrFailedPrecondition)
-}
-
-// IsUnavailable returns true if the error is due to a resource being unavailable
-func IsUnavailable(err error) bool {
- return errors.Is(err, ErrUnavailable)
-}
-
-// IsNotImplemented returns true if the error is due to not being implemented
-func IsNotImplemented(err error) bool {
- return errors.Is(err, ErrNotImplemented)
-}
-
-// IsCanceled returns true if the error is due to `context.Canceled`.
-func IsCanceled(err error) bool {
- return errors.Is(err, context.Canceled)
-}
-
-// IsDeadlineExceeded returns true if the error is due to
-// `context.DeadlineExceeded`.
-func IsDeadlineExceeded(err error) bool {
- return errors.Is(err, context.DeadlineExceeded)
-}
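
For reference, a minimal sketch (not part of this change) of how callers typically consumed the sentinel errors and IsXXX helpers deleted above; findImage is a hypothetical lookup, and the example assumes the containerd errdefs and pkg/errors packages are still importable:

package main

import (
    "fmt"

    "github.com/containerd/containerd/errdefs"
    "github.com/pkg/errors"
)

// findImage is a hypothetical lookup that wraps the errdefs sentinel so
// callers can classify the failure without matching on error strings.
func findImage(name string) error {
    return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
    if err := findImage("example/image"); errdefs.IsNotFound(err) {
        fmt.Println("not found:", err)
    }
}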
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
deleted file mode 100644
index 209f63b..0000000
--- a/vendor/github.com/containerd/containerd/errdefs/grpc.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package errdefs
-
-import (
- "context"
- "strings"
-
- "github.com/pkg/errors"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// ToGRPC will attempt to map the backend containerd error into a grpc error,
-// using the original error message as a description.
-//
-// Further information may be extracted from certain errors depending on their
-// type.
-//
-// If the error is unmapped, the original error will be returned to be handled
-// by the regular grpc error handling stack.
-func ToGRPC(err error) error {
- if err == nil {
- return nil
- }
-
- if isGRPCError(err) {
- // error has already been mapped to grpc
- return err
- }
-
- switch {
- case IsInvalidArgument(err):
- return status.Errorf(codes.InvalidArgument, err.Error())
- case IsNotFound(err):
- return status.Errorf(codes.NotFound, err.Error())
- case IsAlreadyExists(err):
- return status.Errorf(codes.AlreadyExists, err.Error())
- case IsFailedPrecondition(err):
- return status.Errorf(codes.FailedPrecondition, err.Error())
- case IsUnavailable(err):
- return status.Errorf(codes.Unavailable, err.Error())
- case IsNotImplemented(err):
- return status.Errorf(codes.Unimplemented, err.Error())
- case IsCanceled(err):
- return status.Errorf(codes.Canceled, err.Error())
- case IsDeadlineExceeded(err):
- return status.Errorf(codes.DeadlineExceeded, err.Error())
- }
-
- return err
-}
-
-// ToGRPCf maps the error to grpc error codes, assembling the formatting string
-// and combining it with the target error string.
-//
-// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
-func ToGRPCf(err error, format string, args ...interface{}) error {
- return ToGRPC(errors.Wrapf(err, format, args...))
-}
-
-// FromGRPC returns the underlying error from a grpc service based on the grpc error code
-func FromGRPC(err error) error {
- if err == nil {
- return nil
- }
-
- var cls error // divide these into error classes, becomes the cause
-
- switch code(err) {
- case codes.InvalidArgument:
- cls = ErrInvalidArgument
- case codes.AlreadyExists:
- cls = ErrAlreadyExists
- case codes.NotFound:
- cls = ErrNotFound
- case codes.Unavailable:
- cls = ErrUnavailable
- case codes.FailedPrecondition:
- cls = ErrFailedPrecondition
- case codes.Unimplemented:
- cls = ErrNotImplemented
- case codes.Canceled:
- cls = context.Canceled
- case codes.DeadlineExceeded:
- cls = context.DeadlineExceeded
- default:
- cls = ErrUnknown
- }
-
- msg := rebaseMessage(cls, err)
- if msg != "" {
- err = errors.Wrap(cls, msg)
- } else {
- err = errors.WithStack(cls)
- }
-
- return err
-}
-
-// rebaseMessage removes the repeats for an error at the end of an error
-// string. This will happen when taking an error over grpc then remapping it.
-//
-// Effectively, we just remove the string of cls from the end of err if it
-// appears there.
-func rebaseMessage(cls error, err error) string {
- desc := errDesc(err)
- clss := cls.Error()
- if desc == clss {
- return ""
- }
-
- return strings.TrimSuffix(desc, ": "+clss)
-}
-
-func isGRPCError(err error) bool {
- _, ok := status.FromError(err)
- return ok
-}
-
-func code(err error) codes.Code {
- if s, ok := status.FromError(err); ok {
- return s.Code()
- }
- return codes.Unknown
-}
-
-func errDesc(err error) string {
- if s, ok := status.FromError(err); ok {
- return s.Message()
- }
- return err.Error()
-}
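
Similarly, a hedged sketch of the gRPC round trip the deleted errdefs/grpc.go supported, with an illustrative error message: ToGRPC maps a sentinel to a status code on the server side, and FromGRPC recovers the sentinel class on the client side:

package main

import (
    "fmt"

    "github.com/containerd/containerd/errdefs"
    "github.com/pkg/errors"
)

func main() {
    // Server side: wrap a sentinel, then convert it into a gRPC status error.
    orig := errors.Wrap(errdefs.ErrNotFound, "no such container")
    grpcErr := errdefs.ToGRPC(orig) // carries codes.NotFound

    // Client side: map the status code back onto the sentinel class.
    back := errdefs.FromGRPC(grpcErr)
    fmt.Println(errdefs.IsNotFound(back)) // true
}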
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
deleted file mode 100644
index 37b6a7d..0000000
--- a/vendor/github.com/containerd/containerd/log/context.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package log
-
-import (
- "context"
-
- "github.com/sirupsen/logrus"
-)
-
-var (
- // G is an alias for GetLogger.
- //
- // We may want to define this locally to a package to get package tagged log
- // messages.
- G = GetLogger
-
- // L is an alias for the standard logger.
- L = logrus.NewEntry(logrus.StandardLogger())
-)
-
-type (
- loggerKey struct{}
-)
-
-const (
- // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
- // ensure the formatted time is always the same number of characters.
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-
- // TextFormat represents the text logging format
- TextFormat = "text"
-
- // JSONFormat represents the JSON logging format
- JSONFormat = "json"
-)
-
-// WithLogger returns a new context with the provided logger. Use in
-// combination with logger.WithField(s) for great effect.
-func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
- return context.WithValue(ctx, loggerKey{}, logger)
-}
-
-// GetLogger retrieves the current logger from the context. If no logger is
-// available, the default logger is returned.
-func GetLogger(ctx context.Context) *logrus.Entry {
- logger := ctx.Value(loggerKey{})
-
- if logger == nil {
- return L
- }
-
- return logger.(*logrus.Entry)
-}
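
A small illustrative sketch of the context-scoped logger pattern the deleted log package provided, assuming logrus is still importable; the "module" field is an arbitrary example value:

package main

import (
    "context"

    "github.com/containerd/containerd/log"
    "github.com/sirupsen/logrus"
)

func main() {
    // Attach a field-tagged logger to the context once...
    ctx := log.WithLogger(context.Background(), logrus.WithField("module", "example"))

    // ...and retrieve it anywhere downstream; log.G falls back to the
    // default logger when the context carries none.
    log.G(ctx).Info("hello from a context-scoped logger")
}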
diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go
deleted file mode 100644
index c7657e1..0000000
--- a/vendor/github.com/containerd/containerd/platforms/compare.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "strconv"
- "strings"
-
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// MatchComparer is able to match and compare platforms to
-// filter and sort platforms.
-type MatchComparer interface {
- Matcher
-
- Less(specs.Platform, specs.Platform) bool
-}
-
-// platformVector returns an (ordered) vector of appropriate specs.Platform
-// objects to try matching for the given platform object (see platforms.Only).
-func platformVector(platform specs.Platform) []specs.Platform {
- vector := []specs.Platform{platform}
-
- switch platform.Architecture {
- case "amd64":
- vector = append(vector, specs.Platform{
- Architecture: "386",
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: platform.Variant,
- })
- case "arm":
- if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
- for armVersion--; armVersion >= 5; armVersion-- {
- vector = append(vector, specs.Platform{
- Architecture: platform.Architecture,
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: "v" + strconv.Itoa(armVersion),
- })
- }
- }
- case "arm64":
- variant := platform.Variant
- if variant == "" {
- variant = "v8"
- }
- vector = append(vector, platformVector(specs.Platform{
- Architecture: "arm",
- OS: platform.OS,
- OSVersion: platform.OSVersion,
- OSFeatures: platform.OSFeatures,
- Variant: variant,
- })...)
- }
-
- return vector
-}
-
-// Only returns a match comparer for a single platform
-// using default resolution logic for the platform.
-//
-// For arm/v8, will also match arm/v7, arm/v6 and arm/v5
-// For arm/v7, will also match arm/v6 and arm/v5
-// For arm/v6, will also match arm/v5
-// For amd64, will also match 386
-func Only(platform specs.Platform) MatchComparer {
- return Ordered(platformVector(Normalize(platform))...)
-}
-
-// OnlyStrict returns a match comparer for a single platform.
-//
-// Unlike Only, OnlyStrict does not match sub platforms.
-// So, "arm/vN" will not match "arm/vM" where M < N,
-// and "amd64" will not also match "386".
-//
-// OnlyStrict matches non-canonical forms.
-// So, "arm64" matches "arm/64/v8".
-func OnlyStrict(platform specs.Platform) MatchComparer {
- return Ordered(Normalize(platform))
-}
-
-// Ordered returns a platform MatchComparer which matches any of the platforms
-// but orders them in order they are provided.
-func Ordered(platforms ...specs.Platform) MatchComparer {
- matchers := make([]Matcher, len(platforms))
- for i := range platforms {
- matchers[i] = NewMatcher(platforms[i])
- }
- return orderedPlatformComparer{
- matchers: matchers,
- }
-}
-
-// Any returns a platform MatchComparer which matches any of the platforms
-// with no preference for ordering.
-func Any(platforms ...specs.Platform) MatchComparer {
- matchers := make([]Matcher, len(platforms))
- for i := range platforms {
- matchers[i] = NewMatcher(platforms[i])
- }
- return anyPlatformComparer{
- matchers: matchers,
- }
-}
-
-// All is a platform MatchComparer which matches all platforms
-// with preference for ordering.
-var All MatchComparer = allPlatformComparer{}
-
-type orderedPlatformComparer struct {
- matchers []Matcher
-}
-
-func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
- for _, m := range c.matchers {
- if m.Match(platform) {
- return true
- }
- }
- return false
-}
-
-func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
- for _, m := range c.matchers {
- p1m := m.Match(p1)
- p2m := m.Match(p2)
- if p1m && !p2m {
- return true
- }
- if p1m || p2m {
- return false
- }
- }
- return false
-}
-
-type anyPlatformComparer struct {
- matchers []Matcher
-}
-
-func (c anyPlatformComparer) Match(platform specs.Platform) bool {
- for _, m := range c.matchers {
- if m.Match(platform) {
- return true
- }
- }
- return false
-}
-
-func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
- var p1m, p2m bool
- for _, m := range c.matchers {
- if !p1m && m.Match(p1) {
- p1m = true
- }
- if !p2m && m.Match(p2) {
- p2m = true
- }
- if p1m && p2m {
- return false
- }
- }
- // If one matches, and the other does, sort match first
- return p1m && !p2m
-}
-
-type allPlatformComparer struct{}
-
-func (allPlatformComparer) Match(specs.Platform) bool {
- return true
-}
-
-func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
- return false
-}
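
An illustrative sketch, using made-up candidate platforms, of how the MatchComparer helpers deleted above were used to pick the preferred entry from something like an image index:

package main

import (
    "fmt"
    "sort"

    "github.com/containerd/containerd/platforms"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    // Hypothetical candidates, e.g. from an image index.
    candidates := []specs.Platform{
        {OS: "linux", Architecture: "arm", Variant: "v7"},
        {OS: "windows", Architecture: "amd64"},
        {OS: "linux", Architecture: "amd64"},
    }

    // Prefer linux/amd64; Only also accepts the platforms it subsumes (386).
    cmp := platforms.Only(specs.Platform{OS: "linux", Architecture: "amd64"})

    // Sort matching platforms to the front, best match first.
    sort.SliceStable(candidates, func(i, j int) bool {
        return cmp.Less(candidates[i], candidates[j])
    })
    fmt.Println(platforms.Format(candidates[0])) // linux/amd64
}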
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
deleted file mode 100644
index 4a7177e..0000000
--- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "bufio"
- "os"
- "runtime"
- "strings"
- "sync"
-
- "github.com/containerd/containerd/errdefs"
- "github.com/containerd/containerd/log"
- "github.com/pkg/errors"
-)
-
-// Present the ARM instruction set architecture, eg: v7, v8
-// Don't use this value directly; call cpuVariant() instead.
-var cpuVariantValue string
-
-var cpuVariantOnce sync.Once
-
-func cpuVariant() string {
- cpuVariantOnce.Do(func() {
- if isArmArch(runtime.GOARCH) {
- cpuVariantValue = getCPUVariant()
- }
- })
- return cpuVariantValue
-}
-
-// For Linux, the kernel has already detected the ABI, ISA and Features.
-// So we don't need to access the ARM registers to detect platform information
-// by ourselves. We can just parse these information from /proc/cpuinfo
-func getCPUInfo(pattern string) (info string, err error) {
- if !isLinuxOS(runtime.GOOS) {
- return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
- }
-
- cpuinfo, err := os.Open("/proc/cpuinfo")
- if err != nil {
- return "", err
- }
- defer cpuinfo.Close()
-
- // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse
- // the first core is enough.
- scanner := bufio.NewScanner(cpuinfo)
- for scanner.Scan() {
- newline := scanner.Text()
- list := strings.Split(newline, ":")
-
- if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
- return strings.TrimSpace(list[1]), nil
- }
- }
-
- // Check whether the scanner encountered errors
- err = scanner.Err()
- if err != nil {
- return "", err
- }
-
- return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
-}
-
-func getCPUVariant() string {
- if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
- // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use
- // runtime.GOARCH to determine the variants
- var variant string
- switch runtime.GOARCH {
- case "arm64":
- variant = "v8"
- case "arm":
- variant = "v7"
- default:
- variant = "unknown"
- }
-
- return variant
- }
-
- variant, err := getCPUInfo("Cpu architecture")
- if err != nil {
- log.L.WithError(err).Error("failure getting variant")
- return ""
- }
-
- // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7")
- // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
- if runtime.GOARCH == "arm" && variant == "7" {
- model, err := getCPUInfo("model name")
- if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
- variant = "6"
- }
- }
-
- switch strings.ToLower(variant) {
- case "8", "aarch64":
- variant = "v8"
- case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
- variant = "v7"
- case "6", "6tej":
- variant = "v6"
- case "5", "5t", "5te", "5tej":
- variant = "v5"
- case "4", "4t":
- variant = "v4"
- case "3":
- variant = "v3"
- default:
- variant = "unknown"
- }
-
- return variant
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go
deleted file mode 100644
index 6ede940..0000000
--- a/vendor/github.com/containerd/containerd/platforms/database.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "runtime"
- "strings"
-)
-
-// isLinuxOS returns true if the operating system is Linux.
-//
-// The OS value should be normalized before calling this function.
-func isLinuxOS(os string) bool {
- return os == "linux"
-}
-
-// These function are generated from https://golang.org/src/go/build/syslist.go.
-//
-// We use switch statements because they are slightly faster than map lookups
-// and use a little less memory.
-
-// isKnownOS returns true if we know about the operating system.
-//
-// The OS value should be normalized before calling this function.
-func isKnownOS(os string) bool {
- switch os {
- case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
- return true
- }
- return false
-}
-
-// isArmArch returns true if the architecture is ARM.
-//
-// The arch value should be normalized before being passed to this function.
-func isArmArch(arch string) bool {
- switch arch {
- case "arm", "arm64":
- return true
- }
- return false
-}
-
-// isKnownArch returns true if we know about the architecture.
-//
-// The arch value should be normalized before being passed to this function.
-func isKnownArch(arch string) bool {
- switch arch {
- case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
- return true
- }
- return false
-}
-
-func normalizeOS(os string) string {
- if os == "" {
- return runtime.GOOS
- }
- os = strings.ToLower(os)
-
- switch os {
- case "macos":
- os = "darwin"
- }
- return os
-}
-
-// normalizeArch normalizes the architecture.
-func normalizeArch(arch, variant string) (string, string) {
- arch, variant = strings.ToLower(arch), strings.ToLower(variant)
- switch arch {
- case "i386":
- arch = "386"
- variant = ""
- case "x86_64", "x86-64":
- arch = "amd64"
- variant = ""
- case "aarch64", "arm64":
- arch = "arm64"
- switch variant {
- case "8", "v8":
- variant = ""
- }
- case "armhf":
- arch = "arm"
- variant = "v7"
- case "armel":
- arch = "arm"
- variant = "v6"
- case "arm":
- switch variant {
- case "", "7":
- variant = "v7"
- case "5", "6", "8":
- variant = "v" + variant
- }
- }
-
- return arch, variant
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go
deleted file mode 100644
index cb77fbc..0000000
--- a/vendor/github.com/containerd/containerd/platforms/defaults.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "runtime"
-
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// DefaultString returns the default string specifier for the platform.
-func DefaultString() string {
- return Format(DefaultSpec())
-}
-
-// DefaultSpec returns the current platform's default platform specification.
-func DefaultSpec() specs.Platform {
- return specs.Platform{
- OS: runtime.GOOS,
- Architecture: runtime.GOARCH,
- // The Variant field will be empty if arch != ARM.
- Variant: cpuVariant(),
- }
-}
-
-// DefaultStrict returns strict form of Default.
-func DefaultStrict() MatchComparer {
- return OnlyStrict(DefaultSpec())
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go
deleted file mode 100644
index e8a7d5f..0000000
--- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-// Default returns the default matcher for the platform.
-func Default() MatchComparer {
- return Only(DefaultSpec())
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
deleted file mode 100644
index 0c380e3..0000000
--- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// +build windows
-
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-package platforms
-
-import (
- "fmt"
- "runtime"
- "strconv"
- "strings"
-
- imagespec "github.com/opencontainers/image-spec/specs-go/v1"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
- "golang.org/x/sys/windows"
-)
-
-type matchComparer struct {
- defaults Matcher
- osVersionPrefix string
-}
-
-// Match matches platform with the same windows major, minor
-// and build version.
-func (m matchComparer) Match(p imagespec.Platform) bool {
- if m.defaults.Match(p) {
- // TODO(windows): Figure out whether OSVersion is deprecated.
- return strings.HasPrefix(p.OSVersion, m.osVersionPrefix)
- }
- return false
-}
-
-// Less sorts matched platforms in front of other platforms.
-// For matched platforms, it puts platforms with larger revision
-// number in front.
-func (m matchComparer) Less(p1, p2 imagespec.Platform) bool {
- m1, m2 := m.Match(p1), m.Match(p2)
- if m1 && m2 {
- r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion)
- return r1 > r2
- }
- return m1 && !m2
-}
-
-func revision(v string) int {
- parts := strings.Split(v, ".")
- if len(parts) < 4 {
- return 0
- }
- r, err := strconv.Atoi(parts[3])
- if err != nil {
- return 0
- }
- return r
-}
-
-// Default returns the current platform's default platform specification.
-func Default() MatchComparer {
- major, minor, build := windows.RtlGetNtVersionNumbers()
- return matchComparer{
- defaults: Ordered(DefaultSpec(), specs.Platform{
- OS: "linux",
- Architecture: runtime.GOARCH,
- }),
- osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build),
- }
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go
deleted file mode 100644
index 088bdea..0000000
--- a/vendor/github.com/containerd/containerd/platforms/platforms.go
+++ /dev/null
@@ -1,278 +0,0 @@
-/*
- Copyright The containerd Authors.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-*/
-
-// Package platforms provides a toolkit for normalizing, matching and
-// specifying container platforms.
-//
-// Centered around OCI platform specifications, we define a string-based
-// specifier syntax that can be used for user input. With a specifier, users
-// only need to specify the parts of the platform that are relevant to their
-// context, providing an operating system or architecture or both.
-//
-// How do I use this package?
-//
-// The vast majority of use cases should simply use the match function with
-// user input. The first step is to parse a specifier into a matcher:
-//
-// m, err := Parse("linux")
-// if err != nil { ... }
-//
-// Once you have a matcher, use it to match against the platform declared by a
-// component, typically from an image or runtime. Since extracting an images
-// platform is a little more involved, we'll use an example against the
-// platform default:
-//
-// if ok := m.Match(Default()); !ok { /* doesn't match */ }
-//
-// This can be composed in loops for resolving runtimes or used as a filter for
-// fetch and select images.
-//
-// More details of the specifier syntax and platform spec follow.
-//
-// Declaring Platform Support
-//
-// Components that have strict platform requirements should use the OCI
-// platform specification to declare their support. Typically, this will be
-// images and runtimes that should make these declaring which platform they
-// support specifically. This looks roughly as follows:
-//
-// type Platform struct {
-// Architecture string
-// OS string
-// Variant string
-// }
-//
-// Most images and runtimes should at least set Architecture and OS, according
-// to their GOARCH and GOOS values, respectively (follow the OCI image
-// specification when in doubt). ARM should set variant under certain
-// discussions, which are outlined below.
-//
-// Platform Specifiers
-//
-// While the OCI platform specifications provide a tool for components to
-// specify structured information, user input typically doesn't need the full
-// context and much can be inferred. To solve this problem, we introduced
-// "specifiers". A specifier has the format
-// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
-// operating system or the architecture or both.
-//
-// An example of a common specifier is `linux/amd64`. If the host has a default
-// of runtime that matches this, the user can simply provide the component that
-// matters. For example, if a image provides amd64 and arm64 support, the
-// operating system, `linux` can be inferred, so they only have to provide
-// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
-// where the architecture may be known but a runtime may support images from
-// different operating systems.
-//
-// Normalization
-//
-// Because not all users are familiar with the way the Go runtime represents
-// platforms, several normalizations have been provided to make this package
-// easier to user.
-//
-// The following are performed for architectures:
-//
-// Value Normalized
-// aarch64 arm64
-// armhf arm
-// armel arm/v6
-// i386 386
-// x86_64 amd64
-// x86-64 amd64
-//
-// We also normalize the operating system `macos` to `darwin`.
-//
-// ARM Support
-//
-// To qualify ARM architecture, the Variant field is used to qualify the arm
-// version. The most common arm version, v7, is represented without the variant
-// unless it is explicitly provided. This is treated as equivalent to armhf. A
-// previous architecture, armel, will be normalized to arm/v6.
-//
-// While these normalizations are provided, their support on arm platforms has
-// not yet been fully implemented and tested.
-package platforms
-
-import (
- "regexp"
- "runtime"
- "strconv"
- "strings"
-
- "github.com/containerd/containerd/errdefs"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
- "github.com/pkg/errors"
-)
-
-var (
- specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
-)
-
-// Matcher matches platforms specifications, provided by an image or runtime.
-type Matcher interface {
- Match(platform specs.Platform) bool
-}
-
-// NewMatcher returns a simple matcher based on the provided platform
-// specification. The returned matcher only looks for equality based on os,
-// architecture and variant.
-//
-// One may implement their own matcher if this doesn't provide the required
-// functionality.
-//
-// Applications should opt to use `Match` over directly parsing specifiers.
-func NewMatcher(platform specs.Platform) Matcher {
- return &matcher{
- Platform: Normalize(platform),
- }
-}
-
-type matcher struct {
- specs.Platform
-}
-
-func (m *matcher) Match(platform specs.Platform) bool {
- normalized := Normalize(platform)
- return m.OS == normalized.OS &&
- m.Architecture == normalized.Architecture &&
- m.Variant == normalized.Variant
-}
-
-func (m *matcher) String() string {
- return Format(m.Platform)
-}
-
-// Parse parses the platform specifier syntax into a platform declaration.
-//
-// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
-// The minimum required information for a platform specifier is the operating
-// system or architecture. If there is only a single string (no slashes), the
-// value will be matched against the known set of operating systems, then fall
-// back to the known set of architectures. The missing component will be
-// inferred based on the local environment.
-func Parse(specifier string) (specs.Platform, error) {
- if strings.Contains(specifier, "*") {
- // TODO(stevvooe): need to work out exact wildcard handling
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
- }
-
- parts := strings.Split(specifier, "/")
-
- for _, part := range parts {
- if !specifierRe.MatchString(part) {
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
- }
- }
-
- var p specs.Platform
- switch len(parts) {
- case 1:
- // in this case, we will test that the value might be an OS, then look
- // it up. If it is not known, we'll treat it as an architecture. Since
- // we have very little information about the platform here, we are
- // going to be a little more strict if we don't know about the argument
- // value.
- p.OS = normalizeOS(parts[0])
- if isKnownOS(p.OS) {
- // picks a default architecture
- p.Architecture = runtime.GOARCH
- if p.Architecture == "arm" && cpuVariant() != "v7" {
- p.Variant = cpuVariant()
- }
-
- return p, nil
- }
-
- p.Architecture, p.Variant = normalizeArch(parts[0], "")
- if p.Architecture == "arm" && p.Variant == "v7" {
- p.Variant = ""
- }
- if isKnownArch(p.Architecture) {
- p.OS = runtime.GOOS
- return p, nil
- }
-
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
- case 2:
- // In this case, we treat as a regular os/arch pair. We don't care
- // about whether or not we know of the platform.
- p.OS = normalizeOS(parts[0])
- p.Architecture, p.Variant = normalizeArch(parts[1], "")
- if p.Architecture == "arm" && p.Variant == "v7" {
- p.Variant = ""
- }
-
- return p, nil
- case 3:
- // we have a fully specified variant, this is rare
- p.OS = normalizeOS(parts[0])
- p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
- if p.Architecture == "arm64" && p.Variant == "" {
- p.Variant = "v8"
- }
-
- return p, nil
- }
-
- return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
-}
-
-// MustParse is like Parses but panics if the specifier cannot be parsed.
-// Simplifies initialization of global variables.
-func MustParse(specifier string) specs.Platform {
- p, err := Parse(specifier)
- if err != nil {
- panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
- }
- return p
-}
-
-// Format returns a string specifier from the provided platform specification.
-func Format(platform specs.Platform) string {
- if platform.OS == "" {
- return "unknown"
- }
-
- return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
-}
-
-func joinNotEmpty(s ...string) string {
- var ss []string
- for _, s := range s {
- if s == "" {
- continue
- }
-
- ss = append(ss, s)
- }
-
- return strings.Join(ss, "/")
-}
-
-// Normalize validates and translate the platform to the canonical value.
-//
-// For example, if "Aarch64" is encountered, we change it to "arm64" or if
-// "x86_64" is encountered, it becomes "amd64".
-func Normalize(platform specs.Platform) specs.Platform {
- platform.OS = normalizeOS(platform.OS)
- platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
-
- // these fields are deprecated, remove them
- platform.OSFeatures = nil
- platform.OSVersion = ""
-
- return platform
-}
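
Finally, a brief sketch of the specifier flow described in the deleted package documentation: parse a specifier, print its canonical form, and test it against the host; the printed match result depends on the machine this runs on:

package main

import (
    "fmt"

    "github.com/containerd/containerd/platforms"
)

func main() {
    // Parse a specifier; omitted parts are inferred from the host.
    p, err := platforms.Parse("linux/arm64")
    if err != nil {
        panic(err)
    }
    fmt.Println(platforms.Format(p)) // linux/arm64

    // Only also matches compatible sub-platforms (e.g. arm/v7 on arm64).
    fmt.Println(platforms.Only(p).Match(platforms.DefaultSpec()))
}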
diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE
deleted file mode 100644
index e06d208..0000000
--- a/vendor/github.com/docker/distribution/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
deleted file mode 100644
index 71327dc..0000000
--- a/vendor/github.com/docker/distribution/digestset/set.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package digestset
-
-import (
- "errors"
- "sort"
- "strings"
- "sync"
-
- digest "github.com/opencontainers/go-digest"
-)
-
-var (
- // ErrDigestNotFound is used when a matching digest
- // could not be found in a set.
- ErrDigestNotFound = errors.New("digest not found")
-
- // ErrDigestAmbiguous is used when multiple digests
- // are found in a set. None of the matching digests
- // should be considered valid matches.
- ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by easily referenced by a string
-// representation of the digest as well as short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
- mutex sync.RWMutex
- entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
- return &Set{
- entries: digestEntries{},
- }
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
- if len(hex) == len(shortHex) {
- if hex != shortHex {
- return false
- }
- if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- } else if !strings.HasPrefix(hex, shortHex) {
- return false
- } else if len(shortAlg) > 0 && string(alg) != shortAlg {
- return false
- }
- return true
-}
-
-// Lookup looks for a digest matching the given string representation.
-// If no digests could be found ErrDigestNotFound will be returned
-// with an empty digest value. If multiple matches are found
-// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (digest.Digest, error) {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- if len(dst.entries) == 0 {
- return "", ErrDigestNotFound
- }
- var (
- searchFunc func(int) bool
- alg digest.Algorithm
- hex string
- )
- dgst, err := digest.Parse(d)
- if err == digest.ErrDigestInvalidFormat {
- hex = d
- searchFunc = func(i int) bool {
- return dst.entries[i].val >= d
- }
- } else {
- hex = dgst.Hex()
- alg = dgst.Algorithm()
- searchFunc = func(i int) bool {
- if dst.entries[i].val == hex {
- return dst.entries[i].alg >= alg
- }
- return dst.entries[i].val >= hex
- }
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
- return "", ErrDigestNotFound
- }
- if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
- return dst.entries[idx].digest, nil
- }
- if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
- return "", ErrDigestAmbiguous
- }
-
- return dst.entries[idx].digest, nil
-}
-
-// Add adds the given digest to the set. An error will be returned
-// if the given digest is invalid. If the digest already exists in the
-// set, this operation will be a no-op.
-func (dst *Set) Add(d digest.Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- if idx == len(dst.entries) {
- dst.entries = append(dst.entries, entry)
- return nil
- } else if dst.entries[idx].digest == d {
- return nil
- }
-
- entries := append(dst.entries, nil)
- copy(entries[idx+1:], entries[idx:len(entries)-1])
- entries[idx] = entry
- dst.entries = entries
- return nil
-}
-
-// Remove removes the given digest from the set. An error will be
-// returned if the given digest is invalid. If the digest does
-// not exist in the set, this operation will be a no-op.
-func (dst *Set) Remove(d digest.Digest) error {
- if err := d.Validate(); err != nil {
- return err
- }
- dst.mutex.Lock()
- defer dst.mutex.Unlock()
- entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
- searchFunc := func(i int) bool {
- if dst.entries[i].val == entry.val {
- return dst.entries[i].alg >= entry.alg
- }
- return dst.entries[i].val >= entry.val
- }
- idx := sort.Search(len(dst.entries), searchFunc)
- // Not found if idx is after or value at idx is not digest
- if idx == len(dst.entries) || dst.entries[idx].digest != d {
- return nil
- }
-
- entries := dst.entries
- copy(entries[idx:], entries[idx+1:])
- entries = entries[:len(entries)-1]
- dst.entries = entries
-
- return nil
-}
-
-// All returns all the digests in the set
-func (dst *Set) All() []digest.Digest {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- retValues := make([]digest.Digest, len(dst.entries))
- for i := range dst.entries {
- retValues[i] = dst.entries[i].digest
- }
-
- return retValues
-}
-
-// ShortCodeTable returns a map of Digest to unique short codes. The
-// length parameter is the minimum length; the maximum length may be the
-// entire digest value if uniqueness cannot be achieved without the
-// full value. This function will attempt to make short codes as short
-// as possible to be unique.
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
- dst.mutex.RLock()
- defer dst.mutex.RUnlock()
- m := make(map[digest.Digest]string, len(dst.entries))
- l := length
- resetIdx := 0
- for i := 0; i < len(dst.entries); i++ {
- var short string
- extended := true
- for extended {
- extended = false
- if len(dst.entries[i].val) <= l {
- short = dst.entries[i].digest.String()
- } else {
- short = dst.entries[i].val[:l]
- for j := i + 1; j < len(dst.entries); j++ {
- if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
- if j > resetIdx {
- resetIdx = j
- }
- extended = true
- } else {
- break
- }
- }
- if extended {
- l++
- }
- }
- }
- m[dst.entries[i].digest] = short
- if i >= resetIdx {
- l = length
- }
- }
- return m
-}
-
-type digestEntry struct {
- alg digest.Algorithm
- val string
- digest digest.Digest
-}
-
-type digestEntries []*digestEntry
-
-func (d digestEntries) Len() int {
- return len(d)
-}
-
-func (d digestEntries) Less(i, j int) bool {
- if d[i].val != d[j].val {
- return d[i].val < d[j].val
- }
- return d[i].alg < d[j].alg
-}
-
-func (d digestEntries) Swap(i, j int) {
- d[i], d[j] = d[j], d[i]
-}
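
The digestset package removed above resolves short digest prefixes to full digests. A minimal usage sketch, using only the Add/Lookup/ShortCodeTable API shown in the deleted file (the digest value is fabricated purely for illustration):

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/docker/distribution/digestset"
        digest "github.com/opencontainers/go-digest"
    )

    func main() {
        // Fabricated digest value, for illustration only.
        d := digest.Digest("sha256:" + strings.Repeat("ab", 32))

        set := digestset.NewSet()
        if err := set.Add(d); err != nil {
            log.Fatal(err)
        }

        // Resolve the full digest from a short hex prefix.
        full, err := set.Lookup("abab")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(full == d) // true

        // Shortest unambiguous prefix (at least 8 characters) per digest.
        for dgst, short := range digestset.ShortCodeTable(set, 8) {
            fmt.Printf("%s -> %s\n", short, dgst)
        }
    }
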
diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go
deleted file mode 100644
index 978df7e..0000000
--- a/vendor/github.com/docker/distribution/reference/helpers.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package reference
-
-import "path"
-
-// IsNameOnly returns true if reference only contains a repo name.
-func IsNameOnly(ref Named) bool {
- if _, ok := ref.(NamedTagged); ok {
- return false
- }
- if _, ok := ref.(Canonical); ok {
- return false
- }
- return true
-}
-
-// FamiliarName returns the familiar name string
-// for the given named, familiarizing if needed.
-func FamiliarName(ref Named) string {
- if nn, ok := ref.(normalizedNamed); ok {
- return nn.Familiar().Name()
- }
- return ref.Name()
-}
-
-// FamiliarString returns the familiar string representation
-// for the given reference, familiarizing if needed.
-func FamiliarString(ref Reference) string {
- if nn, ok := ref.(normalizedNamed); ok {
- return nn.Familiar().String()
- }
- return ref.String()
-}
-
-// FamiliarMatch reports whether ref matches the specified pattern.
-// See https://godoc.org/path#Match for supported patterns.
-func FamiliarMatch(pattern string, ref Reference) (bool, error) {
- matched, err := path.Match(pattern, FamiliarString(ref))
- if namedRef, isNamed := ref.(Named); isNamed && !matched {
- matched, _ = path.Match(pattern, FamiliarName(namedRef))
- }
- return matched, err
-}
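
FamiliarMatch above applies path.Match-style patterns to the familiar form of a reference. A small sketch (the pattern is hypothetical, chosen only for illustration):

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/distribution/reference"
    )

    func main() {
        ref, err := reference.ParseNormalizedNamed("docker.io/library/redis")
        if err != nil {
            log.Fatal(err)
        }
        // The pattern is tried against the familiar string ("redis") and,
        // failing that, against the familiar name.
        ok, err := reference.FamiliarMatch("red*", ref)
        fmt.Println(ok, err) // true <nil>
    }
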
diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go
deleted file mode 100644
index 2d71fc5..0000000
--- a/vendor/github.com/docker/distribution/reference/normalize.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/docker/distribution/digestset"
- "github.com/opencontainers/go-digest"
-)
-
-var (
- legacyDefaultDomain = "index.docker.io"
- defaultDomain = "docker.io"
- officialRepoName = "library"
- defaultTag = "latest"
-)
-
-// normalizedNamed represents a name which has been
-// normalized and has a familiar form. A familiar name
-// is what is used in Docker UI. An example normalized
-// name is "docker.io/library/ubuntu" and corresponding
-// familiar name of "ubuntu".
-type normalizedNamed interface {
- Named
- Familiar() Named
-}
-
-// ParseNormalizedNamed parses a string into a named reference
-// transforming a familiar name from Docker UI to a fully
-// qualified reference. If the value may be an identifier
-// use ParseAnyReference.
-func ParseNormalizedNamed(s string) (Named, error) {
- if ok := anchoredIdentifierRegexp.MatchString(s); ok {
- return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
- }
- domain, remainder := splitDockerDomain(s)
- var remoteName string
- if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
- remoteName = remainder[:tagSep]
- } else {
- remoteName = remainder
- }
- if strings.ToLower(remoteName) != remoteName {
- return nil, errors.New("invalid reference format: repository name must be lowercase")
- }
-
- ref, err := Parse(domain + "/" + remainder)
- if err != nil {
- return nil, err
- }
- named, isNamed := ref.(Named)
- if !isNamed {
- return nil, fmt.Errorf("reference %s has no name", ref.String())
- }
- return named, nil
-}
-
-// splitDockerDomain splits a repository name into domain and remote-name
-// strings. If no valid domain is found, the default domain is used. The
-// repository name must already be validated.
-func splitDockerDomain(name string) (domain, remainder string) {
- i := strings.IndexRune(name, '/')
- if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
- domain, remainder = defaultDomain, name
- } else {
- domain, remainder = name[:i], name[i+1:]
- }
- if domain == legacyDefaultDomain {
- domain = defaultDomain
- }
- if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
- remainder = officialRepoName + "/" + remainder
- }
- return
-}
-
-// familiarizeName returns a shortened version of the name familiar
-// to the Docker UI. Familiar names have the default domain
-// "docker.io" and "library/" repository prefix removed.
-// For example, "docker.io/library/redis" will have the familiar
-// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
-// Returns a familiarized named only reference.
-func familiarizeName(named namedRepository) repository {
- repo := repository{
- domain: named.Domain(),
- path: named.Path(),
- }
-
- if repo.domain == defaultDomain {
- repo.domain = ""
- // Handle official repositories which have the pattern "library/<official repo name>"
- if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
- repo.path = split[1]
- }
- }
- return repo
-}
-
-func (r reference) Familiar() Named {
- return reference{
- namedRepository: familiarizeName(r.namedRepository),
- tag: r.tag,
- digest: r.digest,
- }
-}
-
-func (r repository) Familiar() Named {
- return familiarizeName(r)
-}
-
-func (t taggedReference) Familiar() Named {
- return taggedReference{
- namedRepository: familiarizeName(t.namedRepository),
- tag: t.tag,
- }
-}
-
-func (c canonicalReference) Familiar() Named {
- return canonicalReference{
- namedRepository: familiarizeName(c.namedRepository),
- digest: c.digest,
- }
-}
-
-// TagNameOnly adds the default tag "latest" to a reference if it only has
-// a repo name.
-func TagNameOnly(ref Named) Named {
- if IsNameOnly(ref) {
- namedTagged, err := WithTag(ref, defaultTag)
- if err != nil {
- // Default tag must be valid, to create a NamedTagged
- // type with non-validated input the WithTag function
- // should be used instead
- panic(err)
- }
- return namedTagged
- }
- return ref
-}
-
-// ParseAnyReference parses a reference string as a possible identifier,
-// full digest, or familiar name.
-func ParseAnyReference(ref string) (Reference, error) {
- if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
- return digestReference("sha256:" + ref), nil
- }
- if dgst, err := digest.Parse(ref); err == nil {
- return digestReference(dgst), nil
- }
-
- return ParseNormalizedNamed(ref)
-}
-
-// ParseAnyReferenceWithSet parses a reference string as a possible short
-// identifier to be matched in a digest set, a full digest, or familiar name.
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
- if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
- dgst, err := ds.Lookup(ref)
- if err == nil {
- return digestReference(dgst), nil
- }
- } else {
- if dgst, err := digest.Parse(ref); err == nil {
- return digestReference(dgst), nil
- }
- }
-
- return ParseNormalizedNamed(ref)
-}
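
The normalization round trip documented above, in a minimal sketch (the expected output is inferred from the removed code):

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/distribution/reference"
    )

    func main() {
        named, err := reference.ParseNormalizedNamed("ubuntu")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(named.String()) // docker.io/library/ubuntu

        // TagNameOnly appends the default "latest" tag when none is present.
        tagged := reference.TagNameOnly(named)
        fmt.Println(tagged.String()) // docker.io/library/ubuntu:latest

        // FamiliarString strips the default domain and "library/" prefix again.
        fmt.Println(reference.FamiliarString(tagged)) // ubuntu:latest
    }
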
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
deleted file mode 100644
index 2f66cca..0000000
--- a/vendor/github.com/docker/distribution/reference/reference.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Package reference provides a general type to represent any way of referencing images within the registry.
-// Its main purpose is to abstract tags and digests (content-addressable hash).
-//
-// Grammar
-//
-// reference := name [ ":" tag ] [ "@" digest ]
-// name := [domain '/'] path-component ['/' path-component]*
-// domain := domain-component ['.' domain-component]* [':' port-number]
-// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
-// port-number := /[0-9]+/
-// path-component := alpha-numeric [separator alpha-numeric]*
-// alpha-numeric := /[a-z0-9]+/
-// separator := /[_.]|__|[-]*/
-//
-// tag := /[\w][\w.-]{0,127}/
-//
-// digest := digest-algorithm ":" digest-hex
-// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
-// digest-algorithm-separator := /[+.-_]/
-// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
-// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
-//
-// identifier := /[a-f0-9]{64}/
-// short-identifier := /[a-f0-9]{6,64}/
-package reference
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/opencontainers/go-digest"
-)
-
-const (
- // NameTotalLengthMax is the maximum total number of characters in a repository name.
- NameTotalLengthMax = 255
-)
-
-var (
- // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
- ErrReferenceInvalidFormat = errors.New("invalid reference format")
-
- // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
- ErrTagInvalidFormat = errors.New("invalid tag format")
-
-	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
- ErrDigestInvalidFormat = errors.New("invalid digest format")
-
- // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
- ErrNameContainsUppercase = errors.New("repository name must be lowercase")
-
- // ErrNameEmpty is returned for empty, invalid repository names.
- ErrNameEmpty = errors.New("repository name must have at least one component")
-
- // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
- ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
-
- // ErrNameNotCanonical is returned when a name is not canonical.
- ErrNameNotCanonical = errors.New("repository name must be canonical")
-)
-
-// Reference is an opaque object reference identifier that may include
-// modifiers such as a hostname, name, tag, and digest.
-type Reference interface {
- // String returns the full reference
- String() string
-}
-
-// Field provides a wrapper type for resolving correct reference types when
-// working with encoding.
-type Field struct {
- reference Reference
-}
-
-// AsField wraps a reference in a Field for encoding.
-func AsField(reference Reference) Field {
- return Field{reference}
-}
-
-// Reference unwraps the reference type from the field to
-// return the Reference object. This object should be
-// of the appropriate type to further check for different
-// reference types.
-func (f Field) Reference() Reference {
- return f.reference
-}
-
-// MarshalText serializes the field to byte text which
-// is the string of the reference.
-func (f Field) MarshalText() (p []byte, err error) {
- return []byte(f.reference.String()), nil
-}
-
-// UnmarshalText parses text bytes by invoking the
-// reference parser to ensure the appropriately
-// typed reference object is wrapped by field.
-func (f *Field) UnmarshalText(p []byte) error {
- r, err := Parse(string(p))
- if err != nil {
- return err
- }
-
- f.reference = r
- return nil
-}
-
-// Named is an object with a full name
-type Named interface {
- Reference
- Name() string
-}
-
-// Tagged is an object which has a tag
-type Tagged interface {
- Reference
- Tag() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
- Named
- Tag() string
-}
-
-// Digested is an object which has a digest
-// in which it can be referenced by
-type Digested interface {
- Reference
- Digest() digest.Digest
-}
-
-// Canonical reference is an object with a fully unique
-// name including a name with domain and digest
-type Canonical interface {
- Named
- Digest() digest.Digest
-}
-
-// namedRepository is a reference to a repository with a name.
-// A namedRepository has both domain and path components.
-type namedRepository interface {
- Named
- Domain() string
- Path() string
-}
-
-// Domain returns the domain part of the Named reference
-func Domain(named Named) string {
- if r, ok := named.(namedRepository); ok {
- return r.Domain()
- }
- domain, _ := splitDomain(named.Name())
- return domain
-}
-
-// Path returns the name without the domain part of the Named reference
-func Path(named Named) (name string) {
- if r, ok := named.(namedRepository); ok {
- return r.Path()
- }
- _, path := splitDomain(named.Name())
- return path
-}
-
-func splitDomain(name string) (string, string) {
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if len(match) != 3 {
- return "", name
- }
- return match[1], match[2]
-}
-
-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-// DEPRECATED: Use Domain or Path
-func SplitHostname(named Named) (string, string) {
- if r, ok := named.(namedRepository); ok {
- return r.Domain(), r.Path()
- }
- return splitDomain(named.Name())
-}
-
-// Parse parses s and returns a syntactically valid Reference.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: Parse will not handle short digests.
-func Parse(s string) (Reference, error) {
- matches := ReferenceRegexp.FindStringSubmatch(s)
- if matches == nil {
- if s == "" {
- return nil, ErrNameEmpty
- }
- if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
- return nil, ErrNameContainsUppercase
- }
- return nil, ErrReferenceInvalidFormat
- }
-
- if len(matches[1]) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- var repo repository
-
- nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
- if nameMatch != nil && len(nameMatch) == 3 {
- repo.domain = nameMatch[1]
- repo.path = nameMatch[2]
- } else {
- repo.domain = ""
- repo.path = matches[1]
- }
-
- ref := reference{
- namedRepository: repo,
- tag: matches[2],
- }
- if matches[3] != "" {
- var err error
- ref.digest, err = digest.Parse(matches[3])
- if err != nil {
- return nil, err
- }
- }
-
- r := getBestReferenceType(ref)
- if r == nil {
- return nil, ErrNameEmpty
- }
-
- return r, nil
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name and be in the canonical
-// form, otherwise an error is returned.
-// If an error was encountered it is returned, along with a nil Reference.
-// NOTE: ParseNamed will not handle short digests.
-func ParseNamed(s string) (Named, error) {
- named, err := ParseNormalizedNamed(s)
- if err != nil {
- return nil, err
- }
- if named.String() != s {
- return nil, ErrNameNotCanonical
- }
- return named, nil
-}
-
-// WithName returns a named object representing the given string. If the input
-// is invalid ErrReferenceInvalidFormat will be returned.
-func WithName(name string) (Named, error) {
- if len(name) > NameTotalLengthMax {
- return nil, ErrNameTooLong
- }
-
- match := anchoredNameRegexp.FindStringSubmatch(name)
- if match == nil || len(match) != 3 {
- return nil, ErrReferenceInvalidFormat
- }
- return repository{
- domain: match[1],
- path: match[2],
- }, nil
-}
-
-// WithTag combines the name from "name" and the tag from "tag" to form a
-// reference incorporating both the name and the tag.
-func WithTag(name Named, tag string) (NamedTagged, error) {
- if !anchoredTagRegexp.MatchString(tag) {
- return nil, ErrTagInvalidFormat
- }
- var repo repository
- if r, ok := name.(namedRepository); ok {
- repo.domain = r.Domain()
- repo.path = r.Path()
- } else {
- repo.path = name.Name()
- }
- if canonical, ok := name.(Canonical); ok {
- return reference{
- namedRepository: repo,
- tag: tag,
- digest: canonical.Digest(),
- }, nil
- }
- return taggedReference{
- namedRepository: repo,
- tag: tag,
- }, nil
-}
-
-// WithDigest combines the name from "name" and the digest from "digest" to form
-// a reference incorporating both the name and the digest.
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
- if !anchoredDigestRegexp.MatchString(digest.String()) {
- return nil, ErrDigestInvalidFormat
- }
- var repo repository
- if r, ok := name.(namedRepository); ok {
- repo.domain = r.Domain()
- repo.path = r.Path()
- } else {
- repo.path = name.Name()
- }
- if tagged, ok := name.(Tagged); ok {
- return reference{
- namedRepository: repo,
- tag: tagged.Tag(),
- digest: digest,
- }, nil
- }
- return canonicalReference{
- namedRepository: repo,
- digest: digest,
- }, nil
-}
-
-// TrimNamed removes any tag or digest from the named reference.
-func TrimNamed(ref Named) Named {
- domain, path := SplitHostname(ref)
- return repository{
- domain: domain,
- path: path,
- }
-}
-
-func getBestReferenceType(ref reference) Reference {
- if ref.Name() == "" {
- // Allow digest only references
- if ref.digest != "" {
- return digestReference(ref.digest)
- }
- return nil
- }
- if ref.tag == "" {
- if ref.digest != "" {
- return canonicalReference{
- namedRepository: ref.namedRepository,
- digest: ref.digest,
- }
- }
- return ref.namedRepository
- }
- if ref.digest == "" {
- return taggedReference{
- namedRepository: ref.namedRepository,
- tag: ref.tag,
- }
- }
-
- return ref
-}
-
-type reference struct {
- namedRepository
- tag string
- digest digest.Digest
-}
-
-func (r reference) String() string {
- return r.Name() + ":" + r.tag + "@" + r.digest.String()
-}
-
-func (r reference) Tag() string {
- return r.tag
-}
-
-func (r reference) Digest() digest.Digest {
- return r.digest
-}
-
-type repository struct {
- domain string
- path string
-}
-
-func (r repository) String() string {
- return r.Name()
-}
-
-func (r repository) Name() string {
- if r.domain == "" {
- return r.path
- }
- return r.domain + "/" + r.path
-}
-
-func (r repository) Domain() string {
- return r.domain
-}
-
-func (r repository) Path() string {
- return r.path
-}
-
-type digestReference digest.Digest
-
-func (d digestReference) String() string {
- return digest.Digest(d).String()
-}
-
-func (d digestReference) Digest() digest.Digest {
- return digest.Digest(d)
-}
-
-type taggedReference struct {
- namedRepository
- tag string
-}
-
-func (t taggedReference) String() string {
- return t.Name() + ":" + t.tag
-}
-
-func (t taggedReference) Tag() string {
- return t.tag
-}
-
-type canonicalReference struct {
- namedRepository
- digest digest.Digest
-}
-
-func (c canonicalReference) String() string {
- return c.Name() + "@" + c.digest.String()
-}
-
-func (c canonicalReference) Digest() digest.Digest {
- return c.digest
-}
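
A short sketch of the Parse/WithDigest surface defined above (the registry host and digest are fabricated for illustration; expected output is inferred from the removed code):

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/docker/distribution/reference"
        digest "github.com/opencontainers/go-digest"
    )

    func main() {
        ref, err := reference.Parse("registry.example.com:5000/team/app:v1.2.3")
        if err != nil {
            log.Fatal(err)
        }
        tagged := ref.(reference.NamedTagged)
        fmt.Println(reference.Domain(tagged), reference.Path(tagged), tagged.Tag())
        // registry.example.com:5000 team/app v1.2.3

        // Attach a fabricated digest to obtain a canonical reference.
        canonical, err := reference.WithDigest(tagged,
            digest.Digest("sha256:"+strings.Repeat("cd", 32)))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(canonical.String())
        // registry.example.com:5000/team/app:v1.2.3@sha256:cdcd...cd
    }
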
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
deleted file mode 100644
index 7860349..0000000
--- a/vendor/github.com/docker/distribution/reference/regexp.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package reference
-
-import "regexp"
-
-var (
- // alphaNumericRegexp defines the alpha numeric atom, typically a
- // component of names. This only allows lower case characters and digits.
- alphaNumericRegexp = match(`[a-z0-9]+`)
-
- // separatorRegexp defines the separators allowed to be embedded in name
-	// components. This allows one period, one or two underscores, and
-	// multiple dashes.
- separatorRegexp = match(`(?:[._]|__|[-]*)`)
-
- // nameComponentRegexp restricts registry path component names to start
- // with at least one letter or number, with following parts able to be
-	// separated by one period, one or two underscores, and multiple dashes.
- nameComponentRegexp = expression(
- alphaNumericRegexp,
- optional(repeated(separatorRegexp, alphaNumericRegexp)))
-
- // domainComponentRegexp restricts the registry domain component of a
- // repository name to start with a component as defined by DomainRegexp
- // and followed by an optional port.
- domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
-
- // DomainRegexp defines the structure of potential domain components
- // that may be part of image names. This is purposely a subset of what is
- // allowed by DNS to ensure backwards compatibility with Docker image
- // names.
- DomainRegexp = expression(
- domainComponentRegexp,
- optional(repeated(literal(`.`), domainComponentRegexp)),
- optional(literal(`:`), match(`[0-9]+`)))
-
- // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
- TagRegexp = match(`[\w][\w.-]{0,127}`)
-
- // anchoredTagRegexp matches valid tag names, anchored at the start and
- // end of the matched string.
- anchoredTagRegexp = anchored(TagRegexp)
-
- // DigestRegexp matches valid digests.
- DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
-
- // anchoredDigestRegexp matches valid digests, anchored at the start and
- // end of the matched string.
- anchoredDigestRegexp = anchored(DigestRegexp)
-
- // NameRegexp is the format for the name component of references. The
- // regexp has capturing groups for the domain and name part omitting
- // the separating forward slash from either.
- NameRegexp = expression(
- optional(DomainRegexp, literal(`/`)),
- nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp)))
-
- // anchoredNameRegexp is used to parse a name value, capturing the
- // domain and trailing components.
- anchoredNameRegexp = anchored(
- optional(capture(DomainRegexp), literal(`/`)),
- capture(nameComponentRegexp,
- optional(repeated(literal(`/`), nameComponentRegexp))))
-
- // ReferenceRegexp is the full supported format of a reference. The regexp
- // is anchored and has capturing groups for name, tag, and digest
- // components.
- ReferenceRegexp = anchored(capture(NameRegexp),
- optional(literal(":"), capture(TagRegexp)),
- optional(literal("@"), capture(DigestRegexp)))
-
- // IdentifierRegexp is the format for string identifier used as a
- // content addressable identifier using sha256. These identifiers
- // are like digests without the algorithm, since sha256 is used.
- IdentifierRegexp = match(`([a-f0-9]{64})`)
-
- // ShortIdentifierRegexp is the format used to represent a prefix
- // of an identifier. A prefix may be used to match a sha256 identifier
- // within a list of trusted identifiers.
- ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
-
- // anchoredIdentifierRegexp is used to check or match an
- // identifier value, anchored at start and end of string.
- anchoredIdentifierRegexp = anchored(IdentifierRegexp)
-
- // anchoredShortIdentifierRegexp is used to check if a value
- // is a possible identifier prefix, anchored at start and end
- // of string.
- anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
-)
-
-// match compiles the string to a regular expression.
-var match = regexp.MustCompile
-
-// literal compiles s into a literal regular expression, escaping any regexp
-// reserved characters.
-func literal(s string) *regexp.Regexp {
- re := match(regexp.QuoteMeta(s))
-
- if _, complete := re.LiteralPrefix(); !complete {
- panic("must be a literal")
- }
-
- return re
-}
-
-// expression defines a full expression, where each regular expression must
-// follow the previous.
-func expression(res ...*regexp.Regexp) *regexp.Regexp {
- var s string
- for _, re := range res {
- s += re.String()
- }
-
- return match(s)
-}
-
-// optional wraps the expression in a non-capturing group and makes the
-// production optional.
-func optional(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `?`)
-}
-
-// repeated wraps the regexp in a non-capturing group to get one or more
-// matches.
-func repeated(res ...*regexp.Regexp) *regexp.Regexp {
- return match(group(expression(res...)).String() + `+`)
-}
-
-// group wraps the regexp in a non-capturing group.
-func group(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(?:` + expression(res...).String() + `)`)
-}
-
-// capture wraps the expression in a capturing group.
-func capture(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`(` + expression(res...).String() + `)`)
-}
-
-// anchored anchors the regular expression by adding start and end delimiters.
-func anchored(res ...*regexp.Regexp) *regexp.Regexp {
- return match(`^` + expression(res...).String() + `$`)
-}
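
These helpers only compose the exported regexps; for instance, the anchored ReferenceRegexp drives Parse, with capture groups 1-3 holding name, tag, and digest. A tiny sketch (example input fabricated for illustration):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/reference"
    )

    func main() {
        m := reference.ReferenceRegexp.FindStringSubmatch("docker.io/library/redis:7")
        // m[1] == "docker.io/library/redis", m[2] == "7", m[3] == ""
        fmt.Println(m[1], m[2], m[3])
    }
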
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
deleted file mode 100644
index 6d9bb4b..0000000
--- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package errcode
-
-import (
- "encoding/json"
- "fmt"
- "strings"
-)
-
-// ErrorCoder is the base interface for ErrorCode and Error, allowing
-// users of either to call ErrorCode and get the underlying ID.
-type ErrorCoder interface {
- ErrorCode() ErrorCode
-}
-
-// ErrorCode represents the error type. The errors are serialized via strings
-// and the integer format may change and should *never* be exported.
-type ErrorCode int
-
-var _ error = ErrorCode(0)
-
-// ErrorCode just returns itself
-func (ec ErrorCode) ErrorCode() ErrorCode {
- return ec
-}
-
-// Error returns the ID/Value
-func (ec ErrorCode) Error() string {
- // NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
- return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
-}
-
-// Descriptor returns the descriptor for the error code.
-func (ec ErrorCode) Descriptor() ErrorDescriptor {
- d, ok := errorCodeToDescriptors[ec]
-
- if !ok {
- return ErrorCodeUnknown.Descriptor()
- }
-
- return d
-}
-
-// String returns the canonical identifier for this error code.
-func (ec ErrorCode) String() string {
- return ec.Descriptor().Value
-}
-
-// Message returns the human-readable error message for this error code.
-func (ec ErrorCode) Message() string {
- return ec.Descriptor().Message
-}
-
-// MarshalText encodes the receiver into UTF-8-encoded text and returns the
-// result.
-func (ec ErrorCode) MarshalText() (text []byte, err error) {
- return []byte(ec.String()), nil
-}
-
-// UnmarshalText decodes the form generated by MarshalText.
-func (ec *ErrorCode) UnmarshalText(text []byte) error {
- desc, ok := idToDescriptors[string(text)]
-
- if !ok {
- desc = ErrorCodeUnknown.Descriptor()
- }
-
- *ec = desc.Code
-
- return nil
-}
-
-// WithMessage creates a new Error struct based on the passed-in info and
-// overrides the Message property.
-func (ec ErrorCode) WithMessage(message string) Error {
- return Error{
- Code: ec,
- Message: message,
- }
-}
-
-// WithDetail creates a new Error struct based on the passed-in info and
-// sets the Detail property appropriately
-func (ec ErrorCode) WithDetail(detail interface{}) Error {
- return Error{
- Code: ec,
- Message: ec.Message(),
- }.WithDetail(detail)
-}
-
-// WithArgs creates a new Error struct and sets the Args slice
-func (ec ErrorCode) WithArgs(args ...interface{}) Error {
- return Error{
- Code: ec,
- Message: ec.Message(),
- }.WithArgs(args...)
-}
-
-// Error provides a wrapper around ErrorCode with extra Details provided.
-type Error struct {
- Code ErrorCode `json:"code"`
- Message string `json:"message"`
- Detail interface{} `json:"detail,omitempty"`
-
- // TODO(duglin): See if we need an "args" property so we can do the
- // variable substitution right before showing the message to the user
-}
-
-var _ error = Error{}
-
-// ErrorCode returns the ID/Value of this Error
-func (e Error) ErrorCode() ErrorCode {
- return e.Code
-}
-
-// Error returns a human readable representation of the error.
-func (e Error) Error() string {
- return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
-}
-
-// WithDetail will return a new Error, based on the current one, but with
-// some Detail info added
-func (e Error) WithDetail(detail interface{}) Error {
- return Error{
- Code: e.Code,
- Message: e.Message,
- Detail: detail,
- }
-}
-
-// WithArgs uses the passed-in list of interface{} as the substitution
-// variables in the Error's Message string, but returns a new Error
-func (e Error) WithArgs(args ...interface{}) Error {
- return Error{
- Code: e.Code,
- Message: fmt.Sprintf(e.Code.Message(), args...),
- Detail: e.Detail,
- }
-}
-
-// ErrorDescriptor provides relevant information about a given error code.
-type ErrorDescriptor struct {
- // Code is the error code that this descriptor describes.
- Code ErrorCode
-
-	// Value provides a unique string key, often capitalized with
- // underscores, to identify the error code. This value is used as the
- // keyed value when serializing api errors.
- Value string
-
-	// Message is a short, human-readable description of the error condition
- // included in API responses.
- Message string
-
-	// Description provides a complete account of the error's purpose, suitable
- // for use in documentation.
- Description string
-
- // HTTPStatusCode provides the http status code that is associated with
- // this error condition.
- HTTPStatusCode int
-}
-
-// ParseErrorCode returns the value by the string error code.
-// `ErrorCodeUnknown` will be returned if the error is not known.
-func ParseErrorCode(value string) ErrorCode {
- ed, ok := idToDescriptors[value]
- if ok {
- return ed.Code
- }
-
- return ErrorCodeUnknown
-}
-
-// Errors provides the envelope for multiple errors and a few sugar methods
-// for use within the application.
-type Errors []error
-
-var _ error = Errors{}
-
-func (errs Errors) Error() string {
- switch len(errs) {
- case 0:
- return "<nil>"
- case 1:
- return errs[0].Error()
- default:
- msg := "errors:\n"
- for _, err := range errs {
- msg += err.Error() + "\n"
- }
- return msg
- }
-}
-
-// Len returns the current number of errors.
-func (errs Errors) Len() int {
- return len(errs)
-}
-
-// MarshalJSON converts a slice of error, ErrorCode, or Error values into a
-// slice of Error, then serializes the result.
-func (errs Errors) MarshalJSON() ([]byte, error) {
- var tmpErrs struct {
- Errors []Error `json:"errors,omitempty"`
- }
-
- for _, daErr := range errs {
- var err Error
-
- switch daErr.(type) {
- case ErrorCode:
- err = daErr.(ErrorCode).WithDetail(nil)
- case Error:
- err = daErr.(Error)
- default:
- err = ErrorCodeUnknown.WithDetail(daErr)
-
- }
-
-		// If the Error struct was set up and they forgot to set the
-		// Message field (meaning it's "") then grab it from the ErrCode
- msg := err.Message
- if msg == "" {
- msg = err.Code.Message()
- }
-
- tmpErrs.Errors = append(tmpErrs.Errors, Error{
- Code: err.Code,
- Message: msg,
- Detail: err.Detail,
- })
- }
-
- return json.Marshal(tmpErrs)
-}
-
-// UnmarshalJSON deserializes []Error and then converts it into slice of
-// Error or ErrorCode
-func (errs *Errors) UnmarshalJSON(data []byte) error {
- var tmpErrs struct {
- Errors []Error
- }
-
- if err := json.Unmarshal(data, &tmpErrs); err != nil {
- return err
- }
-
- var newErrs Errors
- for _, daErr := range tmpErrs.Errors {
- // If Message is empty or exactly matches the Code's message string
- // then just use the Code, no need for a full Error struct
- if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
- // Error's w/o details get converted to ErrorCode
- newErrs = append(newErrs, daErr.Code)
- } else {
- // Error's w/ details are untouched
- newErrs = append(newErrs, Error{
- Code: daErr.Code,
- Message: daErr.Message,
- Detail: daErr.Detail,
- })
- }
- }
-
- *errs = newErrs
- return nil
-}
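
How the ErrorCode/Error/Errors types above compose and serialize, in a brief sketch (ErrorCodeUnknown and ErrorCodeUnsupported come from register.go further down; the detail string is made up):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/distribution/registry/api/errcode"
    )

    func main() {
        errs := errcode.Errors{
            errcode.ErrorCodeUnsupported,
            errcode.ErrorCodeUnknown.WithDetail("backend temporarily unreachable"),
        }
        fmt.Println(errs.Error())

        // Serializes to {"errors":[{"code":...,"message":...,"detail":...},...]}.
        payload, err := json.Marshal(errs)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(payload))
    }
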
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
deleted file mode 100644
index d77e704..0000000
--- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package errcode
-
-import (
- "encoding/json"
- "net/http"
-)
-
-// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
-// and sets the content-type header to 'application/json'. It will handle
-// ErrorCoder and Errors, and if necessary will create an envelope.
-func ServeJSON(w http.ResponseWriter, err error) error {
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- var sc int
-
- switch errs := err.(type) {
- case Errors:
- if len(errs) < 1 {
- break
- }
-
- if err, ok := errs[0].(ErrorCoder); ok {
- sc = err.ErrorCode().Descriptor().HTTPStatusCode
- }
- case ErrorCoder:
- sc = errs.ErrorCode().Descriptor().HTTPStatusCode
- err = Errors{err} // create an envelope.
- default:
- // We just have an unhandled error type, so just place in an envelope
- // and move along.
- err = Errors{err}
- }
-
- if sc == 0 {
- sc = http.StatusInternalServerError
- }
-
- w.WriteHeader(sc)
-
- return json.NewEncoder(w).Encode(err)
-}
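
ServeJSON above was meant to sit at the edge of an HTTP handler; a minimal, hypothetical wiring (route and port are illustrative):

    package main

    import (
        "net/http"

        "github.com/docker/distribution/registry/api/errcode"
    )

    func main() {
        http.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
            // Writes a JSON error envelope; the 401 status comes from the
            // descriptor registered for ErrorCodeUnauthorized.
            _ = errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized)
        })
        _ = http.ListenAndServe(":8080", nil)
    }
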
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go
deleted file mode 100644
index d1e8826..0000000
--- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package errcode
-
-import (
- "fmt"
- "net/http"
- "sort"
- "sync"
-)
-
-var (
- errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
- idToDescriptors = map[string]ErrorDescriptor{}
- groupToDescriptors = map[string][]ErrorDescriptor{}
-)
-
-var (
- // ErrorCodeUnknown is a generic error that can be used as a last
- // resort if there is no situation-specific error message that can be used
- ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
- Value: "UNKNOWN",
- Message: "unknown error",
- Description: `Generic error returned when the error does not have an
- API classification.`,
- HTTPStatusCode: http.StatusInternalServerError,
- })
-
- // ErrorCodeUnsupported is returned when an operation is not supported.
- ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
- Value: "UNSUPPORTED",
- Message: "The operation is unsupported.",
- Description: `The operation was unsupported due to a missing
- implementation or invalid set of parameters.`,
- HTTPStatusCode: http.StatusMethodNotAllowed,
- })
-
- // ErrorCodeUnauthorized is returned if a request requires
- // authentication.
- ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
- Value: "UNAUTHORIZED",
- Message: "authentication required",
- Description: `The access controller was unable to authenticate
- the client. Often this will be accompanied by a
- Www-Authenticate HTTP response header indicating how to
- authenticate.`,
- HTTPStatusCode: http.StatusUnauthorized,
- })
-
- // ErrorCodeDenied is returned if a client does not have sufficient
- // permission to perform an action.
- ErrorCodeDenied = Register("errcode", ErrorDescriptor{
- Value: "DENIED",
- Message: "requested access to the resource is denied",
- Description: `The access controller denied access for the
- operation on a resource.`,
- HTTPStatusCode: http.StatusForbidden,
- })
-
- // ErrorCodeUnavailable provides a common error to report unavailability
- // of a service or endpoint.
- ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
- Value: "UNAVAILABLE",
- Message: "service unavailable",
- Description: "Returned when a service is not available",
- HTTPStatusCode: http.StatusServiceUnavailable,
- })
-
- // ErrorCodeTooManyRequests is returned if a client attempts too many
- // times to contact a service endpoint.
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
- Value: "TOOMANYREQUESTS",
- Message: "too many requests",
- Description: `Returned when a client attempts to contact a
- service too many times`,
- HTTPStatusCode: http.StatusTooManyRequests,
- })
-)
-
-var nextCode = 1000
-var registerLock sync.Mutex
-
-// Register will make the passed-in error known to the environment and
-// return a new ErrorCode
-func Register(group string, descriptor ErrorDescriptor) ErrorCode {
- registerLock.Lock()
- defer registerLock.Unlock()
-
- descriptor.Code = ErrorCode(nextCode)
-
- if _, ok := idToDescriptors[descriptor.Value]; ok {
- panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
- }
- if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
- panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
- }
-
- groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
- errorCodeToDescriptors[descriptor.Code] = descriptor
- idToDescriptors[descriptor.Value] = descriptor
-
- nextCode++
- return descriptor.Code
-}
-
-type byValue []ErrorDescriptor
-
-func (a byValue) Len() int { return len(a) }
-func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
-
-// GetGroupNames returns the list of Error group names that are registered
-func GetGroupNames() []string {
- keys := []string{}
-
- for k := range groupToDescriptors {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- return keys
-}
-
-// GetErrorCodeGroup returns the named group of error descriptors
-func GetErrorCodeGroup(name string) []ErrorDescriptor {
- desc := groupToDescriptors[name]
- sort.Sort(byValue(desc))
- return desc
-}
-
-// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
-// registered, irrespective of what group they're in
-func GetErrorAllDescriptors() []ErrorDescriptor {
- result := []ErrorDescriptor{}
-
- for _, group := range GetGroupNames() {
- result = append(result, GetErrorCodeGroup(group)...)
- }
- sort.Sort(byValue(result))
- return result
-}
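
Registering a project-specific code with the API above looked like this (the group name, value, and message are hypothetical):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/docker/distribution/registry/api/errcode"
    )

    // ErrorCodeFrobnicate is a hypothetical, project-specific error code.
    // Register panics if the value is already registered.
    var ErrorCodeFrobnicate = errcode.Register("myservice", errcode.ErrorDescriptor{
        Value:          "FROBNICATE_FAILED",
        Message:        "frobnication failed",
        Description:    "Returned when the frobnicator cannot process the request.",
        HTTPStatusCode: http.StatusBadRequest,
    })

    func main() {
        fmt.Println(ErrorCodeFrobnicate.String()) // FROBNICATE_FAILED
    }
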
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
deleted file mode 100644
index dffacff..0000000
--- a/vendor/github.com/docker/docker/AUTHORS
+++ /dev/null
@@ -1,2175 +0,0 @@
-# This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `hack/generate-authors.sh`.
-
-Aanand Prasad <aanand.prasad@gmail.com>
-Aaron Davidson <aaron@databricks.com>
-Aaron Feng <aaron.feng@gmail.com>
-Aaron Hnatiw <aaron@griddio.com>
-Aaron Huslage <huslage@gmail.com>
-Aaron L. Xu <liker.xu@foxmail.com>
-Aaron Lehmann <aaron.lehmann@docker.com>
-Aaron Welch <welch@packet.net>
-Aaron.L.Xu <likexu@harmonycloud.cn>
-Abel Muiño <amuino@gmail.com>
-Abhijeet Kasurde <akasurde@redhat.com>
-Abhinandan Prativadi <abhi@docker.com>
-Abhinav Ajgaonkar <abhinav316@gmail.com>
-Abhishek Chanda <abhishek.becs@gmail.com>
-Abhishek Sharma <abhishek@asharma.me>
-Abin Shahab <ashahab@altiscale.com>
-Adam Avilla <aavilla@yp.com>
-Adam Dobrawy <naczelnik@jawnosc.tk>
-Adam Eijdenberg <adam.eijdenberg@gmail.com>
-Adam Kunk <adam.kunk@tiaa-cref.org>
-Adam Miller <admiller@redhat.com>
-Adam Mills <adam@armills.info>
-Adam Pointer <adam.pointer@skybettingandgaming.com>
-Adam Singer <financeCoding@gmail.com>
-Adam Walz <adam@adamwalz.net>
-Addam Hardy <addam.hardy@gmail.com>
-Aditi Rajagopal <arajagopal@us.ibm.com>
-Aditya <aditya@netroy.in>
-Adnan Khan <adnkha@amazon.com>
-Adolfo Ochagavía <aochagavia92@gmail.com>
-Adria Casas <adriacasas88@gmail.com>
-Adrian Moisey <adrian@changeover.za.net>
-Adrian Mouat <adrian.mouat@gmail.com>
-Adrian Oprea <adrian@codesi.nz>
-Adrien Folie <folie.adrien@gmail.com>
-Adrien Gallouët <adrien@gallouet.fr>
-Ahmed Kamal <email.ahmedkamal@googlemail.com>
-Ahmet Alp Balkan <ahmetb@microsoft.com>
-Aidan Feldman <aidan.feldman@gmail.com>
-Aidan Hobson Sayers <aidanhs@cantab.net>
-AJ Bowen <aj@soulshake.net>
-Ajey Charantimath <ajey.charantimath@gmail.com>
-ajneu <ajneu@users.noreply.github.com>
-Akash Gupta <akagup@microsoft.com>
-Akhil Mohan <akhil.mohan@mayadata.io>
-Akihiro Matsushima <amatsusbit@gmail.com>
-Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
-Akim Demaille <akim.demaille@docker.com>
-Akira Koyasu <mail@akirakoyasu.net>
-Akshay Karle <akshay.a.karle@gmail.com>
-Al Tobey <al@ooyala.com>
-alambike <alambike@gmail.com>
-Alan Hoyle <alan@alanhoyle.com>
-Alan Scherger <flyinprogrammer@gmail.com>
-Alan Thompson <cloojure@gmail.com>
-Albert Callarisa <shark234@gmail.com>
-Albert Zhang <zhgwenming@gmail.com>
-Albin Kerouanton <albin@akerouanton.name>
-Alejandro González Hevia <alejandrgh11@gmail.com>
-Aleksa Sarai <asarai@suse.de>
-Aleksandrs Fadins <aleks@s-ko.net>
-Alena Prokharchyk <alena@rancher.com>
-Alessandro Boch <aboch@tetrationanalytics.com>
-Alessio Biancalana <dottorblaster@gmail.com>
-Alex Chan <alex@alexwlchan.net>
-Alex Chen <alexchenunix@gmail.com>
-Alex Coventry <alx@empirical.com>
-Alex Crawford <alex.crawford@coreos.com>
-Alex Ellis <alexellis2@gmail.com>
-Alex Gaynor <alex.gaynor@gmail.com>
-Alex Goodman <wagoodman@gmail.com>
-Alex Olshansky <i@creagenics.com>
-Alex Samorukov <samm@os2.kiev.ua>
-Alex Warhawk <ax.warhawk@gmail.com>
-Alexander Artemenko <svetlyak.40wt@gmail.com>
-Alexander Boyd <alex@opengroove.org>
-Alexander Larsson <alexl@redhat.com>
-Alexander Midlash <amidlash@docker.com>
-Alexander Morozov <lk4d4@docker.com>
-Alexander Shopov <ash@kambanaria.org>
-Alexandre Beslic <alexandre.beslic@gmail.com>
-Alexandre Garnier <zigarn@gmail.com>
-Alexandre González <agonzalezro@gmail.com>
-Alexandre Jomin <alexandrejomin@gmail.com>
-Alexandru Sfirlogea <alexandru.sfirlogea@gmail.com>
-Alexei Margasov <alexei38@yandex.ru>
-Alexey Guskov <lexag@mail.ru>
-Alexey Kotlyarov <alexey@infoxchange.net.au>
-Alexey Shamrin <shamrin@gmail.com>
-Alexis THOMAS <fr.alexisthomas@gmail.com>
-Alfred Landrum <alfred.landrum@docker.com>
-Ali Dehghani <ali.dehghani.g@gmail.com>
-Alicia Lauerman <alicia@eta.im>
-Alihan Demir <alihan_6153@hotmail.com>
-Allen Madsen <blatyo@gmail.com>
-Allen Sun <allensun.shl@alibaba-inc.com>
-almoehi <almoehi@users.noreply.github.com>
-Alvaro Saurin <alvaro.saurin@gmail.com>
-Alvin Deng <alvin.q.deng@utexas.edu>
-Alvin Richards <alvin.richards@docker.com>
-amangoel <amangoel@gmail.com>
-Amen Belayneh <amenbelayneh@gmail.com>
-Amir Goldstein <amir73il@aquasec.com>
-Amit Bakshi <ambakshi@gmail.com>
-Amit Krishnan <amit.krishnan@oracle.com>
-Amit Shukla <amit.shukla@docker.com>
-Amr Gawish <amr.gawish@gmail.com>
-Amy Lindburg <amy.lindburg@docker.com>
-Anand Patil <anand.prabhakar.patil@gmail.com>
-AnandkumarPatel <anandkumarpatel@gmail.com>
-Anatoly Borodin <anatoly.borodin@gmail.com>
-Anca Iordache <anca.iordache@docker.com>
-Anchal Agrawal <aagrawa4@illinois.edu>
-Anda Xu <anda.xu@docker.com>
-Anders Janmyr <anders@janmyr.com>
-Andre Dublin <81dublin@gmail.com>
-Andre Granovsky <robotciti@live.com>
-Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
-Andrea Luzzardi <aluzzardi@gmail.com>
-Andrea Turli <andrea.turli@gmail.com>
-Andreas Elvers <andreas@work.de>
-Andreas Köhler <andi5.py@gmx.net>
-Andreas Savvides <andreas@editd.com>
-Andreas Tiefenthaler <at@an-ti.eu>
-Andrei Gherzan <andrei@resin.io>
-Andrei Vagin <avagin@gmail.com>
-Andrew C. Bodine <acbodine@us.ibm.com>
-Andrew Clay Shafer <andrewcshafer@gmail.com>
-Andrew Duckworth <grillopress@gmail.com>
-Andrew France <andrew@avito.co.uk>
-Andrew Gerrand <adg@golang.org>
-Andrew Guenther <guenther.andrew.j@gmail.com>
-Andrew He <he.andrew.mail@gmail.com>
-Andrew Hsu <andrewhsu@docker.com>
-Andrew Kuklewicz <kookster@gmail.com>
-Andrew Macgregor <andrew.macgregor@agworld.com.au>
-Andrew Macpherson <hopscotch23@gmail.com>
-Andrew Martin <sublimino@gmail.com>
-Andrew McDonnell <bugs@andrewmcdonnell.net>
-Andrew Munsell <andrew@wizardapps.net>
-Andrew Pennebaker <andrew.pennebaker@gmail.com>
-Andrew Po <absourd.noise@gmail.com>
-Andrew Weiss <andrew.weiss@docker.com>
-Andrew Williams <williams.andrew@gmail.com>
-Andrews Medina <andrewsmedina@gmail.com>
-Andrey Kolomentsev <andrey.kolomentsev@docker.com>
-Andrey Petrov <andrey.petrov@shazow.net>
-Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
-André Martins <aanm90@gmail.com>
-andy <ztao@tibco-support.com>
-Andy Chambers <anchambers@paypal.com>
-andy diller <dillera@gmail.com>
-Andy Goldstein <agoldste@redhat.com>
-Andy Kipp <andy@rstudio.com>
-Andy Rothfusz <github@developersupport.net>
-Andy Smith <github@anarkystic.com>
-Andy Wilson <wilson.andrew.j+github@gmail.com>
-Anes Hasicic <anes.hasicic@gmail.com>
-Anil Belur <askb23@gmail.com>
-Anil Madhavapeddy <anil@recoil.org>
-Ankit Jain <ajatkj@yahoo.co.in>
-Ankush Agarwal <ankushagarwal11@gmail.com>
-Anonmily <michelle@michelleliu.io>
-Anran Qiao <anran.qiao@daocloud.io>
-Anshul Pundir <anshul.pundir@docker.com>
-Anthon van der Neut <anthon@mnt.org>
-Anthony Baire <Anthony.Baire@irisa.fr>
-Anthony Bishopric <git@anthonybishopric.com>
-Anthony Dahanne <anthony.dahanne@gmail.com>
-Anthony Sottile <asottile@umich.edu>
-Anton Löfgren <anton.lofgren@gmail.com>
-Anton Nikitin <anton.k.nikitin@gmail.com>
-Anton Polonskiy <anton.polonskiy@gmail.com>
-Anton Tiurin <noxiouz@yandex.ru>
-Antonio Murdaca <antonio.murdaca@gmail.com>
-Antonis Kalipetis <akalipetis@gmail.com>
-Antony Messerli <amesserl@rackspace.com>
-Anuj Bahuguna <anujbahuguna.dev@gmail.com>
-Anusha Ragunathan <anusha.ragunathan@docker.com>
-apocas <petermdias@gmail.com>
-Arash Deshmeh <adeshmeh@ca.ibm.com>
-ArikaChen <eaglesora@gmail.com>
-Arko Dasgupta <arko.dasgupta@docker.com>
-Arnaud Lefebvre <a.lefebvre@outlook.fr>
-Arnaud Porterie <arnaud.porterie@docker.com>
-Arnaud Rebillout <arnaud.rebillout@collabora.com>
-Arthur Barr <arthur.barr@uk.ibm.com>
-Arthur Gautier <baloo@gandi.net>
-Artur Meyster <arthurfbi@yahoo.com>
-Arun Gupta <arun.gupta@gmail.com>
-Asad Saeeduddin <masaeedu@gmail.com>
-Asbjørn Enge <asbjorn@hanafjedle.net>
-averagehuman <averagehuman@users.noreply.github.com>
-Avi Das <andas222@gmail.com>
-Avi Kivity <avi@scylladb.com>
-Avi Miller <avi.miller@oracle.com>
-Avi Vaid <avaid1996@gmail.com>
-ayoshitake <airandfingers@gmail.com>
-Azat Khuyiyakhmetov <shadow_uz@mail.ru>
-Bardia Keyoumarsi <bkeyouma@ucsc.edu>
-Barnaby Gray <barnaby@pickle.me.uk>
-Barry Allard <barry.allard@gmail.com>
-Bartłomiej Piotrowski <b@bpiotrowski.pl>
-Bastiaan Bakker <bbakker@xebia.com>
-bdevloed <boris.de.vloed@gmail.com>
-Ben Bonnefoy <frenchben@docker.com>
-Ben Firshman <ben@firshman.co.uk>
-Ben Golub <ben.golub@dotcloud.com>
-Ben Gould <ben@bengould.co.uk>
-Ben Hall <ben@benhall.me.uk>
-Ben Sargent <ben@brokendigits.com>
-Ben Severson <BenSeverson@users.noreply.github.com>
-Ben Toews <mastahyeti@gmail.com>
-Ben Wiklund <ben@daisyowl.com>
-Benjamin Atkin <ben@benatkin.com>
-Benjamin Baker <Benjamin.baker@utexas.edu>
-Benjamin Boudreau <boudreau.benjamin@gmail.com>
-Benjamin Yolken <yolken@stripe.com>
-Benny Ng <benny.tpng@gmail.com>
-Benoit Chesneau <bchesneau@gmail.com>
-Bernerd Schaefer <bj.schaefer@gmail.com>
-Bernhard M. Wiedemann <bwiedemann@suse.de>
-Bert Goethals <bert@bertg.be>
-Bertrand Roussel <broussel@sierrawireless.com>
-Bevisy Zhang <binbin36520@gmail.com>
-Bharath Thiruveedula <bharath_ves@hotmail.com>
-Bhiraj Butala <abhiraj.butala@gmail.com>
-Bhumika Bayani <bhumikabayani@gmail.com>
-Bilal Amarni <bilal.amarni@gmail.com>
-Bill Wang <ozbillwang@gmail.com>
-Bily Zhang <xcoder@tenxcloud.com>
-Bin Liu <liubin0329@gmail.com>
-Bingshen Wang <bingshen.wbs@alibaba-inc.com>
-Blake Geno <blakegeno@gmail.com>
-Boaz Shuster <ripcurld.github@gmail.com>
-bobby abbott <ttobbaybbob@gmail.com>
-Boqin Qin <bobbqqin@gmail.com>
-Boris Pruessmann <boris@pruessmann.org>
-Boshi Lian <farmer1992@gmail.com>
-Bouke Haarsma <bouke@webatoom.nl>
-Boyd Hemphill <boyd@feedmagnet.com>
-boynux <boynux@gmail.com>
-Bradley Cicenas <bradley.cicenas@gmail.com>
-Bradley Wright <brad@intranation.com>
-Brandon Liu <bdon@bdon.org>
-Brandon Philips <brandon.philips@coreos.com>
-Brandon Rhodes <brandon@rhodesmill.org>
-Brendan Dixon <brendand@microsoft.com>
-Brent Salisbury <brent.salisbury@docker.com>
-Brett Higgins <brhiggins@arbor.net>
-Brett Kochendorfer <brett.kochendorfer@gmail.com>
-Brett Randall <javabrett@gmail.com>
-Brian (bex) Exelbierd <bexelbie@redhat.com>
-Brian Bland <brian.bland@docker.com>
-Brian DeHamer <brian@dehamer.com>
-Brian Dorsey <brian@dorseys.org>
-Brian Flad <bflad417@gmail.com>
-Brian Goff <cpuguy83@gmail.com>
-Brian McCallister <brianm@skife.org>
-Brian Olsen <brian@maven-group.org>
-Brian Schwind <brianmschwind@gmail.com>
-Brian Shumate <brian@couchbase.com>
-Brian Torres-Gil <brian@dralth.com>
-Brian Trump <btrump@yelp.com>
-Brice Jaglin <bjaglin@teads.tv>
-Briehan Lombaard <briehan.lombaard@gmail.com>
-Brielle Broder <bbroder@google.com>
-Bruno Bigras <bigras.bruno@gmail.com>
-Bruno Binet <bruno.binet@gmail.com>
-Bruno Gazzera <bgazzera@paginar.com>
-Bruno Renié <brutasse@gmail.com>
-Bruno Tavares <btavare@thoughtworks.com>
-Bryan Bess <squarejaw@bsbess.com>
-Bryan Boreham <bjboreham@gmail.com>
-Bryan Matsuo <bryan.matsuo@gmail.com>
-Bryan Murphy <bmurphy1976@gmail.com>
-Burke Libbey <burke@libbey.me>
-Byung Kang <byung.kang.ctr@amrdec.army.mil>
-Caleb Spare <cespare@gmail.com>
-Calen Pennington <cale@edx.org>
-Cameron Boehmer <cameron.boehmer@gmail.com>
-Cameron Spear <cameronspear@gmail.com>
-Campbell Allen <campbell.allen@gmail.com>
-Candid Dauth <cdauth@cdauth.eu>
-Cao Weiwei <cao.weiwei30@zte.com.cn>
-Carl Henrik Lunde <chlunde@ping.uio.no>
-Carl Loa Odin <carlodin@gmail.com>
-Carl X. Su <bcbcarl@gmail.com>
-Carlo Mion <mion00@gmail.com>
-Carlos Alexandro Becker <caarlos0@gmail.com>
-Carlos de Paula <me@carlosedp.com>
-Carlos Sanchez <carlos@apache.org>
-Carol Fager-Higgins <carol.fager-higgins@docker.com>
-Cary <caryhartline@users.noreply.github.com>
-Casey Bisson <casey.bisson@joyent.com>
-Catalin Pirvu <pirvu.catalin94@gmail.com>
-Ce Gao <ce.gao@outlook.com>
-Cedric Davies <cedricda@microsoft.com>
-Cezar Sa Espinola <cezarsa@gmail.com>
-Chad Swenson <chadswen@gmail.com>
-Chance Zibolski <chance.zibolski@gmail.com>
-Chander Govindarajan <chandergovind@gmail.com>
-Chanhun Jeong <keyolk@gmail.com>
-Chao Wang <wangchao.fnst@cn.fujitsu.com>
-Charles Chan <charleswhchan@users.noreply.github.com>
-Charles Hooper <charles.hooper@dotcloud.com>
-Charles Law <claw@conduce.com>
-Charles Lindsay <chaz@chazomatic.us>
-Charles Merriam <charles.merriam@gmail.com>
-Charles Sarrazin <charles@sarraz.in>
-Charles Smith <charles.smith@docker.com>
-Charlie Drage <charlie@charliedrage.com>
-Charlie Lewis <charliel@lab41.org>
-Chase Bolt <chase.bolt@gmail.com>
-ChaYoung You <yousbe@gmail.com>
-Chen Chao <cc272309126@gmail.com>
-Chen Chuanliang <chen.chuanliang@zte.com.cn>
-Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
-Chen Min <chenmin46@huawei.com>
-Chen Mingjie <chenmingjie0828@163.com>
-Chen Qiu <cheney-90@hotmail.com>
-Cheng-mean Liu <soccerl@microsoft.com>
-Chengfei Shang <cfshang@alauda.io>
-Chengguang Xu <cgxu519@gmx.com>
-chenyuzhu <chenyuzhi@oschina.cn>
-Chetan Birajdar <birajdar.chetan@gmail.com>
-Chewey <prosto-chewey@users.noreply.github.com>
-Chia-liang Kao <clkao@clkao.org>
-chli <chli@freewheel.tv>
-Cholerae Hu <choleraehyq@gmail.com>
-Chris Alfonso <calfonso@redhat.com>
-Chris Armstrong <chris@opdemand.com>
-Chris Dias <cdias@microsoft.com>
-Chris Dituri <csdituri@gmail.com>
-Chris Fordham <chris@fordham-nagy.id.au>
-Chris Gavin <chris@chrisgavin.me>
-Chris Gibson <chris@chrisg.io>
-Chris Khoo <chris.khoo@gmail.com>
-Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
-Chris McKinnel <chrismckinnel@gmail.com>
-Chris Price <cprice@mirantis.com>
-Chris Seto <chriskseto@gmail.com>
-Chris Snow <chsnow123@gmail.com>
-Chris St. Pierre <chris.a.st.pierre@gmail.com>
-Chris Stivers <chris@stivers.us>
-Chris Swan <chris.swan@iee.org>
-Chris Telfer <ctelfer@docker.com>
-Chris Wahl <github@wahlnetwork.com>
-Chris Weyl <cweyl@alumni.drew.edu>
-Chris White <me@cwprogram.com>
-Christian Berendt <berendt@b1-systems.de>
-Christian Brauner <christian.brauner@ubuntu.com>
-Christian Böhme <developement@boehme3d.de>
-Christian Muehlhaeuser <muesli@gmail.com>
-Christian Persson <saser@live.se>
-Christian Rotzoll <ch.rotzoll@gmail.com>
-Christian Simon <simon@swine.de>
-Christian Stefanescu <st.chris@gmail.com>
-Christophe Mehay <cmehay@online.net>
-Christophe Troestler <christophe.Troestler@umons.ac.be>
-Christophe Vidal <kriss@krizalys.com>
-Christopher Biscardi <biscarch@sketcht.com>
-Christopher Crone <christopher.crone@docker.com>
-Christopher Currie <codemonkey+github@gmail.com>
-Christopher Jones <tophj@linux.vnet.ibm.com>
-Christopher Latham <sudosurootdev@gmail.com>
-Christopher Rigor <crigor@gmail.com>
-Christy Norman <christy@linux.vnet.ibm.com>
-Chun Chen <ramichen@tencent.com>
-Ciro S. Costa <ciro.costa@usp.br>
-Clayton Coleman <ccoleman@redhat.com>
-Clinton Kitson <clintonskitson@gmail.com>
-Cody Roseborough <crrosebo@amazon.com>
-Coenraad Loubser <coenraad@wish.org.za>
-Colin Dunklau <colin.dunklau@gmail.com>
-Colin Hebert <hebert.colin@gmail.com>
-Colin Panisset <github@clabber.com>
-Colin Rice <colin@daedrum.net>
-Colin Walters <walters@verbum.org>
-Collin Guarino <collin.guarino@gmail.com>
-Colm Hally <colmhally@gmail.com>
-companycy <companycy@gmail.com>
-Corbin Coleman <corbin.coleman@docker.com>
-Corey Farrell <git@cfware.com>
-Cory Forsyth <cory.forsyth@gmail.com>
-cressie176 <github@stephen-cresswell.net>
-CrimsonGlory <CrimsonGlory@users.noreply.github.com>
-Cristian Ariza <dev@cristianrz.com>
-Cristian Staretu <cristian.staretu@gmail.com>
-cristiano balducci <cristiano.balducci@gmail.com>
-Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
-Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
-CUI Wei <ghostplant@qq.com>
-Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
-Cyril F <cyrilf7x@gmail.com>
-Daan van Berkel <daan.v.berkel.1980@gmail.com>
-Daehyeok Mun <daehyeok@gmail.com>
-Dafydd Crosby <dtcrsby@gmail.com>
-dalanlan <dalanlan925@gmail.com>
-Damian Smyth <damian@dsau.co>
-Damien Nadé <github@livna.org>
-Damien Nozay <damien.nozay@gmail.com>
-Damjan Georgievski <gdamjan@gmail.com>
-Dan Anolik <dan@anolik.net>
-Dan Buch <d.buch@modcloth.com>
-Dan Cotora <dan@bluevision.ro>
-Dan Feldman <danf@jfrog.com>
-Dan Griffin <dgriffin@peer1.com>
-Dan Hirsch <thequux@upstandinghackers.com>
-Dan Keder <dan.keder@gmail.com>
-Dan Levy <dan@danlevy.net>
-Dan McPherson <dmcphers@redhat.com>
-Dan Stine <sw@stinemail.com>
-Dan Williams <me@deedubs.com>
-Dani Hodovic <dani.hodovic@gmail.com>
-Dani Louca <dani.louca@docker.com>
-Daniel Antlinger <d.antlinger@gmx.at>
-Daniel Black <daniel@linux.ibm.com>
-Daniel Dao <dqminh@cloudflare.com>
-Daniel Exner <dex@dragonslave.de>
-Daniel Farrell <dfarrell@redhat.com>
-Daniel Garcia <daniel@danielgarcia.info>
-Daniel Gasienica <daniel@gasienica.ch>
-Daniel Grunwell <mwgrunny@gmail.com>
-Daniel Helfand <helfand.4@gmail.com>
-Daniel Hiltgen <daniel.hiltgen@docker.com>
-Daniel J Walsh <dwalsh@redhat.com>
-Daniel Menet <membership@sontags.ch>
-Daniel Mizyrycki <daniel.mizyrycki@dotcloud.com>
-Daniel Nephin <dnephin@docker.com>
-Daniel Norberg <dano@spotify.com>
-Daniel Nordberg <dnordberg@gmail.com>
-Daniel Robinson <gottagetmac@gmail.com>
-Daniel S <dan.streby@gmail.com>
-Daniel Sweet <danieljsweet@icloud.com>
-Daniel Von Fange <daniel@leancoder.com>
-Daniel Watkins <daniel@daniel-watkins.co.uk>
-Daniel X Moore <yahivin@gmail.com>
-Daniel YC Lin <dlin.tw@gmail.com>
-Daniel Zhang <jmzwcn@gmail.com>
-Danny Berger <dpb587@gmail.com>
-Danny Milosavljevic <dannym@scratchpost.org>
-Danny Yates <danny@codeaholics.org>
-Danyal Khaliq <danyal.khaliq@tenpearls.com>
-Darren Coxall <darren@darrencoxall.com>
-Darren Shepherd <darren.s.shepherd@gmail.com>
-Darren Stahl <darst@microsoft.com>
-Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
-Davanum Srinivas <davanum@gmail.com>
-Dave Barboza <dbarboza@datto.com>
-Dave Goodchild <buddhamagnet@gmail.com>
-Dave Henderson <dhenderson@gmail.com>
-Dave MacDonald <mindlapse@gmail.com>
-Dave Tucker <dt@docker.com>
-David Anderson <dave@natulte.net>
-David Calavera <david.calavera@gmail.com>
-David Chung <david.chung@docker.com>
-David Corking <dmc-source@dcorking.com>
-David Cramer <davcrame@cisco.com>
-David Currie <david_currie@uk.ibm.com>
-David Davis <daviddavis@redhat.com>
-David Dooling <dooling@gmail.com>
-David Gageot <david@gageot.net>
-David Gebler <davidgebler@gmail.com>
-David Glasser <glasser@davidglasser.net>
-David Lawrence <david.lawrence@docker.com>
-David Lechner <david@lechnology.com>
-David M. Karr <davidmichaelkarr@gmail.com>
-David Mackey <tdmackey@booleanhaiku.com>
-David Mat <david@davidmat.com>
-David Mcanulty <github@hellspark.com>
-David McKay <david@rawkode.com>
-David P Hilton <david.hilton.p@gmail.com>
-David Pelaez <pelaez89@gmail.com>
-David R. Jenni <david.r.jenni@gmail.com>
-David Röthlisberger <david@rothlis.net>
-David Sheets <dsheets@docker.com>
-David Sissitka <me@dsissitka.com>
-David Trott <github@davidtrott.com>
-David Wang <00107082@163.com>
-David Williamson <david.williamson@docker.com>
-David Xia <dxia@spotify.com>
-David Young <yangboh@cn.ibm.com>
-Davide Ceretti <davide.ceretti@hogarthww.com>
-Dawn Chen <dawnchen@google.com>
-dbdd <wangtong2712@gmail.com>
-dcylabs <dcylabs@gmail.com>
-Debayan De <debayande@users.noreply.github.com>
-Deborah Gertrude Digges <deborah.gertrude.digges@gmail.com>
-deed02392 <georgehafiz@gmail.com>
-Deep Debroy <ddebroy@docker.com>
-Deng Guangxing <dengguangxing@huawei.com>
-Deni Bertovic <deni@kset.org>
-Denis Defreyne <denis@soundcloud.com>
-Denis Gladkikh <denis@gladkikh.email>
-Denis Ollier <larchunix@users.noreply.github.com>
-Dennis Chen <barracks510@gmail.com>
-Dennis Chen <dennis.chen@arm.com>
-Dennis Docter <dennis@d23.nl>
-Derek <crq@kernel.org>
-Derek <crquan@gmail.com>
-Derek Ch <denc716@gmail.com>
-Derek McGowan <derek@mcgstyle.net>
-Deric Crago <deric.crago@gmail.com>
-Deshi Xiao <dxiao@redhat.com>
-devmeyster <arthurfbi@yahoo.com>
-Devon Estes <devon.estes@klarna.com>
-Devvyn Murphy <devvyn@devvyn.com>
-Dharmit Shah <shahdharmit@gmail.com>
-Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
-Diego Romero <idiegoromero@gmail.com>
-Diego Siqueira <dieg0@live.com>
-Dieter Reuter <dieter.reuter@me.com>
-Dillon Dixon <dillondixon@gmail.com>
-Dima Stopel <dima@twistlock.com>
-Dimitri John Ledkov <dimitri.j.ledkov@intel.com>
-Dimitris Mandalidis <dimitris.mandalidis@gmail.com>
-Dimitris Rozakis <dimrozakis@gmail.com>
-Dimitry Andric <d.andric@activevideo.com>
-Dinesh Subhraveti <dineshs@altiscale.com>
-Ding Fei <dingfei@stars.org.cn>
-Diogo Monica <diogo@docker.com>
-DiuDiugirl <sophia.wang@pku.edu.cn>
-Djibril Koné <kone.djibril@gmail.com>
-dkumor <daniel@dkumor.com>
-Dmitri Logvinenko <dmitri.logvinenko@gmail.com>
-Dmitri Shuralyov <shurcooL@gmail.com>
-Dmitry Demeshchuk <demeshchuk@gmail.com>
-Dmitry Gusev <dmitry.gusev@gmail.com>
-Dmitry Kononenko <d@dm42.ru>
-Dmitry Sharshakov <d3dx12.xx@gmail.com>
-Dmitry Shyshkin <dmitry@shyshkin.org.ua>
-Dmitry Smirnov <onlyjob@member.fsf.org>
-Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
-Dmitry Vorobev <dimahabr@gmail.com>
-Dolph Mathews <dolph.mathews@gmail.com>
-Dominic Tubach <dominic.tubach@to.com>
-Dominic Yin <yindongchao@inspur.com>
-Dominik Dingel <dingel@linux.vnet.ibm.com>
-Dominik Finkbeiner <finkes93@gmail.com>
-Dominik Honnef <dominik@honnef.co>
-Don Kirkby <donkirkby@users.noreply.github.com>
-Don Kjer <don.kjer@gmail.com>
-Don Spaulding <donspauldingii@gmail.com>
-Donald Huang <don.hcd@gmail.com>
-Dong Chen <dongluo.chen@docker.com>
-Donghwa Kim <shanytt@gmail.com>
-Donovan Jones <git@gamma.net.nz>
-Doron Podoleanu <doronp@il.ibm.com>
-Doug Davis <dug@us.ibm.com>
-Doug MacEachern <dougm@vmware.com>
-Doug Tangren <d.tangren@gmail.com>
-Douglas Curtis <dougcurtis1@gmail.com>
-Dr Nic Williams <drnicwilliams@gmail.com>
-dragon788 <dragon788@users.noreply.github.com>
-Dražen Lučanin <kermit666@gmail.com>
-Drew Erny <derny@mirantis.com>
-Drew Hubl <drew.hubl@gmail.com>
-Dustin Sallings <dustin@spy.net>
-Ed Costello <epc@epcostello.com>
-Edmund Wagner <edmund-wagner@web.de>
-Eiichi Tsukata <devel@etsukata.com>
-Eike Herzbach <eike@herzbach.net>
-Eivin Giske Skaaren <eivinsn@axis.com>
-Eivind Uggedal <eivind@uggedal.com>
-Elan Ruusamäe <glen@pld-linux.org>
-Elango Sivanandam <elango.siva@docker.com>
-Elena Morozova <lelenanam@gmail.com>
-Eli Uriegas <eli.uriegas@docker.com>
-Elias Faxö <elias.faxo@tre.se>
-Elias Probst <mail@eliasprobst.eu>
-Elijah Zupancic <elijah@zupancic.name>
-eluck <mail@eluck.me>
-Elvir Kuric <elvirkuric@gmail.com>
-Emil Davtyan <emil2k@gmail.com>
-Emil Hernvall <emil@quench.at>
-Emily Maier <emily@emilymaier.net>
-Emily Rose <emily@contactvibe.com>
-Emir Ozer <emirozer@yandex.com>
-Enguerran <engcolson@gmail.com>
-Eohyung Lee <liquidnuker@gmail.com>
-epeterso <epeterson@breakpoint-labs.com>
-Eric Barch <barch@tomesoftware.com>
-Eric Curtin <ericcurtin17@gmail.com>
-Eric G. Noriega <enoriega@vizuri.com>
-Eric Hanchrow <ehanchrow@ine.com>
-Eric Lee <thenorthsecedes@gmail.com>
-Eric Myhre <hash@exultant.us>
-Eric Paris <eparis@redhat.com>
-Eric Rafaloff <erafaloff@gmail.com>
-Eric Rosenberg <ehaydenr@gmail.com>
-Eric Sage <eric.david.sage@gmail.com>
-Eric Soderstrom <ericsoderstrom@gmail.com>
-Eric Yang <windfarer@gmail.com>
-Eric-Olivier Lamey <eo@lamey.me>
-Erica Windisch <erica@windisch.us>
-Erik Bray <erik.m.bray@gmail.com>
-Erik Dubbelboer <erik@dubbelboer.com>
-Erik Hollensbe <github@hollensbe.org>
-Erik Inge Bolsø <knan@redpill-linpro.com>
-Erik Kristensen <erik@erikkristensen.com>
-Erik St. Martin <alakriti@gmail.com>
-Erik Weathers <erikdw@gmail.com>
-Erno Hopearuoho <erno.hopearuoho@gmail.com>
-Erwin van der Koogh <info@erronis.nl>
-Ethan Bell <ebgamer29@gmail.com>
-Ethan Mosbaugh <ethan@replicated.com>
-Euan Kemp <euan.kemp@coreos.com>
-Eugen Krizo <eugen.krizo@gmail.com>
-Eugene Yakubovich <eugene.yakubovich@coreos.com>
-Evan Allrich <evan@unguku.com>
-Evan Carmi <carmi@users.noreply.github.com>
-Evan Hazlett <ejhazlett@gmail.com>
-Evan Krall <krall@yelp.com>
-Evan Phoenix <evan@fallingsnow.net>
-Evan Wies <evan@neomantra.net>
-Evelyn Xu <evelynhsu21@gmail.com>
-Everett Toews <everett.toews@rackspace.com>
-Evgeniy Makhrov <e.makhrov@corp.badoo.com>
-Evgeny Shmarnev <shmarnev@gmail.com>
-Evgeny Vereshchagin <evvers@ya.ru>
-Ewa Czechowska <ewa@ai-traders.com>
-Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
-ezbercih <cem.ezberci@gmail.com>
-Ezra Silvera <ezra@il.ibm.com>
-Fabian Kramm <kramm@covexo.com>
-Fabian Lauer <kontakt@softwareschmiede-saar.de>
-Fabian Raetz <fabian.raetz@gmail.com>
-Fabiano Rosas <farosas@br.ibm.com>
-Fabio Falci <fabiofalci@gmail.com>
-Fabio Kung <fabio.kung@gmail.com>
-Fabio Rapposelli <fabio@vmware.com>
-Fabio Rehm <fgrehm@gmail.com>
-Fabrizio Regini <freegenie@gmail.com>
-Fabrizio Soppelsa <fsoppelsa@mirantis.com>
-Faiz Khan <faizkhan00@gmail.com>
-falmp <chico.lopes@gmail.com>
-Fangming Fang <fangming.fang@arm.com>
-Fangyuan Gao <21551127@zju.edu.cn>
-fanjiyun <fan.jiyun@zte.com.cn>
-Fareed Dudhia <fareeddudhia@googlemail.com>
-Fathi Boudra <fathi.boudra@linaro.org>
-Federico Gimenez <fgimenez@coit.es>
-Felipe Oliveira <felipeweb.programador@gmail.com>
-Felipe Ruhland <felipe.ruhland@gmail.com>
-Felix Abecassis <fabecassis@nvidia.com>
-Felix Geisendörfer <felix@debuggable.com>
-Felix Hupfeld <felix@quobyte.com>
-Felix Rabe <felix@rabe.io>
-Felix Ruess <felix.ruess@gmail.com>
-Felix Schindler <fschindler@weluse.de>
-Feng Yan <fy2462@gmail.com>
-Fengtu Wang <wangfengtu@huawei.com>
-Ferenc Szabo <pragmaticfrank@gmail.com>
-Fernando <fermayo@gmail.com>
-Fero Volar <alian@alian.info>
-Ferran Rodenas <frodenas@gmail.com>
-Filipe Brandenburger <filbranden@google.com>
-Filipe Oliveira <contato@fmoliveira.com.br>
-Flavio Castelli <fcastelli@suse.com>
-Flavio Crisciani <flavio.crisciani@docker.com>
-Florian <FWirtz@users.noreply.github.com>
-Florian Klein <florian.klein@free.fr>
-Florian Maier <marsmensch@users.noreply.github.com>
-Florian Noeding <noeding@adobe.com>
-Florian Schmaus <flo@geekplace.eu>
-Florian Weingarten <flo@hackvalue.de>
-Florin Asavoaie <florin.asavoaie@gmail.com>
-Florin Patan <florinpatan@gmail.com>
-fonglh <fonglh@gmail.com>
-Foysal Iqbal <foysal.iqbal.fb@gmail.com>
-Francesc Campoy <campoy@google.com>
-Francesco Mari <mari.francesco@gmail.com>
-Francis Chuang <francis.chuang@boostport.com>
-Francisco Carriedo <fcarriedo@gmail.com>
-Francisco Souza <f@souza.cc>
-Frank Groeneveld <frank@ivaldi.nl>
-Frank Herrmann <fgh@4gh.tv>
-Frank Macreery <frank@macreery.com>
-Frank Rosquin <frank.rosquin+github@gmail.com>
-frankyang <yyb196@gmail.com>
-Fred Lifton <fred.lifton@docker.com>
-Frederick F. Kautz IV <fkautz@redhat.com>
-Frederik Loeffert <frederik@zitrusmedia.de>
-Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
-Freek Kalter <freek@kalteronline.org>
-Frieder Bluemle <frieder.bluemle@gmail.com>
-Fu JinLin <withlin@yeah.net>
-Félix Baylac-Jacqué <baylac.felix@gmail.com>
-Félix Cantournet <felix.cantournet@cloudwatt.com>
-Gabe Rosenhouse <gabe@missionst.com>
-Gabor Nagy <mail@aigeruth.hu>
-Gabriel Linder <linder.gabriel@gmail.com>
-Gabriel Monroy <gabriel@opdemand.com>
-Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
-Gaetan de Villele <gdevillele@gmail.com>
-Galen Sampson <galen.sampson@gmail.com>
-Gang Qiao <qiaohai8866@gmail.com>
-Gareth Rushgrove <gareth@morethanseven.net>
-Garrett Barboza <garrett@garrettbarboza.com>
-Gary Schaetz <gary@schaetzkc.com>
-Gaurav <gaurav.gosec@gmail.com>
-Gaurav Singh <gaurav1086@gmail.com>
-Gaël PORTAY <gael.portay@savoirfairelinux.com>
-Genki Takiuchi <genki@s21g.com>
-GennadySpb <lipenkov@gmail.com>
-Geoffrey Bachelet <grosfrais@gmail.com>
-Geon Kim <geon0250@gmail.com>
-George Kontridze <george@bugsnag.com>
-George MacRorie <gmacr31@gmail.com>
-George Xie <georgexsh@gmail.com>
-Georgi Hristozov <georgi@forkbomb.nl>
-Gereon Frey <gereon.frey@dynport.de>
-German DZ <germ@ndz.com.ar>
-Gert van Valkenhoef <g.h.m.van.valkenhoef@rug.nl>
-Gerwim Feiken <g.feiken@tfe.nl>
-Ghislain Bourgeois <ghislain.bourgeois@gmail.com>
-Giampaolo Mancini <giampaolo@trampolineup.com>
-Gianluca Borello <g.borello@gmail.com>
-Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
-Giovan Isa Musthofa <giovanism@outlook.co.id>
-gissehel <public-devgit-dantus@gissehel.org>
-Giuseppe Mazzotta <gdm85@users.noreply.github.com>
-Gleb Fotengauer-Malinovskiy <glebfm@altlinux.org>
-Gleb M Borisov <borisov.gleb@gmail.com>
-Glyn Normington <gnormington@gopivotal.com>
-GoBella <caili_welcome@163.com>
-Goffert van Gool <goffert@phusion.nl>
-Goldwyn Rodrigues <rgoldwyn@suse.com>
-Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
-Gosuke Miyashita <gosukenator@gmail.com>
-Gou Rao <gou@portworx.com>
-Govinda Fichtner <govinda.fichtner@googlemail.com>
-Grant Millar <rid@cylo.io>
-Grant Reaber <grant.reaber@gmail.com>
-Graydon Hoare <graydon@pobox.com>
-Greg Fausak <greg@tacodata.com>
-Greg Pflaum <gpflaum@users.noreply.github.com>
-Greg Stephens <greg@udon.org>
-Greg Thornton <xdissent@me.com>
-Grzegorz Jaśkiewicz <gj.jaskiewicz@gmail.com>
-Guilhem Lettron <guilhem+github@lettron.fr>
-Guilherme Salgado <gsalgado@gmail.com>
-Guillaume Dufour <gdufour.prestataire@voyages-sncf.com>
-Guillaume J. Charmes <guillaume.charmes@docker.com>
-guoxiuyan <guoxiuyan@huawei.com>
-Guri <odg0318@gmail.com>
-Gurjeet Singh <gurjeet@singh.im>
-Guruprasad <lgp171188@gmail.com>
-Gustav Sinder <gustav.sinder@gmail.com>
-gwx296173 <gaojing3@huawei.com>
-Günter Zöchbauer <guenter@gzoechbauer.com>
-Haichao Yang <yang.haichao@zte.com.cn>
-haikuoliu <haikuo@amazon.com>
-Hakan Özler <hakan.ozler@kodcu.com>
-Hamish Hutchings <moredhel@aoeu.me>
-Hannes Ljungberg <hannes@5monkeys.se>
-Hans Kristian Flaatten <hans@starefossen.com>
-Hans Rødtang <hansrodtang@gmail.com>
-Hao Shu Wei <haosw@cn.ibm.com>
-Hao Zhang <21521210@zju.edu.cn>
-Harald Albers <github@albersweb.de>
-Harald Niesche <harald@niesche.de>
-Harley Laue <losinggeneration@gmail.com>
-Harold Cooper <hrldcpr@gmail.com>
-Harrison Turton <harrisonturton@gmail.com>
-Harry Zhang <harryz@hyper.sh>
-Harshal Patil <harshal.patil@in.ibm.com>
-Harshal Patil <harshalp@linux.vnet.ibm.com>
-He Simei <hesimei@zju.edu.cn>
-He Xiaoxi <tossmilestone@gmail.com>
-He Xin <he_xinworld@126.com>
-heartlock <21521209@zju.edu.cn>
-Hector Castro <hectcastro@gmail.com>
-Helen Xie <chenjg@harmonycloud.cn>
-Henning Sprang <henning.sprang@gmail.com>
-Hiroshi Hatake <hatake@clear-code.com>
-Hiroyuki Sasagawa <hs19870702@gmail.com>
-Hobofan <goisser94@gmail.com>
-Hollie Teal <hollie@docker.com>
-Hong Xu <hong@topbug.net>
-Hongbin Lu <hongbin034@gmail.com>
-Hongxu Jia <hongxu.jia@windriver.com>
-Honza Pokorny <me@honza.ca>
-Hsing-Hui Hsu <hsinghui@amazon.com>
-hsinko <21551195@zju.edu.cn>
-Hu Keping <hukeping@huawei.com>
-Hu Tao <hutao@cn.fujitsu.com>
-HuanHuan Ye <logindaveye@gmail.com>
-Huanzhong Zhang <zhanghuanzhong90@gmail.com>
-Huayi Zhang <irachex@gmail.com>
-Hugo Duncan <hugo@hugoduncan.org>
-Hugo Marisco <0x6875676f@gmail.com>
-Hunter Blanks <hunter@twilio.com>
-huqun <huqun@zju.edu.cn>
-Huu Nguyen <huu@prismskylabs.com>
-hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
-Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
-Iago López Galeiras <iago@kinvolk.io>
-Ian Babrou <ibobrik@gmail.com>
-Ian Bishop <ianbishop@pace7.com>
-Ian Bull <irbull@gmail.com>
-Ian Calvert <ianjcalvert@gmail.com>
-Ian Campbell <ian.campbell@docker.com>
-Ian Chen <ianre657@gmail.com>
-Ian Lee <IanLee1521@gmail.com>
-Ian Main <imain@redhat.com>
-Ian Philpot <ian.philpot@microsoft.com>
-Ian Truslove <ian.truslove@gmail.com>
-Iavael <iavaelooeyt@gmail.com>
-Icaro Seara <icaro.seara@gmail.com>
-Ignacio Capurro <icapurrofagian@gmail.com>
-Igor Dolzhikov <bluesriverz@gmail.com>
-Igor Karpovich <i.karpovich@currencysolutions.com>
-Iliana Weller <iweller@amazon.com>
-Ilkka Laukkanen <ilkka@ilkka.io>
-Ilya Dmitrichenko <errordeveloper@gmail.com>
-Ilya Gusev <mail@igusev.ru>
-Ilya Khlopotov <ilya.khlopotov@gmail.com>
-imre Fitos <imre.fitos+github@gmail.com>
-inglesp <peter.inglesby@gmail.com>
-Ingo Gottwald <in.gottwald@gmail.com>
-Innovimax <innovimax@gmail.com>
-Isaac Dupree <antispam@idupree.com>
-Isabel Jimenez <contact.isabeljimenez@gmail.com>
-Isaiah Grace <irgkenya4@gmail.com>
-Isao Jonas <isao.jonas@gmail.com>
-Iskander Sharipov <quasilyte@gmail.com>
-Ivan Babrou <ibobrik@gmail.com>
-Ivan Fraixedes <ifcdev@gmail.com>
-Ivan Grcic <igrcic@gmail.com>
-Ivan Markin <sw@nogoegst.net>
-J Bruni <joaohbruni@yahoo.com.br>
-J. Nunn <jbnunn@gmail.com>
-Jack Danger Canty <jackdanger@squareup.com>
-Jack Laxson <jackjrabbit@gmail.com>
-Jacob Atzen <jacob@jacobatzen.dk>
-Jacob Edelman <edelman.jd@gmail.com>
-Jacob Tomlinson <jacob@tom.linson.uk>
-Jacob Vallejo <jakeev@amazon.com>
-Jacob Wen <jian.w.wen@oracle.com>
-Jaime Cepeda <jcepedavillamayor@gmail.com>
-Jaivish Kothari <janonymous.codevulture@gmail.com>
-Jake Champlin <jake.champlin.27@gmail.com>
-Jake Moshenko <jake@devtable.com>
-Jake Sanders <jsand@google.com>
-jakedt <jake@devtable.com>
-James Allen <jamesallen0108@gmail.com>
-James Carey <jecarey@us.ibm.com>
-James Carr <james.r.carr@gmail.com>
-James DeFelice <james.defelice@ishisystems.com>
-James Harrison Fisher <jameshfisher@gmail.com>
-James Kyburz <james.kyburz@gmail.com>
-James Kyle <james@jameskyle.org>
-James Lal <james@lightsofapollo.com>
-James Mills <prologic@shortcircuit.net.au>
-James Nesbitt <jnesbitt@mirantis.com>
-James Nugent <james@jen20.com>
-James Turnbull <james@lovedthanlost.net>
-James Watkins-Harvey <jwatkins@progi-media.com>
-Jamie Hannaford <jamie@limetree.org>
-Jamshid Afshar <jafshar@yahoo.com>
-Jan Chren <dev.rindeal@gmail.com>
-Jan Keromnes <janx@linux.com>
-Jan Koprowski <jan.koprowski@gmail.com>
-Jan Pazdziora <jpazdziora@redhat.com>
-Jan Toebes <jan@toebes.info>
-Jan-Gerd Tenberge <janten@gmail.com>
-Jan-Jaap Driessen <janjaapdriessen@gmail.com>
-Jana Radhakrishnan <mrjana@docker.com>
-Jannick Fahlbusch <git@jf-projects.de>
-Januar Wayong <januar@gmail.com>
-Jared Biel <jared.biel@bolderthinking.com>
-Jared Hocutt <jaredh@netapp.com>
-Jaroslaw Zabiello <hipertracker@gmail.com>
-jaseg <jaseg@jaseg.net>
-Jasmine Hegman <jasmine@jhegman.com>
-Jason A. Donenfeld <Jason@zx2c4.com>
-Jason Divock <jdivock@gmail.com>
-Jason Giedymin <jasong@apache.org>
-Jason Green <Jason.Green@AverInformatics.Com>
-Jason Hall <imjasonh@gmail.com>
-Jason Heiss <jheiss@aput.net>
-Jason Livesay <ithkuil@gmail.com>
-Jason McVetta <jason.mcvetta@gmail.com>
-Jason Plum <jplum@devonit.com>
-Jason Shepherd <jason@jasonshepherd.net>
-Jason Smith <jasonrichardsmith@gmail.com>
-Jason Sommer <jsdirv@gmail.com>
-Jason Stangroome <jason@codeassassin.com>
-jaxgeller <jacksongeller@gmail.com>
-Jay <imjching@hotmail.com>
-Jay <teguhwpurwanto@gmail.com>
-Jay Kamat <github@jgkamat.33mail.com>
-Jean Rouge <rougej+github@gmail.com>
-Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
-Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
-Jean-Christophe Berthon <huygens@berthon.eu>
-Jean-Paul Calderone <exarkun@twistedmatrix.com>
-Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
-Jean-Tiare Le Bigot <jt@yadutaf.fr>
-Jeeva S. Chelladhurai <sjeeva@gmail.com>
-Jeff Anderson <jeff@docker.com>
-Jeff Hajewski <jeff.hajewski@gmail.com>
-Jeff Johnston <jeff.johnston.mn@gmail.com>
-Jeff Lindsay <progrium@gmail.com>
-Jeff Mickey <j@codemac.net>
-Jeff Minard <jeff@creditkarma.com>
-Jeff Nickoloff <jeff.nickoloff@gmail.com>
-Jeff Silberman <jsilberm@gmail.com>
-Jeff Welch <whatthejeff@gmail.com>
-Jeffrey Bolle <jeffreybolle@gmail.com>
-Jeffrey Morgan <jmorganca@gmail.com>
-Jeffrey van Gogh <jvg@google.com>
-Jenny Gebske <jennifer@gebske.de>
-Jeremy Chambers <jeremy@thehipbot.com>
-Jeremy Grosser <jeremy@synack.me>
-Jeremy Price <jprice.rhit@gmail.com>
-Jeremy Qian <vanpire110@163.com>
-Jeremy Unruh <jeremybunruh@gmail.com>
-Jeremy Yallop <yallop@docker.com>
-Jeroen Franse <jeroenfranse@gmail.com>
-Jeroen Jacobs <github@jeroenj.be>
-Jesse Dearing <jesse.dearing@gmail.com>
-Jesse Dubay <jesse@thefortytwo.net>
-Jessica Frazelle <jess@oxide.computer>
-Jezeniel Zapanta <jpzapanta22@gmail.com>
-Jhon Honce <jhonce@redhat.com>
-Ji.Zhilong <zhilongji@gmail.com>
-Jian Liao <jliao@alauda.io>
-Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
-Jiang Jinyang <jjyruby@gmail.com>
-Jie Luo <luo612@zju.edu.cn>
-Jie Ma <jienius@outlook.com>
-Jihyun Hwang <jhhwang@telcoware.com>
-Jilles Oldenbeuving <ojilles@gmail.com>
-Jim Alateras <jima@comware.com.au>
-Jim Ehrismann <jim.ehrismann@docker.com>
-Jim Galasyn <jim.galasyn@docker.com>
-Jim Minter <jminter@redhat.com>
-Jim Perrin <jperrin@centos.org>
-Jimmy Cuadra <jimmy@jimmycuadra.com>
-Jimmy Puckett <jimmy.puckett@spinen.com>
-Jimmy Song <rootsongjc@gmail.com>
-Jinsoo Park <cellpjs@gmail.com>
-Jintao Zhang <zhangjintao9020@gmail.com>
-Jiri Appl <jiria@microsoft.com>
-Jiri Popelka <jpopelka@redhat.com>
-Jiuyue Ma <majiuyue@huawei.com>
-Jiří Župka <jzupka@redhat.com>
-Joao Fernandes <joao.fernandes@docker.com>
-Joao Trindade <trindade.joao@gmail.com>
-Joe Beda <joe.github@bedafamily.com>
-Joe Doliner <jdoliner@pachyderm.io>
-Joe Ferguson <joe@infosiftr.com>
-Joe Gordon <joe.gordon0@gmail.com>
-Joe Shaw <joe@joeshaw.org>
-Joe Van Dyk <joe@tanga.com>
-Joel Friedly <joelfriedly@gmail.com>
-Joel Handwell <joelhandwell@gmail.com>
-Joel Hansson <joel.hansson@ecraft.com>
-Joel Wurtz <jwurtz@jolicode.com>
-Joey Geiger <jgeiger@gmail.com>
-Joey Geiger <jgeiger@users.noreply.github.com>
-Joey Gibson <joey@joeygibson.com>
-Joffrey F <joffrey@docker.com>
-Johan Euphrosine <proppy@google.com>
-Johan Rydberg <johan.rydberg@gmail.com>
-Johanan Lieberman <johanan.lieberman@gmail.com>
-Johannes 'fish' Ziemke <github@freigeist.org>
-John Costa <john.costa@gmail.com>
-John Feminella <jxf@jxf.me>
-John Gardiner Myers <jgmyers@proofpoint.com>
-John Gossman <johngos@microsoft.com>
-John Harris <john@johnharris.io>
-John Howard <github@lowenna.com>
-John Laswell <john.n.laswell@gmail.com>
-John Maguire <jmaguire@duosecurity.com>
-John Mulhausen <john@docker.com>
-John OBrien III <jobrieniii@yahoo.com>
-John Starks <jostarks@microsoft.com>
-John Stephens <johnstep@docker.com>
-John Tims <john.k.tims@gmail.com>
-John V. Martinez <jvmatl@gmail.com>
-John Warwick <jwarwick@gmail.com>
-John Willis <john.willis@docker.com>
-Jon Johnson <jonjohnson@google.com>
-Jon Surrell <jon.surrell@gmail.com>
-Jon Wedaman <jweede@gmail.com>
-Jonas Dohse <jonas@dohse.ch>
-Jonas Heinrich <Jonas@JonasHeinrich.com>
-Jonas Pfenniger <jonas@pfenniger.name>
-Jonathan A. Schweder <jonathanschweder@gmail.com>
-Jonathan A. Sternberg <jonathansternberg@gmail.com>
-Jonathan Boulle <jonathanboulle@gmail.com>
-Jonathan Camp <jonathan@irondojo.com>
-Jonathan Choy <jonathan.j.choy@gmail.com>
-Jonathan Dowland <jon+github@alcopop.org>
-Jonathan Lebon <jlebon@redhat.com>
-Jonathan Lomas <jonathan@floatinglomas.ca>
-Jonathan McCrohan <jmccrohan@gmail.com>
-Jonathan Mueller <j.mueller@apoveda.ch>
-Jonathan Pares <jonathanpa@users.noreply.github.com>
-Jonathan Rudenberg <jonathan@titanous.com>
-Jonathan Stoppani <jonathan.stoppani@divio.com>
-Jonh Wendell <jonh.wendell@redhat.com>
-Joni Sar <yoni@cocycles.com>
-Joost Cassee <joost@cassee.net>
-Jordan Arentsen <blissdev@gmail.com>
-Jordan Jennings <jjn2009@gmail.com>
-Jordan Sissel <jls@semicomplete.com>
-Jorge Marin <chipironcin@users.noreply.github.com>
-Jorit Kleine-Möllhoff <joppich@bricknet.de>
-Jose Diaz-Gonzalez <email@josediazgonzalez.com>
-Joseph Anthony Pasquale Holsten <joseph@josephholsten.com>
-Joseph Hager <ajhager@gmail.com>
-Joseph Kern <jkern@semafour.net>
-Joseph Rothrock <rothrock@rothrock.org>
-Josh <jokajak@gmail.com>
-Josh Bodah <jb3689@yahoo.com>
-Josh Bonczkowski <josh.bonczkowski@gmail.com>
-Josh Chorlton <jchorlton@gmail.com>
-Josh Eveleth <joshe@opendns.com>
-Josh Hawn <josh.hawn@docker.com>
-Josh Horwitz <horwitz@addthis.com>
-Josh Poimboeuf <jpoimboe@redhat.com>
-Josh Soref <jsoref@gmail.com>
-Josh Wilson <josh.wilson@fivestars.com>
-Josiah Kiehl <jkiehl@riotgames.com>
-José Tomás Albornoz <jojo@eljojo.net>
-Joyce Jang <mail@joycejang.com>
-JP <jpellerin@leapfrogonline.com>
-Julian Taylor <jtaylor.debian@googlemail.com>
-Julien Barbier <write0@gmail.com>
-Julien Bisconti <veggiemonk@users.noreply.github.com>
-Julien Bordellier <julienbordellier@gmail.com>
-Julien Dubois <julien.dubois@gmail.com>
-Julien Kassar <github@kassisol.com>
-Julien Maitrehenry <julien.maitrehenry@me.com>
-Julien Pervillé <julien.perville@perfect-memory.com>
-Julien Pivotto <roidelapluie@inuits.eu>
-Julio Guerra <julio@sqreen.com>
-Julio Montes <imc.coder@gmail.com>
-Jun-Ru Chang <jrjang@gmail.com>
-Jussi Nummelin <jussi.nummelin@gmail.com>
-Justas Brazauskas <brazauskasjustas@gmail.com>
-Justen Martin <jmart@the-coder.com>
-Justin Cormack <justin.cormack@docker.com>
-Justin Force <justin.force@gmail.com>
-Justin Menga <justin.menga@gmail.com>
-Justin Plock <jplock@users.noreply.github.com>
-Justin Simonelis <justin.p.simonelis@gmail.com>
-Justin Terry <juterry@microsoft.com>
-Justyn Temme <justyntemme@gmail.com>
-Jyrki Puttonen <jyrkiput@gmail.com>
-Jérémy Leherpeur <amenophis@leherpeur.net>
-Jérôme Petazzoni <jerome.petazzoni@docker.com>
-Jörg Thalheim <joerg@higgsboson.tk>
-K. Heller <pestophagous@gmail.com>
-Kai Blin <kai@samba.org>
-Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
-Kamil Domański <kamil@domanski.co>
-Kamjar Gerami <kami.gerami@gmail.com>
-Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
-Kara Alexandra <kalexandra@us.ibm.com>
-Karan Lyons <karan@karanlyons.com>
-Kareem Khazem <karkhaz@karkhaz.com>
-kargakis <kargakis@users.noreply.github.com>
-Karl Grzeszczak <karlgrz@gmail.com>
-Karol Duleba <mr.fuxi@gmail.com>
-Karthik Karanth <karanth.karthik@gmail.com>
-Karthik Nayak <karthik.188@gmail.com>
-Kasper Fabæch Brandt <poizan@poizan.dk>
-Kate Heddleston <kate.heddleston@gmail.com>
-Katie McLaughlin <katie@glasnt.com>
-Kato Kazuyoshi <kato.kazuyoshi@gmail.com>
-Katrina Owen <katrina.owen@gmail.com>
-Kawsar Saiyeed <kawsar.saiyeed@projiris.com>
-Kay Yan <kay.yan@daocloud.io>
-kayrus <kay.diam@gmail.com>
-Kazuhiro Sera <seratch@gmail.com>
-Ke Li <kel@splunk.com>
-Ke Xu <leonhartx.k@gmail.com>
-Kei Ohmura <ohmura.kei@gmail.com>
-Keith Hudgins <greenman@greenman.org>
-Keli Hu <dev@keli.hu>
-Ken Cochrane <kencochrane@gmail.com>
-Ken Herner <kherner@progress.com>
-Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
-Ken Reese <krrgithub@gmail.com>
-Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
-Kenjiro Nakayama <nakayamakenjiro@gmail.com>
-Kent Johnson <kentoj@gmail.com>
-Kenta Tada <Kenta.Tada@sony.com>
-Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
-Kevin Burke <kev@inburke.com>
-Kevin Clark <kevin.clark@gmail.com>
-Kevin Feyrer <kevin.feyrer@btinternet.com>
-Kevin J. Lynagh <kevin@keminglabs.com>
-Kevin Jing Qiu <kevin@idempotent.ca>
-Kevin Kern <kaiwentan@harmonycloud.cn>
-Kevin Menard <kevin@nirvdrum.com>
-Kevin Meredith <kevin.m.meredith@gmail.com>
-Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
-Kevin Parsons <kevpar@microsoft.com>
-Kevin Richardson <kevin@kevinrichardson.co>
-Kevin Shi <kshi@andrew.cmu.edu>
-Kevin Wallace <kevin@pentabarf.net>
-Kevin Yap <me@kevinyap.ca>
-Keyvan Fatehi <keyvanfatehi@gmail.com>
-kies <lleelm@gmail.com>
-Kim BKC Carlbacker <kim.carlbacker@gmail.com>
-Kim Eik <kim@heldig.org>
-Kimbro Staken <kstaken@kstaken.com>
-Kir Kolyshkin <kolyshkin@gmail.com>
-Kiran Gangadharan <kiran.daredevil@gmail.com>
-Kirill SIbirev <l0kix2@gmail.com>
-knappe <tyler.knappe@gmail.com>
-Kohei Tsuruta <coheyxyz@gmail.com>
-Koichi Shiraishi <k@zchee.io>
-Konrad Kleine <konrad.wilhelm.kleine@gmail.com>
-Konstantin Gribov <grossws@gmail.com>
-Konstantin L <sw.double@gmail.com>
-Konstantin Pelykh <kpelykh@zettaset.com>
-Krasi Georgiev <krasi@vip-consult.solutions>
-Krasimir Georgiev <support@vip-consult.co.uk>
-Kris-Mikael Krister <krismikael@protonmail.com>
-Kristian Haugene <kristian.haugene@capgemini.com>
-Kristina Zabunova <triara.xiii@gmail.com>
-Krystian Wojcicki <kwojcicki@sympatico.ca>
-Kun Zhang <zkazure@gmail.com>
-Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
-Kunal Tyagi <tyagi.kunal@live.com>
-Kyle Conroy <kyle.j.conroy@gmail.com>
-Kyle Linden <linden.kyle@gmail.com>
-Kyle Wuolle <kyle.wuolle@gmail.com>
-kyu <leehk1227@gmail.com>
-Lachlan Coote <lcoote@vmware.com>
-Lai Jiangshan <jiangshanlai@gmail.com>
-Lajos Papp <lajos.papp@sequenceiq.com>
-Lakshan Perera <lakshan@laktek.com>
-Lalatendu Mohanty <lmohanty@redhat.com>
-Lance Chen <cyen0312@gmail.com>
-Lance Kinley <lkinley@loyaltymethods.com>
-Lars Butler <Lars.Butler@gmail.com>
-Lars Kellogg-Stedman <lars@redhat.com>
-Lars R. Damerow <lars@pixar.com>
-Lars-Magnus Skog <ralphtheninja@riseup.net>
-Laszlo Meszaros <lacienator@gmail.com>
-Laura Frank <ljfrank@gmail.com>
-Laurent Erignoux <lerignoux@gmail.com>
-Laurie Voss <github@seldo.com>
-Leandro Siqueira <leandro.siqueira@gmail.com>
-Lee Chao <932819864@qq.com>
-Lee, Meng-Han <sunrisedm4@gmail.com>
-leeplay <hyeongkyu.lee@navercorp.com>
-Lei Gong <lgong@alauda.io>
-Lei Jitang <leijitang@huawei.com>
-Len Weincier <len@cloudafrica.net>
-Lennie <github@consolejunkie.net>
-Leo Gallucci <elgalu3@gmail.com>
-Leszek Kowalski <github@leszekkowalski.pl>
-Levi Blackstone <levi.blackstone@rackspace.com>
-Levi Gross <levi@levigross.com>
-Lewis Daly <lewisdaly@me.com>
-Lewis Marshall <lewis@lmars.net>
-Lewis Peckover <lew+github@lew.io>
-Li Yi <denverdino@gmail.com>
-Liam Macgillavry <liam@kumina.nl>
-Liana Lo <liana.lixia@gmail.com>
-Liang Mingqiang <mqliang.zju@gmail.com>
-Liang-Chi Hsieh <viirya@gmail.com>
-Liao Qingwei <liaoqingwei@huawei.com>
-Lifubang <lifubang@acmcoder.com>
-Lihua Tang <lhtang@alauda.io>
-Lily Guo <lily.guo@docker.com>
-limsy <seongyeol37@gmail.com>
-Lin Lu <doraalin@163.com>
-LingFaKe <lingfake@huawei.com>
-Linus Heckemann <lheckemann@twig-world.com>
-Liran Tal <liran.tal@gmail.com>
-Liron Levin <liron@twistlock.com>
-Liu Bo <bo.li.liu@oracle.com>
-Liu Hua <sdu.liu@huawei.com>
-liwenqi <vikilwq@zju.edu.cn>
-lixiaobing10051267 <li.xiaobing1@zte.com.cn>
-Liz Zhang <lizzha@microsoft.com>
-LIZAO LI <lzlarryli@gmail.com>
-Lizzie Dixon <_@lizzie.io>
-Lloyd Dewolf <foolswisdom@gmail.com>
-Lokesh Mandvekar <lsm5@fedoraproject.org>
-longliqiang88 <394564827@qq.com>
-Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
-Lorenzo Fontana <fontanalorenz@gmail.com>
-Lotus Fenn <fenn.lotus@gmail.com>
-Louis Delossantos <ldelossa.ld@gmail.com>
-Louis Opter <kalessin@kalessin.fr>
-Luca Favatella <luca.favatella@erlang-solutions.com>
-Luca Marturana <lucamarturana@gmail.com>
-Luca Orlandi <luca.orlandi@gmail.com>
-Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
-Lucas Chan <lucas-github@lucaschan.com>
-Lucas Chi <lucas@teacherspayteachers.com>
-Lucas Molas <lmolas@fundacionsadosky.org.ar>
-Lucas Silvestre <lukas.silvestre@gmail.com>
-Luciano Mores <leslau@gmail.com>
-Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
-Luiz Svoboda <luizek@gmail.com>
-Lukas Heeren <lukas-heeren@hotmail.com>
-Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
-lukaspustina <lukas.pustina@centerdevice.com>
-Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
-Luke Marsden <me@lukemarsden.net>
-Lyn <energylyn@zju.edu.cn>
-Lynda O'Leary <lyndaoleary29@gmail.com>
-Lénaïc Huard <lhuard@amadeus.com>
-Ma Müller <mueller-ma@users.noreply.github.com>
-Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
-Mabin <bin.ma@huawei.com>
-Madhan Raj Mookkandy <MadhanRaj.Mookkandy@microsoft.com>
-Madhav Puri <madhav.puri@gmail.com>
-Madhu Venugopal <madhu@socketplane.io>
-Mageee <fangpuyi@foxmail.com>
-Mahesh Tiyyagura <tmahesh@gmail.com>
-malnick <malnick@gmail..com>
-Malte Janduda <mail@janduda.net>
-Manfred Touron <m@42.am>
-Manfred Zabarauskas <manfredas@zabarauskas.com>
-Manjunath A Kumatagi <mkumatag@in.ibm.com>
-Mansi Nahar <mmn4185@rit.edu>
-Manuel Meurer <manuel@krautcomputing.com>
-Manuel Rüger <manuel@rueg.eu>
-Manuel Woelker <github@manuel.woelker.org>
-mapk0y <mapk0y@gmail.com>
-Marc Abramowitz <marc@marc-abramowitz.com>
-Marc Kuo <kuomarc2@gmail.com>
-Marc Tamsky <mtamsky@gmail.com>
-Marcel Edmund Franke <marcel.edmund.franke@gmail.com>
-Marcelo Horacio Fortino <info@fortinux.com>
-Marcelo Salazar <chelosalazar@gmail.com>
-Marco Hennings <marco.hennings@freiheit.com>
-Marcus Cobden <mcobden@cisco.com>
-Marcus Farkas <toothlessgear@finitebox.com>
-Marcus Linke <marcus.linke@gmx.de>
-Marcus Martins <marcus@docker.com>
-Marcus Ramberg <marcus@nordaaker.com>
-Marek Goldmann <marek.goldmann@gmail.com>
-Marian Marinov <mm@yuhu.biz>
-Marianna Tessel <mtesselh@gmail.com>
-Mario Loriedo <mario.loriedo@gmail.com>
-Marius Gundersen <me@mariusgundersen.net>
-Marius Sturm <marius@graylog.com>
-Marius Voila <marius.voila@gmail.com>
-Mark Allen <mrallen1@yahoo.com>
-Mark Jeromin <mark.jeromin@sysfrog.net>
-Mark McGranaghan <mmcgrana@gmail.com>
-Mark McKinstry <mmckinst@umich.edu>
-Mark Milstein <mark@epiloque.com>
-Mark Oates <fl0yd@me.com>
-Mark Parker <godefroi@users.noreply.github.com>
-Mark West <markewest@gmail.com>
-Markan Patel <mpatel678@gmail.com>
-Marko Mikulicic <mmikulicic@gmail.com>
-Marko Tibold <marko@tibold.nl>
-Markus Fix <lispmeister@gmail.com>
-Markus Kortlang <hyp3rdino@googlemail.com>
-Martijn Dwars <ikben@martijndwars.nl>
-Martijn van Oosterhout <kleptog@svana.org>
-Martin Honermeyer <maze@strahlungsfrei.de>
-Martin Kelly <martin@surround.io>
-Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
-Martin Muzatko <martin@happy-css.com>
-Martin Redmond <redmond.martin@gmail.com>
-Mary Anthony <mary.anthony@docker.com>
-Masahito Zembutsu <zembutsu@users.noreply.github.com>
-Masato Ohba <over.rye@gmail.com>
-Masayuki Morita <minamijoyo@gmail.com>
-Mason Malone <mason.malone@gmail.com>
-Mateusz Sulima <sulima.mateusz@gmail.com>
-Mathias Monnerville <mathias@monnerville.com>
-Mathieu Champlon <mathieu.champlon@docker.com>
-Mathieu Le Marec - Pasquet <kiorky@cryptelium.net>
-Mathieu Parent <math.parent@gmail.com>
-Matt Apperson <me@mattapperson.com>
-Matt Bachmann <bachmann.matt@gmail.com>
-Matt Bentley <matt.bentley@docker.com>
-Matt Haggard <haggardii@gmail.com>
-Matt Hoyle <matt@deployable.co>
-Matt McCormick <matt.mccormick@kitware.com>
-Matt Moore <mattmoor@google.com>
-Matt Richardson <matt@redgumtech.com.au>
-Matt Rickard <mrick@google.com>
-Matt Robenolt <matt@ydekproductions.com>
-Matt Schurenko <matt.schurenko@gmail.com>
-Matt Williams <mattyw@me.com>
-Matthew Heon <mheon@redhat.com>
-Matthew Lapworth <matthewl@bit-shift.net>
-Matthew Mayer <matthewkmayer@gmail.com>
-Matthew Mosesohn <raytrac3r@gmail.com>
-Matthew Mueller <mattmuelle@gmail.com>
-Matthew Riley <mattdr@google.com>
-Matthias Klumpp <matthias@tenstral.net>
-Matthias Kühnle <git.nivoc@neverbox.com>
-Matthias Rampke <mr@soundcloud.com>
-Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
-Mattias Jernberg <nostrad@gmail.com>
-Mauricio Garavaglia <mauricio@medallia.com>
-mauriyouth <mauriyouth@gmail.com>
-Max Harmathy <max.harmathy@web.de>
-Max Shytikov <mshytikov@gmail.com>
-Maxim Fedchyshyn <sevmax@gmail.com>
-Maxim Ivanov <ivanov.maxim@gmail.com>
-Maxim Kulkin <mkulkin@mirantis.com>
-Maxim Treskin <zerthurd@gmail.com>
-Maxime Petazzoni <max@signalfuse.com>
-Maximiliano Maccanti <maccanti@amazon.com>
-Maxwell <csuhp007@gmail.com>
-Meaglith Ma <genedna@gmail.com>
-meejah <meejah@meejah.ca>
-Megan Kostick <mkostick@us.ibm.com>
-Mehul Kar <mehul.kar@gmail.com>
-Mei ChunTao <mei.chuntao@zte.com.cn>
-Mengdi Gao <usrgdd@gmail.com>
-Mert Yazıcıoğlu <merty@users.noreply.github.com>
-mgniu <mgniu@dataman-inc.com>
-Micah Zoltu <micah@newrelic.com>
-Michael A. Smith <michael@smith-li.com>
-Michael Bridgen <mikeb@squaremobius.net>
-Michael Brown <michael@netdirect.ca>
-Michael Chiang <mchiang@docker.com>
-Michael Crosby <michael@docker.com>
-Michael Currie <mcurrie@bruceforceresearch.com>
-Michael Friis <friism@gmail.com>
-Michael Gorsuch <gorsuch@github.com>
-Michael Grauer <michael.grauer@kitware.com>
-Michael Holzheu <holzheu@linux.vnet.ibm.com>
-Michael Hudson-Doyle <michael.hudson@canonical.com>
-Michael Huettermann <michael@huettermann.net>
-Michael Irwin <mikesir87@gmail.com>
-Michael Käufl <docker@c.michael-kaeufl.de>
-Michael Neale <michael.neale@gmail.com>
-Michael Nussbaum <michael.nussbaum@getbraintree.com>
-Michael Prokop <github@michael-prokop.at>
-Michael Scharf <github@scharf.gr>
-Michael Spetsiotis <michael_spets@hotmail.com>
-Michael Stapelberg <michael+gh@stapelberg.de>
-Michael Steinert <mike.steinert@gmail.com>
-Michael Thies <michaelthies78@gmail.com>
-Michael West <mwest@mdsol.com>
-Michael Zhao <michael.zhao@arm.com>
-Michal Fojtik <mfojtik@redhat.com>
-Michal Gebauer <mishak@mishak.net>
-Michal Jemala <michal.jemala@gmail.com>
-Michal Minář <miminar@redhat.com>
-Michal Wieczorek <wieczorek-michal@wp.pl>
-Michaël Pailloncy <mpapo.dev@gmail.com>
-Michał Czeraszkiewicz <czerasz@gmail.com>
-Michał Gryko <github@odkurzacz.org>
-Michiel de Jong <michiel@unhosted.org>
-Mickaël Fortunato <morsi.morsicus@gmail.com>
-Mickaël Remars <mickael@remars.com>
-Miguel Angel Fernández <elmendalerenda@gmail.com>
-Miguel Morales <mimoralea@gmail.com>
-Mihai Borobocea <MihaiBorob@gmail.com>
-Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
-Mike Brown <brownwm@us.ibm.com>
-Mike Bush <mpbush@gmail.com>
-Mike Casas <mkcsas0@gmail.com>
-Mike Chelen <michael.chelen@gmail.com>
-Mike Danese <mikedanese@google.com>
-Mike Dillon <mike@embody.org>
-Mike Dougherty <mike.dougherty@docker.com>
-Mike Estes <mike.estes@logos.com>
-Mike Gaffney <mike@uberu.com>
-Mike Goelzer <mike.goelzer@docker.com>
-Mike Leone <mleone896@gmail.com>
-Mike Lundy <mike@fluffypenguin.org>
-Mike MacCana <mike.maccana@gmail.com>
-Mike Naberezny <mike@naberezny.com>
-Mike Snitzer <snitzer@redhat.com>
-mikelinjie <294893458@qq.com>
-Mikhail Sobolev <mss@mawhrin.net>
-Miklos Szegedi <miklos.szegedi@cloudera.com>
-Milind Chawre <milindchawre@gmail.com>
-Miloslav Trmač <mitr@redhat.com>
-mingqing <limingqing@cyou-inc.com>
-Mingzhen Feng <fmzhen@zju.edu.cn>
-Misty Stanley-Jones <misty@docker.com>
-Mitch Capper <mitch.capper@gmail.com>
-Mizuki Urushida <z11111001011@gmail.com>
-mlarcher <github@ringabell.org>
-Mohammad Banikazemi <mb@us.ibm.com>
-Mohammad Nasirifar <farnasirim@gmail.com>
-Mohammed Aaqib Ansari <maaquib@gmail.com>
-Mohit Soni <mosoni@ebay.com>
-Moorthy RS <rsmoorthy@gmail.com>
-Morgan Bauer <mbauer@us.ibm.com>
-Morgante Pell <morgante.pell@morgante.net>
-Morgy93 <thomas@ulfertsprygoda.de>
-Morten Siebuhr <sbhr@sbhr.dk>
-Morton Fox <github@qslw.com>
-Moysés Borges <moysesb@gmail.com>
-mrfly <mr.wrfly@gmail.com>
-Mrunal Patel <mrunalp@gmail.com>
-Muayyad Alsadi <alsadi@gmail.com>
-Mustafa Akın <mustafa91@gmail.com>
-Muthukumar R <muthur@gmail.com>
-Máximo Cuadros <mcuadros@gmail.com>
-Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
-Nace Oroz <orkica@gmail.com>
-Nahum Shalman <nshalman@omniti.com>
-Nakul Pathak <nakulpathak3@hotmail.com>
-Nalin Dahyabhai <nalin@redhat.com>
-Nan Monnand Deng <monnand@gmail.com>
-Naoki Orii <norii@cs.cmu.edu>
-Natalie Parker <nparker@omnifone.com>
-Natanael Copa <natanael.copa@docker.com>
-Natasha Jarus <linuxmercedes@gmail.com>
-Nate Brennand <nate.brennand@clever.com>
-Nate Eagleson <nate@nateeag.com>
-Nate Jones <nate@endot.org>
-Nathan Hsieh <hsieh.nathan@gmail.com>
-Nathan Kleyn <nathan@nathankleyn.com>
-Nathan LeClaire <nathan.leclaire@docker.com>
-Nathan McCauley <nathan.mccauley@docker.com>
-Nathan Williams <nathan@teamtreehouse.com>
-Naveed Jamil <naveed.jamil@tenpearls.com>
-Neal McBurnett <neal@mcburnett.org>
-Neil Horman <nhorman@tuxdriver.com>
-Neil Peterson <neilpeterson@outlook.com>
-Nelson Chen <crazysim@gmail.com>
-Neyazul Haque <nuhaque@gmail.com>
-Nghia Tran <nghia@google.com>
-Niall O'Higgins <niallo@unworkable.org>
-Nicholas E. Rabenau <nerab@gmx.at>
-Nick Adcock <nick.adcock@docker.com>
-Nick DeCoursin <n.decoursin@foodpanda.com>
-Nick Irvine <nfirvine@nfirvine.com>
-Nick Neisen <nwneisen@gmail.com>
-Nick Parker <nikaios@gmail.com>
-Nick Payne <nick@kurai.co.uk>
-Nick Russo <nicholasjamesrusso@gmail.com>
-Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
-Nick Stinemates <nick@stinemates.org>
-NickrenREN <yuquan.ren@easystack.cn>
-Nicola Kabar <nicolaka@gmail.com>
-Nicolas Borboën <ponsfrilus@gmail.com>
-Nicolas De Loof <nicolas.deloof@gmail.com>
-Nicolas Dudebout <nicolas.dudebout@gatech.edu>
-Nicolas Goy <kuon@goyman.com>
-Nicolas Kaiser <nikai@nikai.net>
-Nicolas Sterchele <sterchele.nicolas@gmail.com>
-Nicolas V Castet <nvcastet@us.ibm.com>
-Nicolás Hock Isaza <nhocki@gmail.com>
-Nigel Poulton <nigelpoulton@hotmail.com>
-Nik Nyby <nikolas@gnu.org>
-Nikhil Chawla <chawlanikhil24@gmail.com>
-NikolaMandic <mn080202@gmail.com>
-Nikolas Garofil <nikolas.garofil@uantwerpen.be>
-Nikolay Edigaryev <edigaryev@gmail.com>
-Nikolay Milovanov <nmil@itransformers.net>
-Nirmal Mehta <nirmalkmehta@gmail.com>
-Nishant Totla <nishanttotla@gmail.com>
-NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
-Noah Meyerhans <nmeyerha@amazon.com>
-Noah Treuhaft <noah.treuhaft@docker.com>
-NobodyOnSE <ich@sektor.selfip.com>
-noducks <onemannoducks@gmail.com>
-Nolan Darilek <nolan@thewordnerd.info>
-Noriki Nakamura <noriki.nakamura@miraclelinux.com>
-nponeccop <andy.melnikov@gmail.com>
-Nuutti Kotivuori <naked@iki.fi>
-nzwsch <hi@nzwsch.com>
-O.S. Tezer <ostezer@gmail.com>
-objectified <objectified@gmail.com>
-Odin Ugedal <odin@ugedal.com>
-Oguz Bilgic <fisyonet@gmail.com>
-Oh Jinkyun <tintypemolly@gmail.com>
-Ohad Schneider <ohadschn@users.noreply.github.com>
-ohmystack <jun.jiang02@ele.me>
-Ole Reifschneider <mail@ole-reifschneider.de>
-Oliver Neal <ItsVeryWindy@users.noreply.github.com>
-Oliver Reason <oli@overrateddev.co>
-Olivier Gambier <dmp42@users.noreply.github.com>
-Olle Jonsson <olle.jonsson@gmail.com>
-Olli Janatuinen <olli.janatuinen@gmail.com>
-Olly Pomeroy <oppomeroy@gmail.com>
-Omri Shiv <Omri.Shiv@teradata.com>
-Oriol Francès <oriolfa@gmail.com>
-Oskar Niburski <oskarniburski@gmail.com>
-Otto Kekäläinen <otto@seravo.fi>
-Ouyang Liduo <oyld0210@163.com>
-Ovidio Mallo <ovidio.mallo@gmail.com>
-Panagiotis Moustafellos <pmoust@elastic.co>
-Paolo G. Giarrusso <p.giarrusso@gmail.com>
-Pascal <pascalgn@users.noreply.github.com>
-Pascal Bach <pascal.bach@siemens.com>
-Pascal Borreli <pascal@borreli.com>
-Pascal Hartig <phartig@rdrei.net>
-Patrick Böänziger <patrick.baenziger@bsi-software.com>
-Patrick Devine <patrick.devine@docker.com>
-Patrick Hemmer <patrick.hemmer@gmail.com>
-Patrick Stapleton <github@gdi2290.com>
-Patrik Cyvoct <patrik@ptrk.io>
-pattichen <craftsbear@gmail.com>
-Paul <paul9869@gmail.com>
-paul <paul@inkling.com>
-Paul Annesley <paul@annesley.cc>
-Paul Bellamy <paul.a.bellamy@gmail.com>
-Paul Bowsher <pbowsher@globalpersonals.co.uk>
-Paul Furtado <pfurtado@hubspot.com>
-Paul Hammond <paul@paulhammond.org>
-Paul Jimenez <pj@place.org>
-Paul Kehrer <paul.l.kehrer@gmail.com>
-Paul Lietar <paul@lietar.net>
-Paul Liljenberg <liljenberg.paul@gmail.com>
-Paul Morie <pmorie@gmail.com>
-Paul Nasrat <pnasrat@gmail.com>
-Paul Weaver <pauweave@cisco.com>
-Paulo Ribeiro <paigr.io@gmail.com>
-Pavel Lobashov <ShockwaveNN@gmail.com>
-Pavel Matěja <pavel@verotel.cz>
-Pavel Pletenev <cpp.create@gmail.com>
-Pavel Pospisil <pospispa@gmail.com>
-Pavel Sutyrin <pavel.sutyrin@gmail.com>
-Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
-Pavlos Ratis <dastergon@gentoo.org>
-Pavol Vargovcik <pallly.vargovcik@gmail.com>
-Pawel Konczalski <mail@konczalski.de>
-Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
-Peggy Li <peggyli.224@gmail.com>
-Pei Su <sillyousu@gmail.com>
-Peng Tao <bergwolf@gmail.com>
-Penghan Wang <ph.wang@daocloud.io>
-Per Weijnitz <per.weijnitz@gmail.com>
-perhapszzy@sina.com <perhapszzy@sina.com>
-Peter Bourgon <peter@bourgon.org>
-Peter Braden <peterbraden@peterbraden.co.uk>
-Peter Bücker <peter.buecker@pressrelations.de>
-Peter Choi <phkchoi89@gmail.com>
-Peter Dave Hello <hsu@peterdavehello.org>
-Peter Edge <peter.edge@gmail.com>
-Peter Ericson <pdericson@gmail.com>
-Peter Esbensen <pkesbensen@gmail.com>
-Peter Jaffe <pjaffe@nevo.com>
-Peter Kang <peter@spell.run>
-Peter Malmgren <ptmalmgren@gmail.com>
-Peter Salvatore <peter@psftw.com>
-Peter Volpe <petervo@redhat.com>
-Peter Waller <p@pwaller.net>
-Petr Švihlík <svihlik.petr@gmail.com>
-Phil <underscorephil@gmail.com>
-Phil Estes <estesp@linux.vnet.ibm.com>
-Phil Spitler <pspitler@gmail.com>
-Philip Alexander Etling <paetling@gmail.com>
-Philip Monroe <phil@philmonroe.com>
-Philipp Gillé <philipp.gille@gmail.com>
-Philipp Wahala <philipp.wahala@gmail.com>
-Philipp Weissensteiner <mail@philippweissensteiner.com>
-Phillip Alexander <git@phillipalexander.io>
-phineas <phin@phineas.io>
-pidster <pid@pidster.com>
-Piergiuliano Bossi <pgbossi@gmail.com>
-Pierre <py@poujade.org>
-Pierre Carrier <pierre@meteor.com>
-Pierre Dal-Pra <dalpra.pierre@gmail.com>
-Pierre Wacrenier <pierre.wacrenier@gmail.com>
-Pierre-Alain RIVIERE <pariviere@ippon.fr>
-Piotr Bogdan <ppbogdan@gmail.com>
-pixelistik <pixelistik@users.noreply.github.com>
-Porjo <porjo38@yahoo.com.au>
-Poul Kjeldager Sørensen <pks@s-innovations.net>
-Pradeep Chhetri <pradeep@indix.com>
-Pradip Dhara <pradipd@microsoft.com>
-Prasanna Gautam <prasannagautam@gmail.com>
-Pratik Karki <prertik@outlook.com>
-Prayag Verma <prayag.verma@gmail.com>
-Priya Wadhwa <priyawadhwa@google.com>
-Projjol Banerji <probaner23@gmail.com>
-Przemek Hejman <przemyslaw.hejman@gmail.com>
-Pure White <daniel48@126.com>
-pysqz <randomq@126.com>
-Qiang Huang <h.huangqiang@huawei.com>
-Qinglan Peng <qinglanpeng@zju.edu.cn>
-qudongfang <qudongfang@gmail.com>
-Quentin Brossard <qbrossard@gmail.com>
-Quentin Perez <qperez@ocs.online.net>
-Quentin Tayssier <qtayssier@gmail.com>
-r0n22 <cameron.regan@gmail.com>
-Radostin Stoyanov <rstoyanov1@gmail.com>
-Rafal Jeczalik <rjeczalik@gmail.com>
-Rafe Colton <rafael.colton@gmail.com>
-Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
-Raghuram Devarakonda <draghuram@gmail.com>
-Raja Sami <raja.sami@tenpearls.com>
-Rajat Pandit <rp@rajatpandit.com>
-Rajdeep Dua <dua_rajdeep@yahoo.com>
-Ralf Sippl <ralf.sippl@gmail.com>
-Ralle <spam@rasmusa.net>
-Ralph Bean <rbean@redhat.com>
-Ramkumar Ramachandra <artagnon@gmail.com>
-Ramon Brooker <rbrooker@aetherealmind.com>
-Ramon van Alteren <ramon@vanalteren.nl>
-RaviTeja Pothana <ravi-teja@live.com>
-Ray Tsang <rayt@google.com>
-ReadmeCritic <frankensteinbot@gmail.com>
-Recursive Madman <recursive.madman@gmx.de>
-Reficul <xuzhenglun@gmail.com>
-Regan McCooey <rmccooey27@aol.com>
-Remi Rampin <remirampin@gmail.com>
-Remy Suen <remy.suen@gmail.com>
-Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
-Renaud Gaubert <rgaubert@nvidia.com>
-Rhys Hiltner <rhys@twitch.tv>
-Ri Xu <xuri.me@gmail.com>
-Ricardo N Feliciano <FelicianoTech@gmail.com>
-Rich Moyse <rich@moyse.us>
-Rich Seymour <rseymour@gmail.com>
-Richard <richard.scothern@gmail.com>
-Richard Burnison <rburnison@ebay.com>
-Richard Harvey <richard@squarecows.com>
-Richard Mathie <richard.mathie@amey.co.uk>
-Richard Metzler <richard@paadee.com>
-Richard Scothern <richard.scothern@gmail.com>
-Richo Healey <richo@psych0tik.net>
-Rick Bradley <rick@users.noreply.github.com>
-Rick van de Loo <rickvandeloo@gmail.com>
-Rick Wieman <git@rickw.nl>
-Rik Nijessen <rik@keefo.nl>
-Riku Voipio <riku.voipio@linaro.org>
-Riley Guerin <rileytg.dev@gmail.com>
-Ritesh H Shukla <sritesh@vmware.com>
-Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
-Rob Gulewich <rgulewich@netflix.com>
-Rob Vesse <rvesse@dotnetrdf.org>
-Robert Bachmann <rb@robertbachmann.at>
-Robert Bittle <guywithnose@gmail.com>
-Robert Obryk <robryk@gmail.com>
-Robert Schneider <mail@shakeme.info>
-Robert Stern <lexandro2000@gmail.com>
-Robert Terhaar <rterhaar@atlanticdynamic.com>
-Robert Wallis <smilingrob@gmail.com>
-Robert Wang <robert@arctic.tw>
-Roberto G. Hashioka <roberto.hashioka@docker.com>
-Roberto Muñoz Fernández <robertomf@gmail.com>
-Robin Naundorf <r.naundorf@fh-muenster.de>
-Robin Schneider <ypid@riseup.net>
-Robin Speekenbrink <robin@kingsquare.nl>
-Robin Thoni <robin@rthoni.com>
-robpc <rpcann@gmail.com>
-Rodolfo Carvalho <rhcarvalho@gmail.com>
-Rodrigo Vaz <rodrigo.vaz@gmail.com>
-Roel Van Nyen <roel.vannyen@gmail.com>
-Roger Peppe <rogpeppe@gmail.com>
-Rohit Jnagal <jnagal@google.com>
-Rohit Kadam <rohit.d.kadam@gmail.com>
-Rohit Kapur <rkapur@flatiron.com>
-Rojin George <rojingeorge@huawei.com>
-Roland Huß <roland@jolokia.org>
-Roland Kammerer <roland.kammerer@linbit.com>
-Roland Moriz <rmoriz@users.noreply.github.com>
-Roma Sokolov <sokolov.r.v@gmail.com>
-Roman Dudin <katrmr@gmail.com>
-Roman Mazur <roman@balena.io>
-Roman Strashkin <roman.strashkin@gmail.com>
-Ron Smits <ron.smits@gmail.com>
-Ron Williams <ron.a.williams@gmail.com>
-Rong Gao <gaoronggood@163.com>
-Rong Zhang <rongzhang@alauda.io>
-Rongxiang Song <tinysong1226@gmail.com>
-root <docker-dummy@example.com>
-root <root@lxdebmas.marist.edu>
-root <root@ubuntu-14.04-amd64-vbox>
-root <root@webm215.cluster016.ha.ovh.net>
-Rory Hunter <roryhunter2@gmail.com>
-Rory McCune <raesene@gmail.com>
-Ross Boucher <rboucher@gmail.com>
-Rovanion Luckey <rovanion.luckey@gmail.com>
-Royce Remer <royceremer@gmail.com>
-Rozhnov Alexandr <nox73@ya.ru>
-Rudolph Gottesheim <r.gottesheim@loot.at>
-Rui Cao <ruicao@alauda.io>
-Rui Lopes <rgl@ruilopes.com>
-Ruilin Li <liruilin4@huawei.com>
-Runshen Zhu <runshen.zhu@gmail.com>
-Russ Magee <rmagee@gmail.com>
-Ryan Abrams <rdabrams@gmail.com>
-Ryan Anderson <anderson.ryanc@gmail.com>
-Ryan Aslett <github@mixologic.com>
-Ryan Belgrave <rmb1993@gmail.com>
-Ryan Detzel <ryan.detzel@gmail.com>
-Ryan Fowler <rwfowler@gmail.com>
-Ryan Liu <ryanlyy@me.com>
-Ryan McLaughlin <rmclaughlin@insidesales.com>
-Ryan O'Donnell <odonnellryanc@gmail.com>
-Ryan Seto <ryanseto@yak.net>
-Ryan Simmen <ryan.simmen@gmail.com>
-Ryan Stelly <ryan.stelly@live.com>
-Ryan Thomas <rthomas@atlassian.com>
-Ryan Trauntvein <rtrauntvein@novacoast.com>
-Ryan Wallner <ryan.wallner@clusterhq.com>
-Ryan Zhang <ryan.zhang@docker.com>
-ryancooper7 <ryan.cooper7@gmail.com>
-RyanDeng <sheldon.d1018@gmail.com>
-Ryo Nakao <nakabonne@gmail.com>
-Rémy Greinhofer <remy.greinhofer@livelovely.com>
-s. rannou <mxs@sbrk.org>
-s00318865 <sunyuan3@huawei.com>
-Sabin Basyal <sabin.basyal@gmail.com>
-Sachin Joshi <sachin_jayant_joshi@hotmail.com>
-Sagar Hani <sagarhani33@gmail.com>
-Sainath Grandhi <sainath.grandhi@intel.com>
-Sakeven Jiang <jc5930@sina.cn>
-Salahuddin Khan <salah@docker.com>
-Sally O'Malley <somalley@redhat.com>
-Sam Abed <sam.abed@gmail.com>
-Sam Alba <sam.alba@gmail.com>
-Sam Bailey <cyprix@cyprix.com.au>
-Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
-Sam Neirinck <sam@samneirinck.com>
-Sam Reis <sreis@atlassian.com>
-Sam Rijs <srijs@airpost.net>
-Sam Whited <sam@samwhited.com>
-Sambuddha Basu <sambuddhabasu1@gmail.com>
-Sami Wagiaalla <swagiaal@redhat.com>
-Samuel Andaya <samuel@andaya.net>
-Samuel Dion-Girardeau <samuel.diongirardeau@gmail.com>
-Samuel Karp <skarp@amazon.com>
-Samuel PHAN <samuel-phan@users.noreply.github.com>
-Sandeep Bansal <sabansal@microsoft.com>
-Sankar சங்கர் <sankar.curiosity@gmail.com>
-Sanket Saurav <sanketsaurav@gmail.com>
-Santhosh Manohar <santhosh@docker.com>
-sapphiredev <se.imas.kr@gmail.com>
-Sargun Dhillon <sargun@netflix.com>
-Sascha Andres <sascha.andres@outlook.com>
-Sascha Grunert <sgrunert@suse.com>
-SataQiu <qiushida@beyondcent.com>
-Satnam Singh <satnam@raintown.org>
-Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
-Satoshi Tagomori <tagomoris@gmail.com>
-Scott Bessler <scottbessler@gmail.com>
-Scott Collier <emailscottcollier@gmail.com>
-Scott Johnston <scott@docker.com>
-Scott Stamp <scottstamp851@gmail.com>
-Scott Walls <sawalls@umich.edu>
-sdreyesg <sdreyesg@gmail.com>
-Sean Christopherson <sean.j.christopherson@intel.com>
-Sean Cronin <seancron@gmail.com>
-Sean Lee <seanlee@tw.ibm.com>
-Sean McIntyre <s.mcintyre@xverba.ca>
-Sean OMeara <sean@chef.io>
-Sean P. Kane <skane@newrelic.com>
-Sean Rodman <srodman7689@gmail.com>
-Sebastiaan van Steenis <mail@superseb.nl>
-Sebastiaan van Stijn <github@gone.nl>
-Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
-Senthil Kumaran <senthil@uthcode.com>
-SeongJae Park <sj38.park@gmail.com>
-Seongyeol Lim <seongyeol37@gmail.com>
-Serge Hallyn <serge.hallyn@ubuntu.com>
-Sergey Alekseev <sergey.alekseev.minsk@gmail.com>
-Sergey Evstifeev <sergey.evstifeev@gmail.com>
-Sergii Kabashniuk <skabashnyuk@codenvy.com>
-Sergio Lopez <slp@redhat.com>
-Serhat Gülçiçek <serhat25@gmail.com>
-SeungUkLee <lsy931106@gmail.com>
-Sevki Hasirci <s@sevki.org>
-Shane Canon <scanon@lbl.gov>
-Shane da Silva <shane@dasilva.io>
-Shaun Kaasten <shaunk@gmail.com>
-shaunol <shaunol@gmail.com>
-Shawn Landden <shawn@churchofgit.com>
-Shawn Siefkas <shawn.siefkas@meredith.com>
-shawnhe <shawnhe@shawnhedeMacBook-Pro.local>
-Shayne Wang <shaynexwang@gmail.com>
-Shekhar Gulati <shekhargulati84@gmail.com>
-Sheng Yang <sheng@yasker.org>
-Shengbo Song <thomassong@tencent.com>
-Shev Yan <yandong_8212@163.com>
-Shih-Yuan Lee <fourdollars@gmail.com>
-Shijiang Wei <mountkin@gmail.com>
-Shijun Qin <qinshijun16@mails.ucas.ac.cn>
-Shishir Mahajan <shishir.mahajan@redhat.com>
-Shoubhik Bose <sbose78@gmail.com>
-Shourya Sarcar <shourya.sarcar@gmail.com>
-Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
-shuai-z <zs.broccoli@gmail.com>
-Shukui Yang <yangshukui@huawei.com>
-Shuwei Hao <haosw@cn.ibm.com>
-Sian Lerk Lau <kiawin@gmail.com>
-Sidhartha Mani <sidharthamn@gmail.com>
-sidharthamani <sid@rancher.com>
-Silas Sewell <silas@sewell.org>
-Silvan Jegen <s.jegen@gmail.com>
-Simão Reis <smnrsti@gmail.com>
-Simei He <hesimei@zju.edu.cn>
-Simon Barendse <simon.barendse@gmail.com>
-Simon Eskildsen <sirup@sirupsen.com>
-Simon Ferquel <simon.ferquel@docker.com>
-Simon Leinen <simon.leinen@gmail.com>
-Simon Menke <simon.menke@gmail.com>
-Simon Taranto <simon.taranto@gmail.com>
-Simon Vikstrom <pullreq@devsn.se>
-Sindhu S <sindhus@live.in>
-Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
-skanehira <sho19921005@gmail.com>
-Solganik Alexander <solganik@gmail.com>
-Solomon Hykes <solomon@docker.com>
-Song Gao <song@gao.io>
-Soshi Katsuta <soshi.katsuta@gmail.com>
-Soulou <leo@unbekandt.eu>
-Spencer Brown <spencer@spencerbrown.org>
-Spencer Smith <robertspencersmith@gmail.com>
-Sridatta Thatipamala <sthatipamala@gmail.com>
-Sridhar Ratnakumar <sridharr@activestate.com>
-Srini Brahmaroutu <srbrahma@us.ibm.com>
-Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
-Staf Wagemakers <staf@wagemakers.be>
-Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
-Stanislav Levin <slev@altlinux.org>
-Steeve Morin <steeve.morin@gmail.com>
-Stefan Berger <stefanb@linux.vnet.ibm.com>
-Stefan J. Wernli <swernli@microsoft.com>
-Stefan Praszalowicz <stefan@greplin.com>
-Stefan S. <tronicum@user.github.com>
-Stefan Scherer <stefan.scherer@docker.com>
-Stefan Staudenmeyer <doerte@instana.com>
-Stefan Weil <sw@weilnetz.de>
-Stephan Spindler <shutefan@gmail.com>
-Stephen Benjamin <stephen@redhat.com>
-Stephen Crosby <stevecrozz@gmail.com>
-Stephen Day <stevvooe@gmail.com>
-Stephen Drake <stephen@xenolith.net>
-Stephen Rust <srust@blockbridge.com>
-Steve Desmond <steve@vtsv.ca>
-Steve Dougherty <steve@asksteved.com>
-Steve Durrheimer <s.durrheimer@gmail.com>
-Steve Francia <steve.francia@gmail.com>
-Steve Koch <stevekochscience@gmail.com>
-Steven Burgess <steven.a.burgess@hotmail.com>
-Steven Erenst <stevenerenst@gmail.com>
-Steven Hartland <steven.hartland@multiplay.co.uk>
-Steven Iveson <sjiveson@outlook.com>
-Steven Merrill <steven.merrill@gmail.com>
-Steven Richards <steven@axiomzen.co>
-Steven Taylor <steven.taylor@me.com>
-Stig Larsson <stig@larsson.dev>
-Subhajit Ghosh <isubuz.g@gmail.com>
-Sujith Haridasan <sujith.h@gmail.com>
-Sun Gengze <690388648@qq.com>
-Sun Jianbo <wonderflow.sun@gmail.com>
-Sune Keller <sune.keller@gmail.com>
-Sunny Gogoi <indiasuny000@gmail.com>
-Suryakumar Sudar <surya.trunks@gmail.com>
-Sven Dowideit <SvenDowideit@home.org.au>
-Swapnil Daingade <swapnil.daingade@gmail.com>
-Sylvain Baubeau <sbaubeau@redhat.com>
-Sylvain Bellemare <sylvain@ascribe.io>
-Sébastien <sebastien@yoozio.com>
-Sébastien HOUZÉ <cto@verylastroom.com>
-Sébastien Luttringer <seblu@seblu.net>
-Sébastien Stormacq <sebsto@users.noreply.github.com>
-Tabakhase <mail@tabakhase.com>
-Tadej Janež <tadej.j@nez.si>
-TAGOMORI Satoshi <tagomoris@gmail.com>
-tang0th <tang0th@gmx.com>
-Tangi Colin <tangicolin@gmail.com>
-Tatsuki Sugiura <sugi@nemui.org>
-Tatsushi Inagaki <e29253@jp.ibm.com>
-Taylan Isikdemir <taylani@google.com>
-Taylor Jones <monitorjbl@gmail.com>
-Ted M. Young <tedyoung@gmail.com>
-Tehmasp Chaudhri <tehmasp@gmail.com>
-Tejaswini Duggaraju <naduggar@microsoft.com>
-Tejesh Mehta <tejesh.mehta@gmail.com>
-terryding77 <550147740@qq.com>
-tgic <farmer1992@gmail.com>
-Thatcher Peskens <thatcher@docker.com>
-theadactyl <thea.lamkin@gmail.com>
-Thell 'Bo' Fowler <thell@tbfowler.name>
-Thermionix <bond711@gmail.com>
-Thijs Terlouw <thijsterlouw@gmail.com>
-Thomas Bikeev <thomas.bikeev@mac.com>
-Thomas Frössman <thomasf@jossystem.se>
-Thomas Gazagnaire <thomas@gazagnaire.org>
-Thomas Grainger <tagrain@gmail.com>
-Thomas Hansen <thomas.hansen@gmail.com>
-Thomas Leonard <thomas.leonard@docker.com>
-Thomas Léveil <thomasleveil@gmail.com>
-Thomas Orozco <thomas@orozco.fr>
-Thomas Riccardi <riccardi@systran.fr>
-Thomas Schroeter <thomas@cliqz.com>
-Thomas Sjögren <konstruktoid@users.noreply.github.com>
-Thomas Swift <tgs242@gmail.com>
-Thomas Tanaka <thomas.tanaka@oracle.com>
-Thomas Texier <sharkone@en-mousse.org>
-Ti Zhou <tizhou1986@gmail.com>
-Tianon Gravi <admwiggin@gmail.com>
-Tianyi Wang <capkurmagati@gmail.com>
-Tibor Vass <teabee89@gmail.com>
-Tiffany Jernigan <tiffany.f.j@gmail.com>
-Tiffany Low <tiffany@box.com>
-Till Wegmüller <toasterson@gmail.com>
-Tim <elatllat@gmail.com>
-Tim Bart <tim@fewagainstmany.com>
-Tim Bosse <taim@bosboot.org>
-Tim Dettrick <t.dettrick@uq.edu.au>
-Tim Düsterhus <tim@bastelstu.be>
-Tim Hockin <thockin@google.com>
-Tim Potter <tpot@hpe.com>
-Tim Ruffles <oi@truffles.me.uk>
-Tim Smith <timbot@google.com>
-Tim Terhorst <mynamewastaken+git@gmail.com>
-Tim Wang <timwangdev@gmail.com>
-Tim Waugh <twaugh@redhat.com>
-Tim Wraight <tim.wraight@tangentlabs.co.uk>
-Tim Zju <21651152@zju.edu.cn>
-timfeirg <kkcocogogo@gmail.com>
-Timothy Hobbs <timothyhobbs@seznam.cz>
-tjwebb123 <tjwebb123@users.noreply.github.com>
-tobe <tobegit3hub@gmail.com>
-Tobias Bieniek <Tobias.Bieniek@gmx.de>
-Tobias Bradtke <webwurst@gmail.com>
-Tobias Gesellchen <tobias@gesellix.de>
-Tobias Klauser <tklauser@distanz.ch>
-Tobias Munk <schmunk@usrbin.de>
-Tobias Schmidt <ts@soundcloud.com>
-Tobias Schwab <tobias.schwab@dynport.de>
-Todd Crane <todd@toddcrane.com>
-Todd Lunter <tlunter@gmail.com>
-Todd Whiteman <todd.whiteman@joyent.com>
-Toli Kuznets <toli@docker.com>
-Tom Barlow <tomwbarlow@gmail.com>
-Tom Booth <tombooth@gmail.com>
-Tom Denham <tom@tomdee.co.uk>
-Tom Fotherby <tom+github@peopleperhour.com>
-Tom Howe <tom.howe@enstratius.com>
-Tom Hulihan <hulihan.tom159@gmail.com>
-Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
-Tom Sweeney <tsweeney@redhat.com>
-Tom Wilkie <tom.wilkie@gmail.com>
-Tom X. Tobin <tomxtobin@tomxtobin.com>
-Tomas Tomecek <ttomecek@redhat.com>
-Tomasz Kopczynski <tomek@kopczynski.net.pl>
-Tomasz Lipinski <tlipinski@users.noreply.github.com>
-Tomasz Nurkiewicz <nurkiewicz@gmail.com>
-Tommaso Visconti <tommaso.visconti@gmail.com>
-Tomáš Hrčka <thrcka@redhat.com>
-Tonny Xu <tonny.xu@gmail.com>
-Tony Abboud <tdabboud@hotmail.com>
-Tony Daws <tony@daws.ca>
-Tony Miller <mcfiredrill@gmail.com>
-toogley <toogley@mailbox.org>
-Torstein Husebø <torstein@huseboe.net>
-Tõnis Tiigi <tonistiigi@gmail.com>
-Trace Andreason <tandreason@gmail.com>
-tracylihui <793912329@qq.com>
-Trapier Marshall <trapier.marshall@docker.com>
-Travis Cline <travis.cline@gmail.com>
-Travis Thieman <travis.thieman@gmail.com>
-Trent Ogren <tedwardo2@gmail.com>
-Trevor <trevinwoodstock@gmail.com>
-Trevor Pounds <trevor.pounds@gmail.com>
-Trevor Sullivan <pcgeek86@gmail.com>
-Trishna Guha <trishnaguha17@gmail.com>
-Tristan Carel <tristan@cogniteev.com>
-Troy Denton <trdenton@gmail.com>
-Tycho Andersen <tycho@docker.com>
-Tyler Brock <tyler.brock@gmail.com>
-Tyler Brown <tylers.pile@gmail.com>
-Tzu-Jung Lee <roylee17@gmail.com>
-uhayate <uhayate.gong@daocloud.io>
-Ulysse Carion <ulyssecarion@gmail.com>
-Umesh Yadav <umesh4257@gmail.com>
-Utz Bacher <utz.bacher@de.ibm.com>
-vagrant <vagrant@ubuntu-14.04-amd64-vbox>
-Vaidas Jablonskis <jablonskis@gmail.com>
-vanderliang <lansheng@meili-inc.com>
-Velko Ivanov <vivanov@deeperplane.com>
-Veres Lajos <vlajos@gmail.com>
-Victor Algaze <valgaze@gmail.com>
-Victor Coisne <victor.coisne@dotcloud.com>
-Victor Costan <costan@gmail.com>
-Victor I. Wood <viw@t2am.com>
-Victor Lyuboslavsky <victor@victoreda.com>
-Victor Marmol <vmarmol@google.com>
-Victor Palma <palma.victor@gmail.com>
-Victor Vieux <victor.vieux@docker.com>
-Victoria Bialas <victoria.bialas@docker.com>
-Vijaya Kumar K <vijayak@caviumnetworks.com>
-Vikram bir Singh <vsingh@mirantis.com>
-Viktor Stanchev <me@viktorstanchev.com>
-Viktor Vojnovski <viktor.vojnovski@amadeus.com>
-VinayRaghavanKS <raghavan.vinay@gmail.com>
-Vincent Batts <vbatts@redhat.com>
-Vincent Bernat <Vincent.Bernat@exoscale.ch>
-Vincent Boulineau <vincent.boulineau@datadoghq.com>
-Vincent Demeester <vincent.demeester@docker.com>
-Vincent Giersch <vincent.giersch@ovh.net>
-Vincent Mayers <vincent.mayers@inbloom.org>
-Vincent Woo <me@vincentwoo.com>
-Vinod Kulkarni <vinod.kulkarni@gmail.com>
-Vishal Doshi <vishal.doshi@gmail.com>
-Vishnu Kannan <vishnuk@google.com>
-Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
-Vitor Monteiro <vmrmonteiro@gmail.com>
-Vivek Agarwal <me@vivek.im>
-Vivek Dasgupta <vdasgupt@redhat.com>
-Vivek Goyal <vgoyal@redhat.com>
-Vladimir Bulyga <xx@ccxx.cc>
-Vladimir Kirillov <proger@wilab.org.ua>
-Vladimir Pouzanov <farcaller@google.com>
-Vladimir Rutsky <altsysrq@gmail.com>
-Vladimir Varankin <nek.narqo+git@gmail.com>
-VladimirAus <v_roudakov@yahoo.com>
-Vlastimil Zeman <vlastimil.zeman@diffblue.com>
-Vojtech Vitek (V-Teq) <vvitek@redhat.com>
-waitingkuo <waitingkuo0527@gmail.com>
-Walter Leibbrandt <github@wrl.co.za>
-Walter Stanish <walter@pratyeka.org>
-Wang Chao <chao.wang@ucloud.cn>
-Wang Guoliang <liangcszzu@163.com>
-Wang Jie <wangjie5@chinaskycloud.com>
-Wang Long <long.wanglong@huawei.com>
-Wang Ping <present.wp@icloud.com>
-Wang Xing <hzwangxing@corp.netease.com>
-Wang Yuexiao <wang.yuexiao@zte.com.cn>
-Wang Yumu <37442693@qq.com>
-wanghuaiqing <wanghuaiqing@loongson.cn>
-Ward Vandewege <ward@jhvc.com>
-WarheadsSE <max@warheads.net>
-Wassim Dhif <wassimdhif@gmail.com>
-Wayne Chang <wayne@neverfear.org>
-Wayne Song <wsong@docker.com>
-Weerasak Chongnguluam <singpor@gmail.com>
-Wei Fu <fuweid89@gmail.com>
-Wei Wu <wuwei4455@gmail.com>
-Wei-Ting Kuo <waitingkuo0527@gmail.com>
-weipeng <weipeng@tuscloud.io>
-weiyan <weiyan3@huawei.com>
-Weiyang Zhu <cnresonant@gmail.com>
-Wen Cheng Ma <wenchma@cn.ibm.com>
-Wendel Fleming <wfleming@usc.edu>
-Wenjun Tang <tangwj2@lenovo.com>
-Wenkai Yin <yinw@vmware.com>
-wenlxie <wenlxie@ebay.com>
-Wentao Zhang <zhangwentao234@huawei.com>
-Wenxuan Zhao <viz@linux.com>
-Wenyu You <21551128@zju.edu.cn>
-Wenzhi Liang <wenzhi.liang@gmail.com>
-Wes Morgan <cap10morgan@gmail.com>
-Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
-Wiktor Kwapisiewicz <wiktor@metacode.biz>
-Will Dietz <w@wdtz.org>
-Will Rouesnel <w.rouesnel@gmail.com>
-Will Weaver <monkey@buildingbananas.com>
-willhf <willhf@gmail.com>
-William Delanoue <william.delanoue@gmail.com>
-William Henry <whenry@redhat.com>
-William Hubbs <w.d.hubbs@gmail.com>
-William Martin <wmartin@pivotal.io>
-William Riancho <wr.wllm@gmail.com>
-William Thurston <thurstw@amazon.com>
-Wilson Júnior <wilsonpjunior@gmail.com>
-Wing-Kam Wong <wingkwong.code@gmail.com>
-WiseTrem <shepelyov.g@gmail.com>
-Wolfgang Powisch <powo@powo.priv.at>
-Wonjun Kim <wonjun.kim@navercorp.com>
-xamyzhao <x.amy.zhao@gmail.com>
-Xian Chaobo <xianchaobo@huawei.com>
-Xianglin Gao <xlgao@zju.edu.cn>
-Xianlu Bird <xianlubird@gmail.com>
-Xiao YongBiao <xyb4638@gmail.com>
-XiaoBing Jiang <s7v7nislands@gmail.com>
-Xiaodong Liu <liuxiaodong@loongson.cn>
-Xiaodong Zhang <a4012017@sina.com>
-Xiaoxi He <xxhe@alauda.io>
-Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
-Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
-xichengliudui <1693291525@qq.com>
-xiekeyang <xiekeyang@huawei.com>
-Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
-Xinbo Weng <xihuanbo_0521@zju.edu.cn>
-Xinfeng Liu <xinfeng.liu@gmail.com>
-Xinzi Zhou <imdreamrunner@gmail.com>
-Xiuming Chen <cc@cxm.cc>
-Xuecong Liao <satorulogic@gmail.com>
-xuzhaokui <cynicholas@gmail.com>
-Yadnyawalkya Tale <ytale@redhat.com>
-Yahya <ya7yaz@gmail.com>
-YAMADA Tsuyoshi <tyamada@minimum2scp.org>
-Yamasaki Masahide <masahide.y@gmail.com>
-Yan Feng <yanfeng2@huawei.com>
-Yang Bai <hamo.by@gmail.com>
-Yang Pengfei <yangpengfei4@huawei.com>
-yangchenliang <yangchenliang@huawei.com>
-Yanqiang Miao <miao.yanqiang@zte.com.cn>
-Yao Zaiyong <yaozaiyong@hotmail.com>
-Yash Murty <yashmurty@gmail.com>
-Yassine Tijani <yasstij11@gmail.com>
-Yasunori Mahata <nori@mahata.net>
-Yazhong Liu <yorkiefixer@gmail.com>
-Yestin Sun <sunyi0804@gmail.com>
-Yi EungJun <eungjun.yi@navercorp.com>
-Yibai Zhang <xm1994@gmail.com>
-Yihang Ho <hoyihang5@gmail.com>
-Ying Li <ying.li@docker.com>
-Yohei Ueda <yohei@jp.ibm.com>
-Yong Tang <yong.tang.github@outlook.com>
-Yongxin Li <yxli@alauda.io>
-Yongzhi Pan <panyongzhi@gmail.com>
-Yosef Fertel <yfertel@gmail.com>
-You-Sheng Yang (楊有勝) <vicamo@gmail.com>
-youcai <omegacoleman@gmail.com>
-Youcef YEKHLEF <yyekhlef@gmail.com>
-Yu Changchun <yuchangchun1@huawei.com>
-Yu Chengxia <yuchengxia@huawei.com>
-Yu Peng <yu.peng36@zte.com.cn>
-Yu-Ju Hong <yjhong@google.com>
-Yuan Sun <sunyuan3@huawei.com>
-Yuanhong Peng <pengyuanhong@huawei.com>
-Yue Zhang <zy675793960@yeah.net>
-Yuhao Fang <fangyuhao@gmail.com>
-Yuichiro Kaneko <spiketeika@gmail.com>
-Yunxiang Huang <hyxqshk@vip.qq.com>
-Yurii Rashkovskii <yrashk@gmail.com>
-Yusuf Tarık Günaydın <yusuf_tarik@hotmail.com>
-Yves Junqueira <yves.junqueira@gmail.com>
-Zac Dover <zdover@redhat.com>
-Zach Borboa <zachborboa@gmail.com>
-Zachary Jaffee <zjaffee@us.ibm.com>
-Zain Memon <zain@inzain.net>
-Zaiste! <oh@zaiste.net>
-Zane DeGraffenried <zane.deg@gmail.com>
-Zefan Li <lizefan@huawei.com>
-Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
-Zhang Kun <zkazure@gmail.com>
-Zhang Wei <zhangwei555@huawei.com>
-Zhang Wentao <zhangwentao234@huawei.com>
-ZhangHang <stevezhang2014@gmail.com>
-zhangxianwei <xianwei.zw@alibaba-inc.com>
-Zhenan Ye <21551168@zju.edu.cn>
-zhenghenghuo <zhenghenghuo@zju.edu.cn>
-Zhenhai Gao <gaozh1988@live.com>
-Zhenkun Bi <bi.zhenkun@zte.com.cn>
-zhipengzuo <zuozhipeng@baidu.com>
-Zhou Hao <zhouhao@cn.fujitsu.com>
-Zhoulin Xie <zhoulin.xie@daocloud.io>
-Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
-Zhu Kunjia <zhu.kunjia@zte.com.cn>
-Zhuoyun Wei <wzyboy@wzyboy.org>
-Ziheng Liu <lzhfromustc@gmail.com>
-Zilin Du <zilin.du@gmail.com>
-zimbatm <zimbatm@zimbatm.com>
-Ziming Dong <bnudzm@foxmail.com>
-ZJUshuaizhou <21551191@zju.edu.cn>
-zmarouf <zeid.marouf@gmail.com>
-Zoltan Tombol <zoltan.tombol@gmail.com>
-Zou Yu <zouyu7@huawei.com>
-zqh <zqhxuyuan@gmail.com>
-Zuhayr Elahi <zuhayr.elahi@docker.com>
-Zunayed Ali <zunayed@gmail.com>
-Álex González <agonzalezro@gmail.com>
-Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
-Átila Camurça Alves <camurca.home@gmail.com>
-尹吉峰 <jifeng.yin@gmail.com>
-屈骏 <qujun@tiduyun.com>
-徐俊杰 <paco.xu@daocloud.io>
-慕陶 <jihui.xjh@alibaba-inc.com>
-搏通 <yufeng.pyf@alibaba-inc.com>
-黄艳红00139573 <huang.yanhong@zte.com.cn>
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
deleted file mode 100644
index 6d8d58f..0000000
--- a/vendor/github.com/docker/docker/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2013-2018 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
deleted file mode 100644
index 58b19b6..0000000
--- a/vendor/github.com/docker/docker/NOTICE
+++ /dev/null
@@ -1,19 +0,0 @@
-Docker
-Copyright 2012-2017 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-This product contains software (https://github.com/creack/pty) developed
-by Keith Rarick, licensed under the MIT License.
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
deleted file mode 100644
index f136c34..0000000
--- a/vendor/github.com/docker/docker/api/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Working on the Engine API
-
-The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
-
-It consists of various components in this repository:
-
-- `api/swagger.yaml` A Swagger definition of the API.
-- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
-- `cli/` The command-line client.
-- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
-- `daemon/` The daemon, which serves the API.
-
-## Swagger definition
-
-The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
-
-1. Automatically generate documentation.
-2. Automatically generate the Go server and client. (A work-in-progress.)
-3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
-
-## Updating the API documentation
-
-The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
-
-The file is split into two main sections:
-
-- `definitions`, which defines re-usable objects used in requests and responses
-- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
-
-To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
-
-There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919).
-
-`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
-
-## Viewing the API documentation
-
-When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
-
-Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
-
-The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
deleted file mode 100644
index 1565e2a..0000000
--- a/vendor/github.com/docker/docker/api/common.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// Common constants for daemon and client.
-const (
- // DefaultVersion of Current REST API
- DefaultVersion = "1.41"
-
- // NoBaseImageSpecifier is the symbol used by the FROM
- // command to specify that no base image is to be used.
- NoBaseImageSpecifier = "scratch"
-)
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
deleted file mode 100644
index 504b0c9..0000000
--- a/vendor/github.com/docker/docker/api/common_unix.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !windows
-
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-const MinVersion = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
deleted file mode 100644
index 590ba54..0000000
--- a/vendor/github.com/docker/docker/api/common_windows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-// Technically the first daemon API version released on Windows is v1.25 in
-// engine version 1.13. However, some clients are explicitly using downlevel
-// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
-// Hence also allowing 1.24 on Windows.
-const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml
deleted file mode 100644
index f07a027..0000000
--- a/vendor/github.com/docker/docker/api/swagger-gen.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-
-layout:
- models:
- - name: definition
- source: asset:model
- target: "{{ joinFilePath .Target .ModelPackage }}"
- file_name: "{{ (snakize (pascalize .Name)) }}.go"
- operations:
- - name: handler
- source: asset:serverOperation
- target: "{{ joinFilePath .Target .APIPackage .Package }}"
- file_name: "{{ (snakize (pascalize .Name)) }}.go"
diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml
deleted file mode 100644
index bada4a8..0000000
--- a/vendor/github.com/docker/docker/api/swagger.yaml
+++ /dev/null
@@ -1,11425 +0,0 @@
-# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
-#
-# This is used for generating API documentation and the types used by the
-# client/server. See api/README.md for more information.
-#
-# Some style notes:
-# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
-# descriptions.
-# - There is no maximum line length, for ease of editing and pretty diffs.
-# - operationIds are in the format "NounVerb", with a singular noun.
-
-swagger: "2.0"
-schemes:
- - "http"
- - "https"
-produces:
- - "application/json"
- - "text/plain"
-consumes:
- - "application/json"
- - "text/plain"
-basePath: "/v1.41"
-info:
- title: "Docker Engine API"
- version: "1.41"
- x-logo:
- url: "https://docs.docker.com/images/logo-docker-main.png"
- description: |
- The Engine API is an HTTP API served by Docker Engine. It is the API the
- Docker client uses to communicate with the Engine, so everything the Docker
- client can do can be done with the API.
-
- Most of the client's commands map directly to API endpoints (e.g. `docker ps`
- is `GET /containers/json`). The notable exception is running containers,
- which consists of several API calls.
-
- # Errors
-
- The API uses standard HTTP status codes to indicate the success or failure
- of the API call. The body of the response will be JSON in the following
- format:
-
- ```
- {
- "message": "page not found"
- }
- ```
-
- # Versioning
-
- The API is usually changed in each release, so API calls are versioned to
- ensure that clients don't break. To lock to a specific version of the API,
- you prefix the URL with its version, for example, call `/v1.30/info` to use
- the v1.30 version of the `/info` endpoint. If the API version specified in
- the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
- is returned.
-
- If you omit the version-prefix, the current version of the API (v1.41) is used.
- For example, calling `/info` is the same as calling `/v1.41/info`. Using the
- API without a version-prefix is deprecated and will be removed in a future release.
-
- Engine releases in the near future should support this version of the API,
- so your client will continue to work even if it is talking to a newer Engine.
-
- The API uses an open schema model, which means server may add extra properties
- to responses. Likewise, the server will ignore any extra query parameters and
- request body properties. When you write clients, you need to ignore additional
- properties in responses to ensure they do not break when talking to newer
- daemons.
-
-
- # Authentication
-
- Authentication for registries is handled client side. The client has to send
- authentication details to various endpoints that need to communicate with
- registries, such as `POST /images/(name)/push`. These are sent as
- `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5)
- (JSON) string with the following structure:
-
- ```
- {
- "username": "string",
- "password": "string",
- "email": "string",
- "serveraddress": "string"
- }
- ```
-
- The `serveraddress` is a domain/IP without a protocol. Throughout this
- structure, double quotes are required.
-
- If you have already got an identity token from the [`/auth` endpoint](#operation/SystemAuth),
- you can just pass this instead of credentials:
-
- ```
- {
- "identitytoken": "9cbaf023786cd7..."
- }
- ```
-
-# The tags on paths define the menu sections in the ReDoc documentation, so
-# the usage of tags must make sense for that:
-# - They should be singular, not plural.
-# - There should not be too many tags, or the menu becomes unwieldy. For
-# example, it is preferable to add a path to the "System" tag instead of
-# creating a tag with a single path in it.
-# - The order of tags in this list defines the order in the menu.
-tags:
- # Primary objects
- - name: "Container"
- x-displayName: "Containers"
- description: |
- Create and manage containers.
- - name: "Image"
- x-displayName: "Images"
- - name: "Network"
- x-displayName: "Networks"
- description: |
- Networks are user-defined networks that containers can be attached to.
- See the [networking documentation](https://docs.docker.com/network/)
- for more information.
- - name: "Volume"
- x-displayName: "Volumes"
- description: |
- Create and manage persistent storage that can be attached to containers.
- - name: "Exec"
- x-displayName: "Exec"
- description: |
- Run new commands inside running containers. Refer to the
- [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/)
- for more information.
-
- To exec a command in a container, you first need to create an exec instance,
- then start it. These two API endpoints are wrapped up in a single command-line
- command, `docker exec`.
-
- # Swarm things
- - name: "Swarm"
- x-displayName: "Swarm"
- description: |
- Engines can be clustered together in a swarm. Refer to the
- [swarm mode documentation](https://docs.docker.com/engine/swarm/)
- for more information.
- - name: "Node"
- x-displayName: "Nodes"
- description: |
- Nodes are instances of the Engine participating in a swarm. Swarm mode
- must be enabled for these endpoints to work.
- - name: "Service"
- x-displayName: "Services"
- description: |
- Services are the definitions of tasks to run on a swarm. Swarm mode must
- be enabled for these endpoints to work.
- - name: "Task"
- x-displayName: "Tasks"
- description: |
- A task is a container running on a swarm. It is the atomic scheduling unit
- of swarm. Swarm mode must be enabled for these endpoints to work.
- - name: "Secret"
- x-displayName: "Secrets"
- description: |
- Secrets are sensitive data that can be used by services. Swarm mode must
- be enabled for these endpoints to work.
- - name: "Config"
- x-displayName: "Configs"
- description: |
- Configs are application configurations that can be used by services. Swarm
- mode must be enabled for these endpoints to work.
- # System things
- - name: "Plugin"
- x-displayName: "Plugins"
- - name: "System"
- x-displayName: "System"
-
-definitions:
- Port:
- type: "object"
- description: "An open port on a container"
- required: [PrivatePort, Type]
- properties:
- IP:
- type: "string"
- format: "ip-address"
- description: "Host IP address that the container's port is mapped to"
- PrivatePort:
- type: "integer"
- format: "uint16"
- x-nullable: false
- description: "Port on the container"
- PublicPort:
- type: "integer"
- format: "uint16"
- description: "Port exposed on the host"
- Type:
- type: "string"
- x-nullable: false
- enum: ["tcp", "udp", "sctp"]
- example:
- PrivatePort: 8080
- PublicPort: 80
- Type: "tcp"
-
- MountPoint:
- type: "object"
- description: "A mount point inside a container"
- properties:
- Type:
- type: "string"
- Name:
- type: "string"
- Source:
- type: "string"
- Destination:
- type: "string"
- Driver:
- type: "string"
- Mode:
- type: "string"
- RW:
- type: "boolean"
- Propagation:
- type: "string"
-
- DeviceMapping:
- type: "object"
- description: "A device mapping between the host and container"
- properties:
- PathOnHost:
- type: "string"
- PathInContainer:
- type: "string"
- CgroupPermissions:
- type: "string"
- example:
- PathOnHost: "/dev/deviceName"
- PathInContainer: "/dev/deviceName"
- CgroupPermissions: "mrw"
-
- DeviceRequest:
- type: "object"
- description: "A request for devices to be sent to device drivers"
- properties:
- Driver:
- type: "string"
- example: "nvidia"
- Count:
- type: "integer"
- example: -1
- DeviceIDs:
- type: "array"
- items:
- type: "string"
- example:
- - "0"
- - "1"
- - "GPU-fef8089b-4820-abfc-e83e-94318197576e"
- Capabilities:
- description: |
- A list of capabilities; an OR list of AND lists of capabilities.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- example:
- # gpu AND nvidia AND compute
- - ["gpu", "nvidia", "compute"]
- Options:
- description: |
- Driver-specific options, specified as a key/value pairs. These options
- are passed directly to the driver.
- type: "object"
- additionalProperties:
- type: "string"
-
- ThrottleDevice:
- type: "object"
- properties:
- Path:
- description: "Device path"
- type: "string"
- Rate:
- description: "Rate"
- type: "integer"
- format: "int64"
- minimum: 0
-
- Mount:
- type: "object"
- properties:
- Target:
- description: "Container path."
- type: "string"
- Source:
- description: "Mount source (e.g. a volume name, a host path)."
- type: "string"
- Type:
- description: |
- The mount type. Available types:
-
- - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container.
- - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
- - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
- - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
- type: "string"
- enum:
- - "bind"
- - "volume"
- - "tmpfs"
- - "npipe"
- ReadOnly:
- description: "Whether the mount should be read-only."
- type: "boolean"
- Consistency:
- description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`."
- type: "string"
- BindOptions:
- description: "Optional configuration for the `bind` type."
- type: "object"
- properties:
- Propagation:
- description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`."
- type: "string"
- enum:
- - "private"
- - "rprivate"
- - "shared"
- - "rshared"
- - "slave"
- - "rslave"
- NonRecursive:
- description: "Disable recursive bind mount."
- type: "boolean"
- default: false
- VolumeOptions:
- description: "Optional configuration for the `volume` type."
- type: "object"
- properties:
- NoCopy:
- description: "Populate volume with data from the target."
- type: "boolean"
- default: false
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- DriverConfig:
- description: "Map of driver specific options"
- type: "object"
- properties:
- Name:
- description: "Name of the driver to use to create the volume."
- type: "string"
- Options:
- description: "key/value map of driver specific options."
- type: "object"
- additionalProperties:
- type: "string"
- TmpfsOptions:
- description: "Optional configuration for the `tmpfs` type."
- type: "object"
- properties:
- SizeBytes:
- description: "The size for the tmpfs mount in bytes."
- type: "integer"
- format: "int64"
- Mode:
- description: "The permission mode for the tmpfs mount in an integer."
- type: "integer"
-
- RestartPolicy:
- description: |
- The behavior to apply when the container exits. The default is not to
- restart.
-
- An ever increasing delay (double the previous delay, starting at 100ms) is
- added before each restart to prevent flooding the server.
- type: "object"
- properties:
- Name:
- type: "string"
- description: |
- - Empty string means not to restart
- - `always` Always restart
- - `unless-stopped` Restart always except when the user has manually stopped the container
- - `on-failure` Restart only when the container exit code is non-zero
- enum:
- - ""
- - "always"
- - "unless-stopped"
- - "on-failure"
- MaximumRetryCount:
- type: "integer"
- description: |
- If `on-failure` is used, the number of times to retry before giving up.
-
- Resources:
- description: "A container's resources (cgroups config, ulimits, etc)"
- type: "object"
- properties:
- # Applicable to all platforms
- CpuShares:
- description: |
- An integer value representing this container's relative CPU weight
- versus other containers.
- type: "integer"
- Memory:
- description: "Memory limit in bytes."
- type: "integer"
- format: "int64"
- default: 0
- # Applicable to UNIX platforms
- CgroupParent:
- description: |
- Path to `cgroups` under which the container's `cgroup` is created. If
- the path is not absolute, the path is considered to be relative to the
- `cgroups` path of the init process. Cgroups are created if they do not
- already exist.
- type: "string"
- BlkioWeight:
- description: "Block IO weight (relative weight)."
- type: "integer"
- minimum: 0
- maximum: 1000
- BlkioWeightDevice:
- description: |
- Block IO weight (relative device weight) in the form:
-
- ```
- [{"Path": "device_path", "Weight": weight}]
- ```
- type: "array"
- items:
- type: "object"
- properties:
- Path:
- type: "string"
- Weight:
- type: "integer"
- minimum: 0
- BlkioDeviceReadBps:
- description: |
- Limit read rate (bytes per second) from a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- BlkioDeviceWriteBps:
- description: |
- Limit write rate (bytes per second) to a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- BlkioDeviceReadIOps:
- description: |
- Limit read rate (IO per second) from a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- BlkioDeviceWriteIOps:
- description: |
- Limit write rate (IO per second) to a device, in the form:
-
- ```
- [{"Path": "device_path", "Rate": rate}]
- ```
- type: "array"
- items:
- $ref: "#/definitions/ThrottleDevice"
- CpuPeriod:
- description: "The length of a CPU period in microseconds."
- type: "integer"
- format: "int64"
- CpuQuota:
- description: |
- Microseconds of CPU time that the container can get in a CPU period.
- type: "integer"
- format: "int64"
- CpuRealtimePeriod:
- description: |
- The length of a CPU real-time period in microseconds. Set to 0 to
- allocate no time allocated to real-time tasks.
- type: "integer"
- format: "int64"
- CpuRealtimeRuntime:
- description: |
- The length of a CPU real-time runtime in microseconds. Set to 0 to
- allocate no time allocated to real-time tasks.
- type: "integer"
- format: "int64"
- CpusetCpus:
- description: |
- CPUs in which to allow execution (e.g., `0-3`, `0,1`).
- type: "string"
- example: "0-3"
- CpusetMems:
- description: |
- Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only
- effective on NUMA systems.
- type: "string"
- Devices:
- description: "A list of devices to add to the container."
- type: "array"
- items:
- $ref: "#/definitions/DeviceMapping"
- DeviceCgroupRules:
- description: "a list of cgroup rules to apply to the container"
- type: "array"
- items:
- type: "string"
- example: "c 13:* rwm"
- DeviceRequests:
- description: |
- A list of requests for devices to be sent to device drivers.
- type: "array"
- items:
- $ref: "#/definitions/DeviceRequest"
- KernelMemory:
- description: |
- Kernel memory limit in bytes.
-
- <p><br /></p>
-
- > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated
- > `kmem.limit_in_bytes`.
- type: "integer"
- format: "int64"
- example: 209715200
- KernelMemoryTCP:
- description: "Hard limit for kernel TCP buffer memory (in bytes)."
- type: "integer"
- format: "int64"
- MemoryReservation:
- description: "Memory soft limit in bytes."
- type: "integer"
- format: "int64"
- MemorySwap:
- description: |
- Total memory limit (memory + swap). Set as `-1` to enable unlimited
- swap.
- type: "integer"
- format: "int64"
- MemorySwappiness:
- description: |
- Tune a container's memory swappiness behavior. Accepts an integer
- between 0 and 100.
- type: "integer"
- format: "int64"
- minimum: 0
- maximum: 100
- NanoCpus:
- description: "CPU quota in units of 10<sup>-9</sup> CPUs."
- type: "integer"
- format: "int64"
- OomKillDisable:
- description: "Disable OOM Killer for the container."
- type: "boolean"
- Init:
- description: |
- Run an init inside the container that forwards signals and reaps
- processes. This field is omitted if empty, and the default (as
- configured on the daemon) is used.
- type: "boolean"
- x-nullable: true
- PidsLimit:
- description: |
- Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null`
- to not change.
- type: "integer"
- format: "int64"
- x-nullable: true
- Ulimits:
- description: |
- A list of resource limits to set in the container. For example:
-
- ```
- {"Name": "nofile", "Soft": 1024, "Hard": 2048}
- ```
- type: "array"
- items:
- type: "object"
- properties:
- Name:
- description: "Name of ulimit"
- type: "string"
- Soft:
- description: "Soft limit"
- type: "integer"
- Hard:
- description: "Hard limit"
- type: "integer"
- # Applicable to Windows
- CpuCount:
- description: |
- The number of usable CPUs (Windows only).
-
- On Windows Server containers, the processor resource controls are
- mutually exclusive. The order of precedence is `CPUCount` first, then
- `CPUShares`, and `CPUPercent` last.
- type: "integer"
- format: "int64"
- CpuPercent:
- description: |
- The usable percentage of the available CPUs (Windows only).
-
- On Windows Server containers, the processor resource controls are
- mutually exclusive. The order of precedence is `CPUCount` first, then
- `CPUShares`, and `CPUPercent` last.
- type: "integer"
- format: "int64"
- IOMaximumIOps:
- description: "Maximum IOps for the container system drive (Windows only)"
- type: "integer"
- format: "int64"
- IOMaximumBandwidth:
- description: |
- Maximum IO in bytes per second for the container system drive
- (Windows only).
- type: "integer"
- format: "int64"
-
- Limit:
- description: |
- An object describing a limit on resources which can be requested by a task.
- type: "object"
- properties:
- NanoCPUs:
- type: "integer"
- format: "int64"
- example: 4000000000
- MemoryBytes:
- type: "integer"
- format: "int64"
- example: 8272408576
- Pids:
- description: |
- Limits the maximum number of PIDs in the container. Set `0` for unlimited.
- type: "integer"
- format: "int64"
- default: 0
- example: 100
-
- ResourceObject:
- description: |
- An object describing the resources which can be advertised by a node and
- requested by a task.
- type: "object"
- properties:
- NanoCPUs:
- type: "integer"
- format: "int64"
- example: 4000000000
- MemoryBytes:
- type: "integer"
- format: "int64"
- example: 8272408576
- GenericResources:
- $ref: "#/definitions/GenericResources"
-
- GenericResources:
- description: |
- User-defined resources can be either Integer resources (e.g, `SSD=3`) or
- String resources (e.g, `GPU=UUID1`).
- type: "array"
- items:
- type: "object"
- properties:
- NamedResourceSpec:
- type: "object"
- properties:
- Kind:
- type: "string"
- Value:
- type: "string"
- DiscreteResourceSpec:
- type: "object"
- properties:
- Kind:
- type: "string"
- Value:
- type: "integer"
- format: "int64"
- example:
- - DiscreteResourceSpec:
- Kind: "SSD"
- Value: 3
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID1"
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID2"
-
- HealthConfig:
- description: "A test to perform to check that the container is healthy."
- type: "object"
- properties:
- Test:
- description: |
- The test to perform. Possible values are:
-
- - `[]` inherit healthcheck from image or parent image
- - `["NONE"]` disable healthcheck
- - `["CMD", args...]` exec arguments directly
- - `["CMD-SHELL", command]` run command with system's default shell
- type: "array"
- items:
- type: "string"
- Interval:
- description: |
- The time to wait between checks in nanoseconds. It should be 0 or at
- least 1000000 (1 ms). 0 means inherit.
- type: "integer"
- Timeout:
- description: |
- The time to wait before considering the check to have hung. It should
- be 0 or at least 1000000 (1 ms). 0 means inherit.
- type: "integer"
- Retries:
- description: |
- The number of consecutive failures needed to consider a container as
- unhealthy. 0 means inherit.
- type: "integer"
- StartPeriod:
- description: |
- Start period for the container to initialize before starting
- health-retries countdown in nanoseconds. It should be 0 or at least
- 1000000 (1 ms). 0 means inherit.
- type: "integer"
-
- Health:
- description: |
- Health stores information about the container's healthcheck results.
- type: "object"
- properties:
- Status:
- description: |
- Status is one of `none`, `starting`, `healthy` or `unhealthy`
-
- - "none" Indicates there is no healthcheck
- - "starting" Starting indicates that the container is not yet ready
- - "healthy" Healthy indicates that the container is running correctly
- - "unhealthy" Unhealthy indicates that the container has a problem
- type: "string"
- enum:
- - "none"
- - "starting"
- - "healthy"
- - "unhealthy"
- example: "healthy"
- FailingStreak:
- description: "FailingStreak is the number of consecutive failures"
- type: "integer"
- example: 0
- Log:
- type: "array"
- description: |
- Log contains the last few results (oldest first)
- items:
- x-nullable: true
- $ref: "#/definitions/HealthcheckResult"
-
- HealthcheckResult:
- description: |
- HealthcheckResult stores information about a single run of a healthcheck probe
- type: "object"
- properties:
- Start:
- description: |
- Date and time at which this check started in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "date-time"
- example: "2020-01-04T10:44:24.496525531Z"
- End:
- description: |
- Date and time at which this check ended in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2020-01-04T10:45:21.364524523Z"
- ExitCode:
- description: |
- ExitCode meanings:
-
- - `0` healthy
- - `1` unhealthy
- - `2` reserved (considered unhealthy)
- - other values: error running probe
- type: "integer"
- example: 0
- Output:
- description: "Output from last check"
- type: "string"
-
- HostConfig:
- description: "Container configuration that depends on the host we are running on"
- allOf:
- - $ref: "#/definitions/Resources"
- - type: "object"
- properties:
- # Applicable to all platforms
- Binds:
- type: "array"
- description: |
- A list of volume bindings for this container. Each volume binding
- is a string in one of these forms:
-
- - `host-src:container-dest[:options]` to bind-mount a host path
- into the container. Both `host-src`, and `container-dest` must
- be an _absolute_ path.
- - `volume-name:container-dest[:options]` to bind-mount a volume
- managed by a volume driver into the container. `container-dest`
- must be an _absolute_ path.
-
- `options` is an optional, comma-delimited list of:
-
- - `nocopy` disables automatic copying of data from the container
- path to the volume. The `nocopy` flag only applies to named volumes.
- - `[ro|rw]` mounts a volume read-only or read-write, respectively.
- If omitted or set to `rw`, volumes are mounted read-write.
- - `[z|Z]` applies SELinux labels to allow or deny multiple containers
- to read and write to the same volume.
- - `z`: a _shared_ content label is applied to the content. This
- label indicates that multiple containers can share the volume
- content, for both reading and writing.
- - `Z`: a _private unshared_ label is applied to the content.
- This label indicates that only the current container can use
- a private volume. Labeling systems such as SELinux require
- proper labels to be placed on volume content that is mounted
- into a container. Without a label, the security system can
- prevent a container's processes from using the content. By
- default, the labels set by the host operating system are not
- modified.
- - `[[r]shared|[r]slave|[r]private]` specifies mount
- [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt).
- This only applies to bind-mounted volumes, not internal volumes
- or named volumes. Mount propagation requires the source mount
- point (the location where the source directory is mounted in the
- host operating system) to have the correct propagation properties.
- For shared volumes, the source mount point must be set to `shared`.
- For slave volumes, the mount must be set to either `shared` or
- `slave`.
- items:
- type: "string"
- ContainerIDFile:
- type: "string"
- description: "Path to a file where the container ID is written"
- LogConfig:
- type: "object"
- description: "The logging configuration for this container"
- properties:
- Type:
- type: "string"
- enum:
- - "json-file"
- - "syslog"
- - "journald"
- - "gelf"
- - "fluentd"
- - "awslogs"
- - "splunk"
- - "etwlogs"
- - "none"
- Config:
- type: "object"
- additionalProperties:
- type: "string"
- NetworkMode:
- type: "string"
- description: |
- Network mode to use for this container. Supported standard values
- are: `bridge`, `host`, `none`, and `container:<name|id>`. Any
- other value is taken as a custom network's name to which this
- container should connect to.
- PortBindings:
- $ref: "#/definitions/PortMap"
- RestartPolicy:
- $ref: "#/definitions/RestartPolicy"
- AutoRemove:
- type: "boolean"
- description: |
- Automatically remove the container when the container's process
- exits. This has no effect if `RestartPolicy` is set.
- VolumeDriver:
- type: "string"
- description: "Driver that this container uses to mount volumes."
- VolumesFrom:
- type: "array"
- description: |
- A list of volumes to inherit from another container, specified in
- the form `<container name>[:<ro|rw>]`.
- items:
- type: "string"
- Mounts:
- description: |
- Specification for mounts to be added to the container.
- type: "array"
- items:
- $ref: "#/definitions/Mount"
-
- # Applicable to UNIX platforms
- CapAdd:
- type: "array"
- description: |
- A list of kernel capabilities to add to the container. Conflicts
- with option 'Capabilities'.
- items:
- type: "string"
- CapDrop:
- type: "array"
- description: |
- A list of kernel capabilities to drop from the container. Conflicts
- with option 'Capabilities'.
- items:
- type: "string"
- CgroupnsMode:
- type: "string"
- enum:
- - "private"
- - "host"
- description: |
- cgroup namespace mode for the container. Possible values are:
-
- - `"private"`: the container runs in its own private cgroup namespace
- - `"host"`: use the host system's cgroup namespace
-
- If not specified, the daemon default is used, which can either be `"private"`
- or `"host"`, depending on daemon version, kernel support and configuration.
- Dns:
- type: "array"
- description: "A list of DNS servers for the container to use."
- items:
- type: "string"
- DnsOptions:
- type: "array"
- description: "A list of DNS options."
- items:
- type: "string"
- DnsSearch:
- type: "array"
- description: "A list of DNS search domains."
- items:
- type: "string"
- ExtraHosts:
- type: "array"
- description: |
- A list of hostnames/IP mappings to add to the container's `/etc/hosts`
- file. Specified in the form `["hostname:IP"]`.
- items:
- type: "string"
- GroupAdd:
- type: "array"
- description: |
- A list of additional groups that the container process will run as.
- items:
- type: "string"
- IpcMode:
- type: "string"
- description: |
- IPC sharing mode for the container. Possible values are:
-
- - `"none"`: own private IPC namespace, with /dev/shm not mounted
- - `"private"`: own private IPC namespace
- - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers
- - `"container:<name|id>"`: join another (shareable) container's IPC namespace
- - `"host"`: use the host system's IPC namespace
-
- If not specified, daemon default is used, which can either be `"private"`
- or `"shareable"`, depending on daemon version and configuration.
- Cgroup:
- type: "string"
- description: "Cgroup to use for the container."
- Links:
- type: "array"
- description: |
- A list of links for the container in the form `container_name:alias`.
- items:
- type: "string"
- OomScoreAdj:
- type: "integer"
- description: |
- An integer score assigned to the container, used to tune OOM killer
- preferences.
- example: 500
- PidMode:
- type: "string"
- description: |
- Set the PID (Process) Namespace mode for the container. It can be
- either:
-
- - `"container:<name|id>"`: joins another container's PID namespace
- - `"host"`: use the host's PID namespace inside the container
- Privileged:
- type: "boolean"
- description: "Gives the container full access to the host."
- PublishAllPorts:
- type: "boolean"
- description: |
- Allocates an ephemeral host port for all of a container's
- exposed ports.
-
- Ports are de-allocated when the container stops and allocated when
- the container starts. The allocated port might be changed when
- restarting the container.
-
- The port is selected from the ephemeral port range that depends on
- the kernel. For example, on Linux the range is defined by
- `/proc/sys/net/ipv4/ip_local_port_range`.
- ReadonlyRootfs:
- type: "boolean"
- description: "Mount the container's root filesystem as read only."
- SecurityOpt:
- type: "array"
- description: "A list of string values to customize labels for MLS
- systems, such as SELinux."
- items:
- type: "string"
- StorageOpt:
- type: "object"
- description: |
- Storage driver options for this container, in the form `{"size": "120G"}`.
- additionalProperties:
- type: "string"
- Tmpfs:
- type: "object"
- description: |
- A map of container directories which should be replaced by tmpfs
- mounts, and their corresponding mount options. For example:
-
- ```
- { "/run": "rw,noexec,nosuid,size=65536k" }
- ```
- additionalProperties:
- type: "string"
- UTSMode:
- type: "string"
- description: "UTS namespace to use for the container."
- UsernsMode:
- type: "string"
- description: |
- Sets the user namespace mode for the container when the user namespace
- remapping option is enabled.
- ShmSize:
- type: "integer"
- description: |
- Size of `/dev/shm` in bytes. If omitted, the system uses 64MB.
- minimum: 0
- Sysctls:
- type: "object"
- description: |
- A list of kernel parameters (sysctls) to set in the container.
- For example:
-
- ```
- {"net.ipv4.ip_forward": "1"}
- ```
- additionalProperties:
- type: "string"
- Runtime:
- type: "string"
- description: "Runtime to use with this container."
- # Applicable to Windows
- ConsoleSize:
- type: "array"
- description: |
- Initial console size, as a `[height, width]` array. (Windows only)
- minItems: 2
- maxItems: 2
- items:
- type: "integer"
- minimum: 0
- Isolation:
- type: "string"
- description: |
- Isolation technology of the container. (Windows only)
- enum:
- - "default"
- - "process"
- - "hyperv"
- MaskedPaths:
- type: "array"
- description: |
- The list of paths to be masked inside the container (this overrides
- the default set of paths).
- items:
- type: "string"
- ReadonlyPaths:
- type: "array"
- description: |
- The list of paths to be set as read-only inside the container
- (this overrides the default set of paths).
- items:
- type: "string"
-
- ContainerConfig:
- description: "Configuration for a container that is portable between hosts"
- type: "object"
- properties:
- Hostname:
- description: "The hostname to use for the container, as a valid RFC 1123 hostname."
- type: "string"
- Domainname:
- description: "The domain name to use for the container."
- type: "string"
- User:
- description: "The user that commands are run as inside the container."
- type: "string"
- AttachStdin:
- description: "Whether to attach to `stdin`."
- type: "boolean"
- default: false
- AttachStdout:
- description: "Whether to attach to `stdout`."
- type: "boolean"
- default: true
- AttachStderr:
- description: "Whether to attach to `stderr`."
- type: "boolean"
- default: true
- ExposedPorts:
- description: |
- An object mapping ports to an empty object in the form:
-
- `{"<port>/<tcp|udp|sctp>": {}}`
- type: "object"
- additionalProperties:
- type: "object"
- enum:
- - {}
- default: {}
- Tty:
- description: |
- Attach standard streams to a TTY, including `stdin` if it is not closed.
- type: "boolean"
- default: false
- OpenStdin:
- description: "Open `stdin`"
- type: "boolean"
- default: false
- StdinOnce:
- description: "Close `stdin` after one attached client disconnects"
- type: "boolean"
- default: false
- Env:
- description: |
- A list of environment variables to set inside the container in the
- form `["VAR=value", ...]`. A variable without `=` is removed from the
- environment, rather than having an empty value.
- type: "array"
- items:
- type: "string"
- Cmd:
- description: |
- Command to run specified as a string or an array of strings.
- type: "array"
- items:
- type: "string"
- Healthcheck:
- $ref: "#/definitions/HealthConfig"
- ArgsEscaped:
- description: "Command is already escaped (Windows only)"
- type: "boolean"
- Image:
- description: |
- The name of the image to use when creating the container.
- type: "string"
- Volumes:
- description: |
- An object mapping mount point paths inside the container to empty
- objects.
- type: "object"
- additionalProperties:
- type: "object"
- enum:
- - {}
- default: {}
- WorkingDir:
- description: "The working directory for commands to run in."
- type: "string"
- Entrypoint:
- description: |
- The entry point for the container as a string or an array of strings.
-
- If the array consists of exactly one empty string (`[""]`) then the
- entry point is reset to system default (i.e., the entry point used by
- docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`).
- type: "array"
- items:
- type: "string"
- NetworkDisabled:
- description: "Disable networking for the container."
- type: "boolean"
- MacAddress:
- description: "MAC address of the container."
- type: "string"
- OnBuild:
- description: |
- `ONBUILD` metadata that were defined in the image's `Dockerfile`.
- type: "array"
- items:
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- StopSignal:
- description: |
- Signal to stop a container as a string or unsigned integer.
- type: "string"
- default: "SIGTERM"
- StopTimeout:
- description: "Timeout to stop a container in seconds."
- type: "integer"
- default: 10
- Shell:
- description: |
- Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell.
- type: "array"
- items:
- type: "string"
-
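- # Illustrative only, not part of the upstream schema: a minimal JSON body
- # built from the ContainerConfig fields above might look like
- #   {"Image": "alpine:latest", "Cmd": ["echo", "hello"],
- #    "Env": ["MY_VAR=value"], "ExposedPorts": {"80/tcp": {}}}
- # where the image name, command, variable, and port are hypothetical values.
-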
- NetworkingConfig:
- description: |
- NetworkingConfig represents the container's networking configuration for
- each of its interfaces.
- It is used for the networking configs specified in the `docker create`
- and `docker network connect` commands.
- type: "object"
- properties:
- EndpointsConfig:
- description: |
- A mapping of network name to endpoint configuration for that network.
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
- example:
- # putting an example here, instead of using the example values from
- # /definitions/EndpointSettings, because containers/create currently
- # does not support attaching to multiple networks, so the example request
- # would be confusing if it showed that multiple networks can be contained
- # in the EndpointsConfig.
- # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323)
- EndpointsConfig:
- isolated_nw:
- IPAMConfig:
- IPv4Address: "172.20.30.33"
- IPv6Address: "2001:db8:abcd::3033"
- LinkLocalIPs:
- - "169.254.34.68"
- - "fe80::3468"
- Links:
- - "container_1"
- - "container_2"
- Aliases:
- - "server_x"
- - "server_y"
-
- NetworkSettings:
- description: "NetworkSettings exposes the network settings in the API"
- type: "object"
- properties:
- Bridge:
- description: Name of the network's bridge (for example, `docker0`).
- type: "string"
- example: "docker0"
- SandboxID:
- description: SandboxID uniquely represents a container's network stack.
- type: "string"
- example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3"
- HairpinMode:
- description: |
- Indicates if hairpin NAT should be enabled on the virtual interface.
- type: "boolean"
- example: false
- LinkLocalIPv6Address:
- description: IPv6 unicast address using the link-local prefix.
- type: "string"
- example: "fe80::42:acff:fe11:1"
- LinkLocalIPv6PrefixLen:
- description: Prefix length of the IPv6 unicast address.
- type: "integer"
- example: "64"
- Ports:
- $ref: "#/definitions/PortMap"
- SandboxKey:
- description: SandboxKey identifies the sandbox
- type: "string"
- example: "/var/run/docker/netns/8ab54b426c38"
-
- # TODO is SecondaryIPAddresses actually used?
- SecondaryIPAddresses:
- description: ""
- type: "array"
- items:
- $ref: "#/definitions/Address"
- x-nullable: true
-
- # TODO is SecondaryIPv6Addresses actually used?
- SecondaryIPv6Addresses:
- description: ""
- type: "array"
- items:
- $ref: "#/definitions/Address"
- x-nullable: true
-
- # TODO properties below are part of DefaultNetworkSettings, which is
- # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12
- EndpointID:
- description: |
- EndpointID uniquely represents a service endpoint in a Sandbox.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
- Gateway:
- description: |
- Gateway address for the default "bridge" network.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "172.17.0.1"
- GlobalIPv6Address:
- description: |
- Global IPv6 address for the default "bridge" network.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "2001:db8::5689"
- GlobalIPv6PrefixLen:
- description: |
- Mask length of the global IPv6 address.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "integer"
- example: 64
- IPAddress:
- description: |
- IPv4 address for the default "bridge" network.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "172.17.0.4"
- IPPrefixLen:
- description: |
- Mask length of the IPv4 address.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "integer"
- example: 16
- IPv6Gateway:
- description: |
- IPv6 gateway address for this network.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "2001:db8:2::100"
- MacAddress:
- description: |
- MAC address for the container on the default "bridge" network.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when attached to the
- > default "bridge" network. Use the information from the "bridge"
- > network inside the `Networks` map instead, which contains the same
- > information. This field was deprecated in Docker 1.9 and is scheduled
- > to be removed in Docker 17.12.0
- type: "string"
- example: "02:42:ac:11:00:04"
- Networks:
- description: |
- Information about all networks that the container is connected to.
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
-
- Address:
- description: Address represents an IPv4 or IPv6 IP address.
- type: "object"
- properties:
- Addr:
- description: IP address.
- type: "string"
- PrefixLen:
- description: Mask length of the IP address.
- type: "integer"
-
- PortMap:
- description: |
- PortMap describes the mapping of container ports to host ports, using the
- container's port-number and protocol as key in the format `<port>/<protocol>`,
- for example, `80/udp`.
-
- If a container's port is mapped for multiple protocols, separate entries
- are added to the mapping table.
- type: "object"
- additionalProperties:
- type: "array"
- x-nullable: true
- items:
- $ref: "#/definitions/PortBinding"
- example:
- "443/tcp":
- - HostIp: "127.0.0.1"
- HostPort: "4443"
- "80/tcp":
- - HostIp: "0.0.0.0"
- HostPort: "80"
- - HostIp: "0.0.0.0"
- HostPort: "8080"
- "80/udp":
- - HostIp: "0.0.0.0"
- HostPort: "80"
- "53/udp":
- - HostIp: "0.0.0.0"
- HostPort: "53"
- "2377/tcp": null
-
- PortBinding:
- description: |
- PortBinding represents a binding between a host IP address and a host
- port.
- type: "object"
- properties:
- HostIp:
- description: "Host IP address that the container's port is mapped to."
- type: "string"
- example: "127.0.0.1"
- HostPort:
- description: "Host port number that the container's port is mapped to."
- type: "string"
- example: "4443"
-
- GraphDriverData:
- description: "Information about a container's graph driver."
- type: "object"
- required: [Name, Data]
- properties:
- Name:
- type: "string"
- x-nullable: false
- Data:
- type: "object"
- x-nullable: false
- additionalProperties:
- type: "string"
-
- Image:
- type: "object"
- required:
- - Id
- - Parent
- - Comment
- - Created
- - Container
- - DockerVersion
- - Author
- - Architecture
- - Os
- - Size
- - VirtualSize
- - GraphDriver
- - RootFS
- properties:
- Id:
- type: "string"
- x-nullable: false
- RepoTags:
- type: "array"
- items:
- type: "string"
- RepoDigests:
- type: "array"
- items:
- type: "string"
- Parent:
- type: "string"
- x-nullable: false
- Comment:
- type: "string"
- x-nullable: false
- Created:
- type: "string"
- x-nullable: false
- Container:
- type: "string"
- x-nullable: false
- ContainerConfig:
- $ref: "#/definitions/ContainerConfig"
- DockerVersion:
- type: "string"
- x-nullable: false
- Author:
- type: "string"
- x-nullable: false
- Config:
- $ref: "#/definitions/ContainerConfig"
- Architecture:
- type: "string"
- x-nullable: false
- Os:
- type: "string"
- x-nullable: false
- OsVersion:
- type: "string"
- Size:
- type: "integer"
- format: "int64"
- x-nullable: false
- VirtualSize:
- type: "integer"
- format: "int64"
- x-nullable: false
- GraphDriver:
- $ref: "#/definitions/GraphDriverData"
- RootFS:
- type: "object"
- required: [Type]
- properties:
- Type:
- type: "string"
- x-nullable: false
- Layers:
- type: "array"
- items:
- type: "string"
- BaseLayer:
- type: "string"
- Metadata:
- type: "object"
- properties:
- LastTagTime:
- type: "string"
- format: "dateTime"
-
- ImageSummary:
- type: "object"
- required:
- - Id
- - ParentId
- - RepoTags
- - RepoDigests
- - Created
- - Size
- - SharedSize
- - VirtualSize
- - Labels
- - Containers
- properties:
- Id:
- type: "string"
- x-nullable: false
- ParentId:
- type: "string"
- x-nullable: false
- RepoTags:
- type: "array"
- x-nullable: false
- items:
- type: "string"
- RepoDigests:
- type: "array"
- x-nullable: false
- items:
- type: "string"
- Created:
- type: "integer"
- x-nullable: false
- Size:
- type: "integer"
- x-nullable: false
- SharedSize:
- type: "integer"
- x-nullable: false
- VirtualSize:
- type: "integer"
- x-nullable: false
- Labels:
- type: "object"
- x-nullable: false
- additionalProperties:
- type: "string"
- Containers:
- x-nullable: false
- type: "integer"
-
- AuthConfig:
- type: "object"
- properties:
- username:
- type: "string"
- password:
- type: "string"
- email:
- type: "string"
- serveraddress:
- type: "string"
- example:
- username: "hannibal"
- password: "xxxx"
- serveraddress: "https://index.docker.io/v1/"
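- # Illustrative note, not part of the upstream schema: callers typically
- # serialize this object to JSON and base64url-encode it into the
- # X-Registry-Auth request header, for example
- #   X-Registry-Auth: <base64url of {"username": "hannibal",
- #     "password": "xxxx", "serveraddress": "https://index.docker.io/v1/"}>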
-
- ProcessConfig:
- type: "object"
- properties:
- privileged:
- type: "boolean"
- user:
- type: "string"
- tty:
- type: "boolean"
- entrypoint:
- type: "string"
- arguments:
- type: "array"
- items:
- type: "string"
-
- Volume:
- type: "object"
- required: [Name, Driver, Mountpoint, Labels, Scope, Options]
- properties:
- Name:
- type: "string"
- description: "Name of the volume."
- x-nullable: false
- Driver:
- type: "string"
- description: "Name of the volume driver used by the volume."
- x-nullable: false
- Mountpoint:
- type: "string"
- description: "Mount path of the volume on the host."
- x-nullable: false
- CreatedAt:
- type: "string"
- format: "dateTime"
- description: "Date/Time the volume was created."
- Status:
- type: "object"
- description: |
- Low-level details about the volume, provided by the volume driver.
- Details are returned as a map with key/value pairs:
- `{"key":"value","key2":"value2"}`.
-
- The `Status` field is optional, and is omitted if the volume driver
- does not support this feature.
- additionalProperties:
- type: "object"
- Labels:
- type: "object"
- description: "User-defined key/value metadata."
- x-nullable: false
- additionalProperties:
- type: "string"
- Scope:
- type: "string"
- description: |
- The level at which the volume exists. Either `global` for cluster-wide,
- or `local` for machine level.
- default: "local"
- x-nullable: false
- enum: ["local", "global"]
- Options:
- type: "object"
- description: |
- The driver specific options used when creating the volume.
- additionalProperties:
- type: "string"
- UsageData:
- type: "object"
- x-nullable: true
- required: [Size, RefCount]
- description: |
- Usage details about the volume. This information is used by the
- `GET /system/df` endpoint, and omitted in other endpoints.
- properties:
- Size:
- type: "integer"
- default: -1
- description: |
- Amount of disk space used by the volume (in bytes). This information
- is only available for volumes created with the `"local"` volume
- driver. For volumes created with other volume drivers, this field
- is set to `-1` ("not available").
- x-nullable: false
- RefCount:
- type: "integer"
- default: -1
- description: |
- The number of containers referencing this volume. This field
- is set to `-1` if the reference-count is not available.
- x-nullable: false
-
- example:
- Name: "tardis"
- Driver: "custom"
- Mountpoint: "/var/lib/docker/volumes/tardis"
- Status:
- hello: "world"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Scope: "local"
- CreatedAt: "2016-06-07T20:31:11.853781916Z"
-
- Network:
- type: "object"
- properties:
- Name:
- type: "string"
- Id:
- type: "string"
- Created:
- type: "string"
- format: "dateTime"
- Scope:
- type: "string"
- Driver:
- type: "string"
- EnableIPv6:
- type: "boolean"
- IPAM:
- $ref: "#/definitions/IPAM"
- Internal:
- type: "boolean"
- Attachable:
- type: "boolean"
- Ingress:
- type: "boolean"
- Containers:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/NetworkContainer"
- Options:
- type: "object"
- additionalProperties:
- type: "string"
- Labels:
- type: "object"
- additionalProperties:
- type: "string"
- example:
- Name: "net01"
- Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
- Created: "2016-10-19T04:33:30.360899459Z"
- Scope: "local"
- Driver: "bridge"
- EnableIPv6: false
- IPAM:
- Driver: "default"
- Config:
- - Subnet: "172.19.0.0/16"
- Gateway: "172.19.0.1"
- Options:
- foo: "bar"
- Internal: false
- Attachable: false
- Ingress: false
- Containers:
- 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
- Name: "test"
- EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
- MacAddress: "02:42:ac:13:00:02"
- IPv4Address: "172.19.0.2/16"
- IPv6Address: ""
- Options:
- com.docker.network.bridge.default_bridge: "true"
- com.docker.network.bridge.enable_icc: "true"
- com.docker.network.bridge.enable_ip_masquerade: "true"
- com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
- com.docker.network.bridge.name: "docker0"
- com.docker.network.driver.mtu: "1500"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- IPAM:
- type: "object"
- properties:
- Driver:
- description: "Name of the IPAM driver to use."
- type: "string"
- default: "default"
- Config:
- description: |
- List of IPAM configuration options, specified as a map:
-
- ```
- {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>}
- ```
- type: "array"
- items:
- type: "object"
- additionalProperties:
- type: "string"
- Options:
- description: "Driver-specific options, specified as a map."
- type: "object"
- additionalProperties:
- type: "string"
-
- NetworkContainer:
- type: "object"
- properties:
- Name:
- type: "string"
- EndpointID:
- type: "string"
- MacAddress:
- type: "string"
- IPv4Address:
- type: "string"
- IPv6Address:
- type: "string"
-
- BuildInfo:
- type: "object"
- properties:
- id:
- type: "string"
- stream:
- type: "string"
- error:
- type: "string"
- errorDetail:
- $ref: "#/definitions/ErrorDetail"
- status:
- type: "string"
- progress:
- type: "string"
- progressDetail:
- $ref: "#/definitions/ProgressDetail"
- aux:
- $ref: "#/definitions/ImageID"
-
- BuildCache:
- type: "object"
- properties:
- ID:
- type: "string"
- Parent:
- type: "string"
- Type:
- type: "string"
- Description:
- type: "string"
- InUse:
- type: "boolean"
- Shared:
- type: "boolean"
- Size:
- description: |
- Amount of disk space used by the build cache (in bytes).
- type: "integer"
- CreatedAt:
- description: |
- Date and time at which the build cache was created in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2016-08-18T10:44:24.496525531Z"
- LastUsedAt:
- description: |
- Date and time at which the build cache was last used in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- x-nullable: true
- example: "2017-08-09T07:09:37.632105588Z"
- UsageCount:
- type: "integer"
-
- ImageID:
- type: "object"
- description: "Image ID or Digest"
- properties:
- ID:
- type: "string"
- example:
- ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
-
- CreateImageInfo:
- type: "object"
- properties:
- id:
- type: "string"
- error:
- type: "string"
- status:
- type: "string"
- progress:
- type: "string"
- progressDetail:
- $ref: "#/definitions/ProgressDetail"
-
- PushImageInfo:
- type: "object"
- properties:
- error:
- type: "string"
- status:
- type: "string"
- progress:
- type: "string"
- progressDetail:
- $ref: "#/definitions/ProgressDetail"
-
- ErrorDetail:
- type: "object"
- properties:
- code:
- type: "integer"
- message:
- type: "string"
-
- ProgressDetail:
- type: "object"
- properties:
- current:
- type: "integer"
- total:
- type: "integer"
-
- ErrorResponse:
- description: "Represents an error."
- type: "object"
- required: ["message"]
- properties:
- message:
- description: "The error message."
- type: "string"
- x-nullable: false
- example:
- message: "Something went wrong."
-
- IdResponse:
- description: "Response to an API call that returns just an Id"
- type: "object"
- required: ["Id"]
- properties:
- Id:
- description: "The id of the newly created object."
- type: "string"
- x-nullable: false
-
- EndpointSettings:
- description: "Configuration for a network endpoint."
- type: "object"
- properties:
- # Configurations
- IPAMConfig:
- $ref: "#/definitions/EndpointIPAMConfig"
- Links:
- type: "array"
- items:
- type: "string"
- example:
- - "container_1"
- - "container_2"
- Aliases:
- type: "array"
- items:
- type: "string"
- example:
- - "server_x"
- - "server_y"
-
- # Operational data
- NetworkID:
- description: |
- Unique ID of the network.
- type: "string"
- example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a"
- EndpointID:
- description: |
- Unique ID for the service endpoint in a Sandbox.
- type: "string"
- example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b"
- Gateway:
- description: |
- Gateway address for this network.
- type: "string"
- example: "172.17.0.1"
- IPAddress:
- description: |
- IPv4 address.
- type: "string"
- example: "172.17.0.4"
- IPPrefixLen:
- description: |
- Mask length of the IPv4 address.
- type: "integer"
- example: 16
- IPv6Gateway:
- description: |
- IPv6 gateway address.
- type: "string"
- example: "2001:db8:2::100"
- GlobalIPv6Address:
- description: |
- Global IPv6 address.
- type: "string"
- example: "2001:db8::5689"
- GlobalIPv6PrefixLen:
- description: |
- Mask length of the global IPv6 address.
- type: "integer"
- format: "int64"
- example: 64
- MacAddress:
- description: |
- MAC address for the endpoint on this network.
- type: "string"
- example: "02:42:ac:11:00:04"
- DriverOpts:
- description: |
- DriverOpts is a mapping of driver options and values. These options
- are passed directly to the driver and are driver specific.
- type: "object"
- x-nullable: true
- additionalProperties:
- type: "string"
- example:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
-
- EndpointIPAMConfig:
- description: |
- EndpointIPAMConfig represents an endpoint's IPAM configuration.
- type: "object"
- x-nullable: true
- properties:
- IPv4Address:
- type: "string"
- example: "172.20.30.33"
- IPv6Address:
- type: "string"
- example: "2001:db8:abcd::3033"
- LinkLocalIPs:
- type: "array"
- items:
- type: "string"
- example:
- - "169.254.34.68"
- - "fe80::3468"
-
- PluginMount:
- type: "object"
- x-nullable: false
- required: [Name, Description, Settable, Source, Destination, Type, Options]
- properties:
- Name:
- type: "string"
- x-nullable: false
- example: "some-mount"
- Description:
- type: "string"
- x-nullable: false
- example: "This is a mount that's used by the plugin."
- Settable:
- type: "array"
- items:
- type: "string"
- Source:
- type: "string"
- example: "/var/lib/docker/plugins/"
- Destination:
- type: "string"
- x-nullable: false
- example: "/mnt/state"
- Type:
- type: "string"
- x-nullable: false
- example: "bind"
- Options:
- type: "array"
- items:
- type: "string"
- example:
- - "rbind"
- - "rw"
-
- PluginDevice:
- type: "object"
- required: [Name, Description, Settable, Path]
- x-nullable: false
- properties:
- Name:
- type: "string"
- x-nullable: false
- Description:
- type: "string"
- x-nullable: false
- Settable:
- type: "array"
- items:
- type: "string"
- Path:
- type: "string"
- example: "/dev/fuse"
-
- PluginEnv:
- type: "object"
- x-nullable: false
- required: [Name, Description, Settable, Value]
- properties:
- Name:
- x-nullable: false
- type: "string"
- Description:
- x-nullable: false
- type: "string"
- Settable:
- type: "array"
- items:
- type: "string"
- Value:
- type: "string"
-
- PluginInterfaceType:
- type: "object"
- x-nullable: false
- required: [Prefix, Capability, Version]
- properties:
- Prefix:
- type: "string"
- x-nullable: false
- Capability:
- type: "string"
- x-nullable: false
- Version:
- type: "string"
- x-nullable: false
-
- Plugin:
- description: "A plugin for the Engine API"
- type: "object"
- required: [Settings, Enabled, Config, Name]
- properties:
- Id:
- type: "string"
- example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078"
- Name:
- type: "string"
- x-nullable: false
- example: "tiborvass/sample-volume-plugin"
- Enabled:
- description:
- True if the plugin is running. False if the plugin is not running,
- only installed.
- type: "boolean"
- x-nullable: false
- example: true
- Settings:
- description: "Settings that can be modified by users."
- type: "object"
- x-nullable: false
- required: [Args, Devices, Env, Mounts]
- properties:
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/PluginMount"
- Env:
- type: "array"
- items:
- type: "string"
- example:
- - "DEBUG=0"
- Args:
- type: "array"
- items:
- type: "string"
- Devices:
- type: "array"
- items:
- $ref: "#/definitions/PluginDevice"
- PluginReference:
- description: "plugin remote reference used to push/pull the plugin"
- type: "string"
- x-nullable: false
- example: "localhost:5000/tiborvass/sample-volume-plugin:latest"
- Config:
- description: "The config of a plugin."
- type: "object"
- x-nullable: false
- required:
- - Description
- - Documentation
- - Interface
- - Entrypoint
- - WorkDir
- - Network
- - Linux
- - PidHost
- - PropagatedMount
- - IpcHost
- - Mounts
- - Env
- - Args
- properties:
- DockerVersion:
- description: "Docker Version used to create the plugin"
- type: "string"
- x-nullable: false
- example: "17.06.0-ce"
- Description:
- type: "string"
- x-nullable: false
- example: "A sample volume plugin for Docker"
- Documentation:
- type: "string"
- x-nullable: false
- example: "https://docs.docker.com/engine/extend/plugins/"
- Interface:
- description: "The interface between Docker and the plugin"
- x-nullable: false
- type: "object"
- required: [Types, Socket]
- properties:
- Types:
- type: "array"
- items:
- $ref: "#/definitions/PluginInterfaceType"
- example:
- - "docker.volumedriver/1.0"
- Socket:
- type: "string"
- x-nullable: false
- example: "plugins.sock"
- ProtocolScheme:
- type: "string"
- example: "some.protocol/v1.0"
- description: "Protocol to use for clients connecting to the plugin."
- enum:
- - ""
- - "moby.plugins.http/v1"
- Entrypoint:
- type: "array"
- items:
- type: "string"
- example:
- - "/usr/bin/sample-volume-plugin"
- - "/data"
- WorkDir:
- type: "string"
- x-nullable: false
- example: "/bin/"
- User:
- type: "object"
- x-nullable: false
- properties:
- UID:
- type: "integer"
- format: "uint32"
- example: 1000
- GID:
- type: "integer"
- format: "uint32"
- example: 1000
- Network:
- type: "object"
- x-nullable: false
- required: [Type]
- properties:
- Type:
- x-nullable: false
- type: "string"
- example: "host"
- Linux:
- type: "object"
- x-nullable: false
- required: [Capabilities, AllowAllDevices, Devices]
- properties:
- Capabilities:
- type: "array"
- items:
- type: "string"
- example:
- - "CAP_SYS_ADMIN"
- - "CAP_SYSLOG"
- AllowAllDevices:
- type: "boolean"
- x-nullable: false
- example: false
- Devices:
- type: "array"
- items:
- $ref: "#/definitions/PluginDevice"
- PropagatedMount:
- type: "string"
- x-nullable: false
- example: "/mnt/volumes"
- IpcHost:
- type: "boolean"
- x-nullable: false
- example: false
- PidHost:
- type: "boolean"
- x-nullable: false
- example: false
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/PluginMount"
- Env:
- type: "array"
- items:
- $ref: "#/definitions/PluginEnv"
- example:
- - Name: "DEBUG"
- Description: "If set, prints debug messages"
- Settable: null
- Value: "0"
- Args:
- type: "object"
- x-nullable: false
- required: [Name, Description, Settable, Value]
- properties:
- Name:
- x-nullable: false
- type: "string"
- example: "args"
- Description:
- x-nullable: false
- type: "string"
- example: "command line arguments"
- Settable:
- type: "array"
- items:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- rootfs:
- type: "object"
- properties:
- type:
- type: "string"
- example: "layers"
- diff_ids:
- type: "array"
- items:
- type: "string"
- example:
- - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887"
- - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
-
- ObjectVersion:
- description: |
- The version number of the object such as node, service, etc. This is needed
- to avoid conflicting writes. The client must send the version number along
- with the modified specification when updating these objects.
-
- This approach ensures safe concurrency and determinism in that the change
- on the object may not be applied if the version number has changed from the
- last read. In other words, if two update requests specify the same base
- version, only one of the requests can succeed. As a result, two separate
- update requests that happen at the same time will not unintentionally
- overwrite each other.
- type: "object"
- properties:
- Index:
- type: "integer"
- format: "uint64"
- example: 373531
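- # Illustrative note, not part of the upstream schema: update operations such
- # as NodeUpdate pass the last-read Index back as the `version` query
- # parameter, for example
- #   POST /nodes/24ifsmvkjbyhk/update?version=373531
- # and the update is rejected if the stored version changed since that read.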
-
- NodeSpec:
- type: "object"
- properties:
- Name:
- description: "Name for the node."
- type: "string"
- example: "my-node"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- Role:
- description: "Role of the node."
- type: "string"
- enum:
- - "worker"
- - "manager"
- example: "manager"
- Availability:
- description: "Availability of the node."
- type: "string"
- enum:
- - "active"
- - "pause"
- - "drain"
- example: "active"
- example:
- Availability: "active"
- Name: "node-name"
- Role: "manager"
- Labels:
- foo: "bar"
-
- Node:
- type: "object"
- properties:
- ID:
- type: "string"
- example: "24ifsmvkjbyhk"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- description: |
- Date and time at which the node was added to the swarm in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2016-08-18T10:44:24.496525531Z"
- UpdatedAt:
- description: |
- Date and time at which the node was last updated in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2017-08-09T07:09:37.632105588Z"
- Spec:
- $ref: "#/definitions/NodeSpec"
- Description:
- $ref: "#/definitions/NodeDescription"
- Status:
- $ref: "#/definitions/NodeStatus"
- ManagerStatus:
- $ref: "#/definitions/ManagerStatus"
-
- NodeDescription:
- description: |
- NodeDescription encapsulates the properties of the Node as reported by the
- agent.
- type: "object"
- properties:
- Hostname:
- type: "string"
- example: "bf3067039e47"
- Platform:
- $ref: "#/definitions/Platform"
- Resources:
- $ref: "#/definitions/ResourceObject"
- Engine:
- $ref: "#/definitions/EngineDescription"
- TLSInfo:
- $ref: "#/definitions/TLSInfo"
-
- Platform:
- description: |
- Platform represents the platform (Arch/OS).
- type: "object"
- properties:
- Architecture:
- description: |
- Architecture represents the hardware architecture (for example,
- `x86_64`).
- type: "string"
- example: "x86_64"
- OS:
- description: |
- OS represents the Operating System (for example, `linux` or `windows`).
- type: "string"
- example: "linux"
-
- EngineDescription:
- description: "EngineDescription provides information about an engine."
- type: "object"
- properties:
- EngineVersion:
- type: "string"
- example: "17.06.0"
- Labels:
- type: "object"
- additionalProperties:
- type: "string"
- example:
- foo: "bar"
- Plugins:
- type: "array"
- items:
- type: "object"
- properties:
- Type:
- type: "string"
- Name:
- type: "string"
- example:
- - Type: "Log"
- Name: "awslogs"
- - Type: "Log"
- Name: "fluentd"
- - Type: "Log"
- Name: "gcplogs"
- - Type: "Log"
- Name: "gelf"
- - Type: "Log"
- Name: "journald"
- - Type: "Log"
- Name: "json-file"
- - Type: "Log"
- Name: "logentries"
- - Type: "Log"
- Name: "splunk"
- - Type: "Log"
- Name: "syslog"
- - Type: "Network"
- Name: "bridge"
- - Type: "Network"
- Name: "host"
- - Type: "Network"
- Name: "ipvlan"
- - Type: "Network"
- Name: "macvlan"
- - Type: "Network"
- Name: "null"
- - Type: "Network"
- Name: "overlay"
- - Type: "Volume"
- Name: "local"
- - Type: "Volume"
- Name: "localhost:5000/vieux/sshfs:latest"
- - Type: "Volume"
- Name: "vieux/sshfs:latest"
-
- TLSInfo:
- description: |
- Information about the issuer of leaf TLS certificates and the trusted root
- CA certificate.
- type: "object"
- properties:
- TrustRoot:
- description: |
- The root CA certificate(s) that are used to validate leaf TLS
- certificates.
- type: "string"
- CertIssuerSubject:
- description:
- The base64-url-safe-encoded raw subject bytes of the issuer.
- type: "string"
- CertIssuerPublicKey:
- description: |
- The base64-url-safe-encoded raw public key bytes of the issuer.
- type: "string"
- example:
- TrustRoot: |
- -----BEGIN CERTIFICATE-----
- MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw
- EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0
- MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH
- A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf
- 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
- Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO
- PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz
- pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H
- -----END CERTIFICATE-----
- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh"
- CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="
-
- NodeStatus:
- description: |
- NodeStatus represents the status of a node.
-
- It provides the current status of the node, as seen by the manager.
- type: "object"
- properties:
- State:
- $ref: "#/definitions/NodeState"
- Message:
- type: "string"
- example: ""
- Addr:
- description: "IP address of the node."
- type: "string"
- example: "172.17.0.2"
-
- NodeState:
- description: "NodeState represents the state of a node."
- type: "string"
- enum:
- - "unknown"
- - "down"
- - "ready"
- - "disconnected"
- example: "ready"
-
- ManagerStatus:
- description: |
- ManagerStatus represents the status of a manager.
-
- It provides the current status of a node's manager component, if the node
- is a manager.
- x-nullable: true
- type: "object"
- properties:
- Leader:
- type: "boolean"
- default: false
- example: true
- Reachability:
- $ref: "#/definitions/Reachability"
- Addr:
- description: |
- The IP address and port at which the manager is reachable.
- type: "string"
- example: "10.0.0.46:2377"
-
- Reachability:
- description: "Reachability represents the reachability of a node."
- type: "string"
- enum:
- - "unknown"
- - "unreachable"
- - "reachable"
- example: "reachable"
-
- SwarmSpec:
- description: "User modifiable swarm configuration."
- type: "object"
- properties:
- Name:
- description: "Name of the swarm."
- type: "string"
- example: "default"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- com.example.corp.type: "production"
- com.example.corp.department: "engineering"
- Orchestration:
- description: "Orchestration configuration."
- type: "object"
- x-nullable: true
- properties:
- TaskHistoryRetentionLimit:
- description: |
- The number of historic tasks to keep per instance or node. If
- negative, never remove completed or failed tasks.
- type: "integer"
- format: "int64"
- example: 10
- Raft:
- description: "Raft configuration."
- type: "object"
- properties:
- SnapshotInterval:
- description: "The number of log entries between snapshots."
- type: "integer"
- format: "uint64"
- example: 10000
- KeepOldSnapshots:
- description: |
- The number of snapshots to keep beyond the current snapshot.
- type: "integer"
- format: "uint64"
- LogEntriesForSlowFollowers:
- description: |
- The number of log entries to keep around to sync up slow followers
- after a snapshot is created.
- type: "integer"
- format: "uint64"
- example: 500
- ElectionTick:
- description: |
- The number of ticks that a follower will wait for a message from
- the leader before becoming a candidate and starting an election.
- `ElectionTick` must be greater than `HeartbeatTick`.
-
- A tick currently defaults to one second, so these translate
- directly to seconds currently, but this is NOT guaranteed.
- type: "integer"
- example: 3
- HeartbeatTick:
- description: |
- The number of ticks between heartbeats. Every HeartbeatTick ticks,
- the leader will send a heartbeat to the followers.
-
- A tick currently defaults to one second, so these translate
- directly to seconds currently, but this is NOT guaranteed.
- type: "integer"
- example: 1
- Dispatcher:
- description: "Dispatcher configuration."
- type: "object"
- x-nullable: true
- properties:
- HeartbeatPeriod:
- description: |
- The delay for an agent to send a heartbeat to the dispatcher.
- type: "integer"
- format: "int64"
- example: 5000000000
- CAConfig:
- description: "CA configuration."
- type: "object"
- x-nullable: true
- properties:
- NodeCertExpiry:
- description: "The duration node certificates are issued for."
- type: "integer"
- format: "int64"
- example: 7776000000000000
- ExternalCAs:
- description: |
- Configuration for forwarding signing requests to an external
- certificate authority.
- type: "array"
- items:
- type: "object"
- properties:
- Protocol:
- description: |
- Protocol for communication with the external CA (currently
- only `cfssl` is supported).
- type: "string"
- enum:
- - "cfssl"
- default: "cfssl"
- URL:
- description: |
- URL where certificate signing requests should be sent.
- type: "string"
- Options:
- description: |
- An object with key/value pairs that are interpreted as
- protocol-specific options for the external CA driver.
- type: "object"
- additionalProperties:
- type: "string"
- CACert:
- description: |
- The root CA certificate (in PEM format) this external CA uses
- to issue TLS certificates (assumed to be to the current swarm
- root CA certificate if not provided).
- type: "string"
- SigningCACert:
- description: |
- The desired signing CA certificate for all swarm node TLS leaf
- certificates, in PEM format.
- type: "string"
- SigningCAKey:
- description: |
- The desired signing CA key for all swarm node TLS leaf certificates,
- in PEM format.
- type: "string"
- ForceRotate:
- description: |
- An integer whose purpose is to force swarm to generate a new
- signing CA certificate and key, if none have been specified in
- `SigningCACert` and `SigningCAKey`.
- format: "uint64"
- type: "integer"
- EncryptionConfig:
- description: "Parameters related to encryption-at-rest."
- type: "object"
- properties:
- AutoLockManagers:
- description: |
- If set, generate a key and use it to lock data stored on the
- managers.
- type: "boolean"
- example: false
- TaskDefaults:
- description: "Defaults for creating tasks in this cluster."
- type: "object"
- properties:
- LogDriver:
- description: |
- The log driver to use for tasks created in the orchestrator if
- unspecified by a service.
-
- Updating this value only affects new tasks. Existing tasks continue
- to use their previously configured log driver until recreated.
- type: "object"
- properties:
- Name:
- description: |
- The log driver to use as a default for new tasks.
- type: "string"
- example: "json-file"
- Options:
- description: |
- Driver-specific options for the selected log driver, specified
- as key/value pairs.
- type: "object"
- additionalProperties:
- type: "string"
- example:
- "max-file": "10"
- "max-size": "100m"
-
- # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but
- # without `JoinTokens`.
- ClusterInfo:
- description: |
- ClusterInfo represents information about the swarm as returned by the
- "/info" endpoint. Join-tokens are not included.
- x-nullable: true
- type: "object"
- properties:
- ID:
- description: "The ID of the swarm."
- type: "string"
- example: "abajmipo7b4xz5ip2nrla6b11"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- description: |
- Date and time at which the swarm was initialised in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2016-08-18T10:44:24.496525531Z"
- UpdatedAt:
- description: |
- Date and time at which the swarm was last updated in
- [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
- type: "string"
- format: "dateTime"
- example: "2017-08-09T07:09:37.632105588Z"
- Spec:
- $ref: "#/definitions/SwarmSpec"
- TLSInfo:
- $ref: "#/definitions/TLSInfo"
- RootRotationInProgress:
- description: |
- Whether there is currently a root CA rotation in progress for the swarm
- type: "boolean"
- example: false
- DataPathPort:
- description: |
- DataPathPort specifies the data path port number for data traffic.
- Acceptable port range is 1024 to 49151.
- If no port is set or is set to 0, the default port (4789) is used.
- type: "integer"
- format: "uint32"
- default: 4789
- example: 4789
- DefaultAddrPool:
- description: |
- Default Address Pool specifies default subnet pools for global scope
- networks.
- type: "array"
- items:
- type: "string"
- format: "CIDR"
- example: ["10.10.0.0/16", "20.20.0.0/16"]
- SubnetSize:
- description: |
- SubnetSize specifies the subnet size of the networks created from the
- default subnet pool.
- type: "integer"
- format: "uint32"
- maximum: 29
- default: 24
- example: 24
-
- JoinTokens:
- description: |
- JoinTokens contains the tokens workers and managers need to join the swarm.
- type: "object"
- properties:
- Worker:
- description: |
- The token workers can use to join the swarm.
- type: "string"
- example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx"
- Manager:
- description: |
- The token managers can use to join the swarm.
- type: "string"
- example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
-
- Swarm:
- type: "object"
- allOf:
- - $ref: "#/definitions/ClusterInfo"
- - type: "object"
- properties:
- JoinTokens:
- $ref: "#/definitions/JoinTokens"
-
- TaskSpec:
- description: "User modifiable task configuration."
- type: "object"
- properties:
- PluginSpec:
- type: "object"
- description: |
- Plugin spec for the service. *(Experimental release only.)*
-
- <p><br /></p>
-
- > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
- > mutually exclusive. PluginSpec is only used when the Runtime field
- > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
- > field is set to `attachment`.
- properties:
- Name:
- description: "The name or 'alias' to use for the plugin."
- type: "string"
- Remote:
- description: "The plugin image reference to use."
- type: "string"
- Disabled:
- description: "Disable the plugin once scheduled."
- type: "boolean"
- PluginPrivilege:
- type: "array"
- items:
- description: |
- Describes a permission accepted by the user upon installing the
- plugin.
- type: "object"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- ContainerSpec:
- type: "object"
- description: |
- Container spec for the service.
-
- <p><br /></p>
-
- > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
- > mutually exclusive. PluginSpec is only used when the Runtime field
- > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
- > field is set to `attachment`.
- properties:
- Image:
- description: "The image name to use for the container"
- type: "string"
- Labels:
- description: "User-defined key/value data."
- type: "object"
- additionalProperties:
- type: "string"
- Command:
- description: "The command to be run in the image."
- type: "array"
- items:
- type: "string"
- Args:
- description: "Arguments to the command."
- type: "array"
- items:
- type: "string"
- Hostname:
- description: |
- The hostname to use for the container, as a valid
- [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname.
- type: "string"
- Env:
- description: |
- A list of environment variables in the form `VAR=value`.
- type: "array"
- items:
- type: "string"
- Dir:
- description: "The working directory for commands to run in."
- type: "string"
- User:
- description: "The user inside the container."
- type: "string"
- Groups:
- type: "array"
- description: |
- A list of additional groups that the container process will run as.
- items:
- type: "string"
- Privileges:
- type: "object"
- description: "Security options for the container"
- properties:
- CredentialSpec:
- type: "object"
- description: "CredentialSpec for managed service account (Windows only)"
- properties:
- Config:
- type: "string"
- example: "0bt9dmxjvjiqermk6xrop3ekq"
- description: |
- Load credential spec from a Swarm Config with the given ID.
- The specified config must also be present in the Configs
- field with the Runtime property set.
-
- <p><br /></p>
-
-
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
- > and `CredentialSpec.Config` are mutually exclusive.
- File:
- type: "string"
- example: "spec.json"
- description: |
- Load credential spec from this file. The file is read by
- the daemon, and must be present in the `CredentialSpecs`
- subdirectory in the docker data directory, which defaults
- to `C:\ProgramData\Docker\` on Windows.
-
- For example, specifying `spec.json` loads
- `C:\ProgramData\Docker\CredentialSpecs\spec.json`.
-
- <p><br /></p>
-
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
- > and `CredentialSpec.Config` are mutually exclusive.
- Registry:
- type: "string"
- description: |
- Load credential spec from this value in the Windows
- registry. The specified registry value must be located in:
-
- `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs`
-
- <p><br /></p>
-
-
- > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`,
- > and `CredentialSpec.Config` are mutually exclusive.
- SELinuxContext:
- type: "object"
- description: "SELinux labels of the container"
- properties:
- Disable:
- type: "boolean"
- description: "Disable SELinux"
- User:
- type: "string"
- description: "SELinux user label"
- Role:
- type: "string"
- description: "SELinux role label"
- Type:
- type: "string"
- description: "SELinux type label"
- Level:
- type: "string"
- description: "SELinux level label"
- TTY:
- description: "Whether a pseudo-TTY should be allocated."
- type: "boolean"
- OpenStdin:
- description: "Open `stdin`"
- type: "boolean"
- ReadOnly:
- description: "Mount the container's root filesystem as read only."
- type: "boolean"
- Mounts:
- description: |
- Specification for mounts to be added to containers created as part
- of the service.
- type: "array"
- items:
- $ref: "#/definitions/Mount"
- StopSignal:
- description: "Signal to stop the container."
- type: "string"
- StopGracePeriod:
- description: |
- Amount of time to wait for the container to terminate before
- forcefully killing it.
- type: "integer"
- format: "int64"
- HealthCheck:
- $ref: "#/definitions/HealthConfig"
- Hosts:
- type: "array"
- description: |
- A list of hostname/IP mappings to add to the container's `hosts`
- file. The format of extra hosts is specified in the
- [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html)
- man page:
-
- IP_address canonical_hostname [aliases...]
- items:
- type: "string"
- DNSConfig:
- description: |
- Specification for DNS related configurations in resolver configuration
- file (`resolv.conf`).
- type: "object"
- properties:
- Nameservers:
- description: "The IP addresses of the name servers."
- type: "array"
- items:
- type: "string"
- Search:
- description: "A search list for host-name lookup."
- type: "array"
- items:
- type: "string"
- Options:
- description: |
- A list of internal resolver variables to be modified (e.g.,
- `debug`, `ndots:3`, etc.).
- type: "array"
- items:
- type: "string"
- Secrets:
- description: |
- Secrets contains references to zero or more secrets that will be
- exposed to the service.
- type: "array"
- items:
- type: "object"
- properties:
- File:
- description: |
- File represents a specific target that is backed by a file.
- type: "object"
- properties:
- Name:
- description: |
- Name represents the final filename in the filesystem.
- type: "string"
- UID:
- description: "UID represents the file UID."
- type: "string"
- GID:
- description: "GID represents the file GID."
- type: "string"
- Mode:
- description: "Mode represents the FileMode of the file."
- type: "integer"
- format: "uint32"
- SecretID:
- description: |
- SecretID represents the ID of the specific secret that we're
- referencing.
- type: "string"
- SecretName:
- description: |
- SecretName is the name of the secret that this references,
- but this is just provided for lookup/display purposes. The
- secret in the reference will be identified by its ID.
- type: "string"
- Configs:
- description: |
- Configs contains references to zero or more configs that will be
- exposed to the service.
- type: "array"
- items:
- type: "object"
- properties:
- File:
- description: |
- File represents a specific target that is backed by a file.
-
- <p><br /></p>
-
- > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive
- type: "object"
- properties:
- Name:
- description: |
- Name represents the final filename in the filesystem.
- type: "string"
- UID:
- description: "UID represents the file UID."
- type: "string"
- GID:
- description: "GID represents the file GID."
- type: "string"
- Mode:
- description: "Mode represents the FileMode of the file."
- type: "integer"
- format: "uint32"
- Runtime:
- description: |
- Runtime represents a target that is not mounted into the
- container but is used by the task.
-
- <p><br /></p>
-
- > **Note**: `Configs.File` and `Configs.Runtime` are mutually
- > exclusive
- type: "object"
- ConfigID:
- description: |
- ConfigID represents the ID of the specific config that we're
- referencing.
- type: "string"
- ConfigName:
- description: |
- ConfigName is the name of the config that this references,
- but this is just provided for lookup/display purposes. The
- config in the reference will be identified by its ID.
- type: "string"
- Isolation:
- type: "string"
- description: |
- Isolation technology of the containers running the service.
- (Windows only)
- enum:
- - "default"
- - "process"
- - "hyperv"
- Init:
- description: |
- Run an init inside the container that forwards signals and reaps
- processes. This field is omitted if empty, and the default (as
- configured on the daemon) is used.
- type: "boolean"
- x-nullable: true
- Sysctls:
- description: |
- Set kernel namespaced parameters (sysctls) in the container.
- The Sysctls option on services accepts the same sysctls as are
- supported on containers. Note that while the same sysctls are
- supported, no guarantees or checks are made about their
- suitability for a clustered environment, and it's up to the user
- to determine whether a given sysctl will work properly in a
- Service.
- type: "object"
- additionalProperties:
- type: "string"
- # This option is not used by Windows containers
- CapabilityAdd:
- type: "array"
- description: |
- A list of kernel capabilities to add to the default set
- for the container.
- items:
- type: "string"
- example:
- - "CAP_NET_RAW"
- - "CAP_SYS_ADMIN"
- - "CAP_SYS_CHROOT"
- - "CAP_SYSLOG"
- CapabilityDrop:
- type: "array"
- description: |
- A list of kernel capabilities to drop from the default set
- for the container.
- items:
- type: "string"
- example:
- - "CAP_NET_RAW"
- Ulimits:
- description: |
- A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`
- type: "array"
- items:
- type: "object"
- properties:
- Name:
- description: "Name of ulimit"
- type: "string"
- Soft:
- description: "Soft limit"
- type: "integer"
- Hard:
- description: "Hard limit"
- type: "integer"
- NetworkAttachmentSpec:
- description: |
- Read-only spec type for non-swarm containers attached to swarm overlay
- networks.
-
- <p><br /></p>
-
- > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are
- > mutually exclusive. PluginSpec is only used when the Runtime field
- > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime
- > field is set to `attachment`.
- type: "object"
- properties:
- ContainerID:
- description: "ID of the container represented by this task"
- type: "string"
- Resources:
- description: |
- Resource requirements which apply to each individual container created
- as part of the service.
- type: "object"
- properties:
- Limits:
- description: "Define resources limits."
- $ref: "#/definitions/Limit"
- Reservation:
- description: "Define resources reservation."
- $ref: "#/definitions/ResourceObject"
- RestartPolicy:
- description: |
- Specification for the restart policy which applies to containers
- created as part of this service.
- type: "object"
- properties:
- Condition:
- description: "Condition for restart."
- type: "string"
- enum:
- - "none"
- - "on-failure"
- - "any"
- Delay:
- description: "Delay between restart attempts."
- type: "integer"
- format: "int64"
- MaxAttempts:
- description: |
- Maximum attempts to restart a given container before giving up
- (default value is 0, which is ignored).
- type: "integer"
- format: "int64"
- default: 0
- Window:
- description: |
- Window is the time window used to evaluate the restart policy
- (default value is 0, which is unbounded).
- type: "integer"
- format: "int64"
- default: 0
- Placement:
- type: "object"
- properties:
- Constraints:
- description: |
- An array of constraint expressions to limit the set of nodes where
- a task can be scheduled. Constraint expressions can either use a
- _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find
- nodes that satisfy every expression (AND match). Constraints can
- match node or Docker Engine labels as follows:
-
- node attribute | matches | example
- ---------------------|--------------------------------|-----------------------------------------------
- `node.id` | Node ID | `node.id==2ivku8v2gvtg4`
- `node.hostname` | Node hostname | `node.hostname!=node-2`
- `node.role` | Node role (`manager`/`worker`) | `node.role==manager`
- `node.platform.os` | Node operating system | `node.platform.os==windows`
- `node.platform.arch` | Node architecture | `node.platform.arch==x86_64`
- `node.labels` | User-defined node labels | `node.labels.security==high`
- `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04`
-
- `engine.labels` apply to Docker Engine labels like operating system,
- drivers, etc. Swarm administrators add `node.labels` for operational
- purposes by using the [`node update endpoint`](#operation/NodeUpdate).
-
- type: "array"
- items:
- type: "string"
- example:
- - "node.hostname!=node3.corp.example.com"
- - "node.role!=manager"
- - "node.labels.type==production"
- - "node.platform.os==linux"
- - "node.platform.arch==x86_64"
- Preferences:
- description: |
- Preferences provide a way to make the scheduler aware of factors
- such as topology. They are provided in order from highest to
- lowest precedence.
- type: "array"
- items:
- type: "object"
- properties:
- Spread:
- type: "object"
- properties:
- SpreadDescriptor:
- description: |
- Label descriptor, such as `engine.labels.az`.
- type: "string"
- example:
- - Spread:
- SpreadDescriptor: "node.labels.datacenter"
- - Spread:
- SpreadDescriptor: "node.labels.rack"
- MaxReplicas:
- description: |
- Maximum number of replicas per node (default value is 0, which
- is unlimited).
- type: "integer"
- format: "int64"
- default: 0
- Platforms:
- description: |
- Platforms stores all the platforms that the service's image can
- run on. This field is used in the platform filter for scheduling.
- If empty, then the platform filter is off, meaning there are no
- scheduling restrictions.
- type: "array"
- items:
- $ref: "#/definitions/Platform"
- ForceUpdate:
- description: |
- A counter that triggers an update even if no relevant parameters have
- been changed.
- type: "integer"
- Runtime:
- description: |
- Runtime is the type of runtime specified for the task executor.
- type: "string"
- Networks:
- description: "Specifies which networks the service should attach to."
- type: "array"
- items:
- $ref: "#/definitions/NetworkAttachmentConfig"
- LogDriver:
- description: |
- Specifies the log driver to use for tasks created from this spec. If
- not present, the swarm's default log driver is used, falling back to
- the engine default if the swarm does not define one.
- type: "object"
- properties:
- Name:
- type: "string"
- Options:
- type: "object"
- additionalProperties:
- type: "string"
-
- TaskState:
- type: "string"
- enum:
- - "new"
- - "allocated"
- - "pending"
- - "assigned"
- - "accepted"
- - "preparing"
- - "ready"
- - "starting"
- - "running"
- - "complete"
- - "shutdown"
- - "failed"
- - "rejected"
- - "remove"
- - "orphaned"
-
- Task:
- type: "object"
- properties:
- ID:
- description: "The ID of the task."
- type: "string"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- Name:
- description: "Name of the task."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- Spec:
- $ref: "#/definitions/TaskSpec"
- ServiceID:
- description: "The ID of the service this task is part of."
- type: "string"
- Slot:
- type: "integer"
- NodeID:
- description: "The ID of the node that this task is on."
- type: "string"
- AssignedGenericResources:
- $ref: "#/definitions/GenericResources"
- Status:
- type: "object"
- properties:
- Timestamp:
- type: "string"
- format: "dateTime"
- State:
- $ref: "#/definitions/TaskState"
- Message:
- type: "string"
- Err:
- type: "string"
- ContainerStatus:
- type: "object"
- properties:
- ContainerID:
- type: "string"
- PID:
- type: "integer"
- ExitCode:
- type: "integer"
- DesiredState:
- $ref: "#/definitions/TaskState"
- JobIteration:
- description: |
- If the Service this Task belongs to is a job-mode service, contains
- the JobIteration of the Service this Task was created for. Absent if
- the Task was created for a Replicated or Global Service.
- $ref: "#/definitions/ObjectVersion"
- example:
- ID: "0kzzo1i0y4jz6027t0k7aezc7"
- Version:
- Index: 71
- CreatedAt: "2016-06-07T21:07:31.171892745Z"
- UpdatedAt: "2016-06-07T21:07:31.376370513Z"
- Spec:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Slot: 1
- NodeID: "60gvrl6tm78dmak4yl7srz94v"
- Status:
- Timestamp: "2016-06-07T21:07:31.290032978Z"
- State: "running"
- Message: "started"
- ContainerStatus:
- ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
- PID: 677
- DesiredState: "running"
- NetworksAttachments:
- - Network:
- ID: "4qvuz4ko70xaltuqbt8956gd1"
- Version:
- Index: 18
- CreatedAt: "2016-06-07T20:31:11.912919752Z"
- UpdatedAt: "2016-06-07T21:07:29.955277358Z"
- Spec:
- Name: "ingress"
- Labels:
- com.docker.swarm.internal: "true"
- DriverConfiguration: {}
- IPAMOptions:
- Driver: {}
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- DriverState:
- Name: "overlay"
- Options:
- com.docker.network.driver.overlay.vxlanid_list: "256"
- IPAMOptions:
- Driver:
- Name: "default"
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- Addresses:
- - "10.255.0.10/16"
- AssignedGenericResources:
- - DiscreteResourceSpec:
- Kind: "SSD"
- Value: 3
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID1"
- - NamedResourceSpec:
- Kind: "GPU"
- Value: "UUID2"
-
- ServiceSpec:
- description: "User modifiable configuration for a service."
- properties:
- Name:
- description: "Name of the service."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- TaskTemplate:
- $ref: "#/definitions/TaskSpec"
- Mode:
- description: "Scheduling mode for the service."
- type: "object"
- properties:
- Replicated:
- type: "object"
- properties:
- Replicas:
- type: "integer"
- format: "int64"
- Global:
- type: "object"
- ReplicatedJob:
- description: |
- The mode used for services with a finite number of tasks that run
- to a completed state.
- type: "object"
- properties:
- MaxConcurrent:
- description: |
- The maximum number of replicas to run simultaneously.
- type: "integer"
- format: "int64"
- default: 1
- TotalCompletions:
- description: |
- The total number of replicas desired to reach the Completed
- state. If unset, this defaults to the value of `MaxConcurrent`.
- type: "integer"
- format: "int64"
- GlobalJob:
- description: |
- The mode used for services which run a task to the completed state
- on each valid node.
- type: "object"
- UpdateConfig:
- description: "Specification for the update strategy of the service."
- type: "object"
- properties:
- Parallelism:
- description: |
- Maximum number of tasks to be updated in one iteration (0 means
- unlimited parallelism).
- type: "integer"
- format: "int64"
- Delay:
- description: "Amount of time between updates, in nanoseconds."
- type: "integer"
- format: "int64"
- FailureAction:
- description: |
- Action to take if an updated task fails to run, or stops running
- during the update.
- type: "string"
- enum:
- - "continue"
- - "pause"
- - "rollback"
- Monitor:
- description: |
- Amount of time to monitor each updated task for failures, in
- nanoseconds.
- type: "integer"
- format: "int64"
- MaxFailureRatio:
- description: |
- The fraction of tasks that may fail during an update before the
- failure action is invoked, specified as a floating point number
- between 0 and 1.
- type: "number"
- default: 0
- Order:
- description: |
- The order of operations when rolling out an updated task. Either
- the old task is shut down before the new task is started, or the
- new task is started before the old task is shut down.
- type: "string"
- enum:
- - "stop-first"
- - "start-first"
- RollbackConfig:
- description: "Specification for the rollback strategy of the service."
- type: "object"
- properties:
- Parallelism:
- description: |
- Maximum number of tasks to be rolled back in one iteration (0 means
- unlimited parallelism).
- type: "integer"
- format: "int64"
- Delay:
- description: |
- Amount of time between rollback iterations, in nanoseconds.
- type: "integer"
- format: "int64"
- FailureAction:
- description: |
- Action to take if a rolled back task fails to run, or stops
- running during the rollback.
- type: "string"
- enum:
- - "continue"
- - "pause"
- Monitor:
- description: |
- Amount of time to monitor each rolled back task for failures, in
- nanoseconds.
- type: "integer"
- format: "int64"
- MaxFailureRatio:
- description: |
- The fraction of tasks that may fail during a rollback before the
- failure action is invoked, specified as a floating point number
- between 0 and 1.
- type: "number"
- default: 0
- Order:
- description: |
- The order of operations when rolling back a task. Either the old
- task is shut down before the new task is started, or the new task
- is started before the old task is shut down.
- type: "string"
- enum:
- - "stop-first"
- - "start-first"
- Networks:
- description: "Specifies which networks the service should attach to."
- type: "array"
- items:
- $ref: "#/definitions/NetworkAttachmentConfig"
-
- EndpointSpec:
- $ref: "#/definitions/EndpointSpec"
-
- EndpointPortConfig:
- type: "object"
- properties:
- Name:
- type: "string"
- Protocol:
- type: "string"
- enum:
- - "tcp"
- - "udp"
- - "sctp"
- TargetPort:
- description: "The port inside the container."
- type: "integer"
- PublishedPort:
- description: "The port on the swarm hosts."
- type: "integer"
- PublishMode:
- description: |
- The mode in which the port is published.
-
- <p><br /></p>
-
- - "ingress" makes the target port accessible on every node,
- regardless of whether there is a task for the service running on
- that node or not.
- - "host" bypasses the routing mesh and publish the port directly on
- the swarm node where that service is running.
-
- type: "string"
- enum:
- - "ingress"
- - "host"
- default: "ingress"
- example: "ingress"
-
- EndpointSpec:
- description: "Properties that can be configured to access and load balance a service."
- type: "object"
- properties:
- Mode:
- description: |
- The mode of resolution to use for internal load balancing between tasks.
- type: "string"
- enum:
- - "vip"
- - "dnsrr"
- default: "vip"
- Ports:
- description: |
- List of exposed ports that this service is accessible on from the
- outside. Ports can only be provided if `vip` resolution mode is used.
- type: "array"
- items:
- $ref: "#/definitions/EndpointPortConfig"
-
- Service:
- type: "object"
- properties:
- ID:
- type: "string"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- Spec:
- $ref: "#/definitions/ServiceSpec"
- Endpoint:
- type: "object"
- properties:
- Spec:
- $ref: "#/definitions/EndpointSpec"
- Ports:
- type: "array"
- items:
- $ref: "#/definitions/EndpointPortConfig"
- VirtualIPs:
- type: "array"
- items:
- type: "object"
- properties:
- NetworkID:
- type: "string"
- Addr:
- type: "string"
- UpdateStatus:
- description: "The status of a service update."
- type: "object"
- properties:
- State:
- type: "string"
- enum:
- - "updating"
- - "paused"
- - "completed"
- StartedAt:
- type: "string"
- format: "dateTime"
- CompletedAt:
- type: "string"
- format: "dateTime"
- Message:
- type: "string"
- ServiceStatus:
- description: |
- The status of the service's tasks. Provided only when requested as
- part of a ServiceList operation.
- type: "object"
- properties:
- RunningTasks:
- description: |
- The number of tasks for the service currently in the Running state.
- type: "integer"
- format: "uint64"
- example: 7
- DesiredTasks:
- description: |
- The number of tasks for the service desired to be running.
- For replicated services, this is the replica count from the
- service spec. For global services, this is computed by taking
- the count of all tasks for the service with a Desired State other
- than Shutdown.
- type: "integer"
- format: "uint64"
- example: 10
- CompletedTasks:
- description: |
- The number of tasks for a job that are in the Completed state.
- This field must be cross-referenced with the service type, as the
- value of 0 may mean the service is not in a job mode, or it may
- mean the job-mode service has no tasks in the Completed state yet.
- type: "integer"
- format: "uint64"
- JobStatus:
- description: |
- The status of the service when it is in one of ReplicatedJob or
- GlobalJob modes. Absent on Replicated and Global mode services. The
- JobIteration is an ObjectVersion, but unlike the Service's version,
- does not need to be sent with an update request.
- type: "object"
- properties:
- JobIteration:
- description: |
- JobIteration is a value increased each time a Job is executed,
- successfully or otherwise. "Executed", in this case, means the
- job as a whole has been started, not that an individual Task has
- been launched. A job is "Executed" when its ServiceSpec is
- updated. JobIteration can be used to disambiguate Tasks belonging
- to different executions of a job. Though JobIteration will
- increase with each subsequent execution, it may not necessarily
- increase by 1, and so JobIteration should not be used to count
- how many times the job has been executed.
- $ref: "#/definitions/ObjectVersion"
- LastExecution:
- description: |
- The last time, as observed by the server, that this job was
- started.
- type: "string"
- format: "dateTime"
- example:
- ID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Version:
- Index: 19
- CreatedAt: "2016-06-07T21:05:51.880065305Z"
- UpdatedAt: "2016-06-07T21:07:29.962229872Z"
- Spec:
- Name: "hopeful_cori"
- TaskTemplate:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ForceUpdate: 0
- Mode:
- Replicated:
- Replicas: 1
- UpdateConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- RollbackConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- EndpointSpec:
- Mode: "vip"
- Ports:
- -
- Protocol: "tcp"
- TargetPort: 6379
- PublishedPort: 30001
- Endpoint:
- Spec:
- Mode: "vip"
- Ports:
- -
- Protocol: "tcp"
- TargetPort: 6379
- PublishedPort: 30001
- Ports:
- -
- Protocol: "tcp"
- TargetPort: 6379
- PublishedPort: 30001
- VirtualIPs:
- -
- NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
- Addr: "10.255.0.2/16"
- -
- NetworkID: "4qvuz4ko70xaltuqbt8956gd1"
- Addr: "10.255.0.3/16"
-
- ImageDeleteResponseItem:
- type: "object"
- properties:
- Untagged:
- description: "The image ID of an image that was untagged"
- type: "string"
- Deleted:
- description: "The image ID of an image that was deleted"
- type: "string"
-
- ServiceUpdateResponse:
- type: "object"
- properties:
- Warnings:
- description: "Optional warning messages"
- type: "array"
- items:
- type: "string"
- example:
- Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
-
- ContainerSummary:
- type: "array"
- items:
- type: "object"
- properties:
- Id:
- description: "The ID of this container"
- type: "string"
- x-go-name: "ID"
- Names:
- description: "The names that this container has been given"
- type: "array"
- items:
- type: "string"
- Image:
- description: "The name of the image used when creating this container"
- type: "string"
- ImageID:
- description: "The ID of the image that this container was created from"
- type: "string"
- Command:
- description: "Command to run when starting the container"
- type: "string"
- Created:
- description: "When the container was created"
- type: "integer"
- format: "int64"
- Ports:
- description: "The ports exposed by this container"
- type: "array"
- items:
- $ref: "#/definitions/Port"
- SizeRw:
- description: "The size of files that have been created or changed by this container"
- type: "integer"
- format: "int64"
- SizeRootFs:
- description: "The total size of all the files in this container"
- type: "integer"
- format: "int64"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- State:
- description: "The state of this container (e.g. `Exited`)"
- type: "string"
- Status:
- description: "Additional human-readable status of this container (e.g. `Exit 0`)"
- type: "string"
- HostConfig:
- type: "object"
- properties:
- NetworkMode:
- type: "string"
- NetworkSettings:
- description: "A summary of the container's network settings"
- type: "object"
- properties:
- Networks:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/EndpointSettings"
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/Mount"
-
- Driver:
- description: "Driver represents a driver (network, logging, secrets)."
- type: "object"
- required: [Name]
- properties:
- Name:
- description: "Name of the driver."
- type: "string"
- x-nullable: false
- example: "some-driver"
- Options:
- description: "Key/value map of driver-specific options."
- type: "object"
- x-nullable: false
- additionalProperties:
- type: "string"
- example:
- OptionA: "value for driver-specific option A"
- OptionB: "value for driver-specific option B"
-
- SecretSpec:
- type: "object"
- properties:
- Name:
- description: "User-defined name of the secret."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Data:
- description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
- data to store as secret.
-
- This field is only used to _create_ a secret, and is not returned by
- other endpoints.
- type: "string"
- example: ""
- Driver:
- description: |
- Name of the secrets driver used to fetch the secret's value from an
- external secret store.
- $ref: "#/definitions/Driver"
- Templating:
- description: |
- Templating driver, if applicable
-
- Templating controls whether and how to evaluate the config payload as
- a template. If no driver is set, no templating is used.
- $ref: "#/definitions/Driver"
-
- Secret:
- type: "object"
- properties:
- ID:
- type: "string"
- example: "blt1owaxmitz71s9v5zh81zun"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- example: "2017-07-20T13:55:28.678958722Z"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- example: "2017-07-20T13:55:28.678958722Z"
- Spec:
- $ref: "#/definitions/SecretSpec"
-
- ConfigSpec:
- type: "object"
- properties:
- Name:
- description: "User-defined name of the config."
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- Data:
- description: |
- Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5))
- config data.
- type: "string"
- Templating:
- description: |
- Templating driver, if applicable
-
- Templating controls whether and how to evaluate the config payload as
- a template. If no driver is set, no templating is used.
- $ref: "#/definitions/Driver"
-
- Config:
- type: "object"
- properties:
- ID:
- type: "string"
- Version:
- $ref: "#/definitions/ObjectVersion"
- CreatedAt:
- type: "string"
- format: "dateTime"
- UpdatedAt:
- type: "string"
- format: "dateTime"
- Spec:
- $ref: "#/definitions/ConfigSpec"
-
- ContainerState:
- description: |
- ContainerState stores the container's running state. It's part of ContainerJSONBase
- and will be returned by the "inspect" command.
- type: "object"
- properties:
- Status:
- description: |
- String representation of the container state. Can be one of "created",
- "running", "paused", "restarting", "removing", "exited", or "dead".
- type: "string"
- enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"]
- example: "running"
- Running:
- description: |
- Whether this container is running.
-
- Note that a running container can be _paused_. The `Running` and `Paused`
- booleans are not mutually exclusive:
-
- When pausing a container (on Linux), the freezer cgroup is used to suspend
- all processes in the container. Freezing the process requires the process to
- be running. As a result, paused containers are both `Running` _and_ `Paused`.
-
- Use the `Status` field instead to determine if a container's state is "running".
- type: "boolean"
- example: true
- Paused:
- description: "Whether this container is paused."
- type: "boolean"
- example: false
- Restarting:
- description: "Whether this container is restarting."
- type: "boolean"
- example: false
- OOMKilled:
- description: |
- Whether this container has been killed because it ran out of memory.
- type: "boolean"
- example: false
- Dead:
- type: "boolean"
- example: false
- Pid:
- description: "The process ID of this container"
- type: "integer"
- example: 1234
- ExitCode:
- description: "The last exit code of this container"
- type: "integer"
- example: 0
- Error:
- type: "string"
- StartedAt:
- description: "The time when this container was last started."
- type: "string"
- example: "2020-01-06T09:06:59.461876391Z"
- FinishedAt:
- description: "The time when this container last exited."
- type: "string"
- example: "2020-01-06T09:07:59.461876391Z"
- Health:
- x-nullable: true
- $ref: "#/definitions/Health"
-
- SystemVersion:
- type: "object"
- description: |
- Response of Engine API: GET "/version"
- properties:
- Platform:
- type: "object"
- required: [Name]
- properties:
- Name:
- type: "string"
- Components:
- type: "array"
- description: |
- Information about system components
- items:
- type: "object"
- x-go-name: ComponentVersion
- required: [Name, Version]
- properties:
- Name:
- description: |
- Name of the component
- type: "string"
- example: "Engine"
- Version:
- description: |
- Version of the component
- type: "string"
- x-nullable: false
- example: "19.03.12"
- Details:
- description: |
- Key/value pairs of strings with additional information about the
- component. These values are intended for informational purposes
- only; their content is not defined, and is not part of the API
- specification.
-
- These messages can be printed by the client as information to the user.
- type: "object"
- x-nullable: true
- Version:
- description: "The version of the daemon"
- type: "string"
- example: "19.03.12"
- ApiVersion:
- description: |
- The default (and highest) API version that is supported by the daemon
- type: "string"
- example: "1.40"
- MinAPIVersion:
- description: |
- The minimum API version that is supported by the daemon
- type: "string"
- example: "1.12"
- GitCommit:
- description: |
- The Git commit of the source code that was used to build the daemon
- type: "string"
- example: "48a66213fe"
- GoVersion:
- description: |
- The version of Go used to compile the daemon, and the version of the Go
- runtime in use.
- type: "string"
- example: "go1.13.14"
- Os:
- description: |
- The operating system that the daemon is running on ("linux" or "windows")
- type: "string"
- example: "linux"
- Arch:
- description: |
- The architecture that the daemon is running on
- type: "string"
- example: "amd64"
- KernelVersion:
- description: |
- The kernel version (`uname -r`) that the daemon is running on.
-
- This field is omitted when empty.
- type: "string"
- example: "4.19.76-linuxkit"
- Experimental:
- description: |
- Indicates if the daemon is started with experimental features enabled.
-
- This field is omitted when empty / false.
- type: "boolean"
- example: true
- BuildTime:
- description: |
- The date and time that the daemon was compiled.
- type: "string"
- example: "2020-06-22T15:49:27.000000000+00:00"
-
-
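Not part of the deleted swagger file: a minimal Go sketch of querying the SystemVersion object above (the response of `GET /version`) using only the standard library. It assumes a local daemon listening on the default `/var/run/docker.sock` socket; the `http://docker` host is a placeholder that the Unix-socket transport ignores.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Route every request through the daemon's Unix socket.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
			},
		},
	}

	resp, err := client.Get("http://docker/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only a subset of the SystemVersion fields described above.
	var v struct {
		Version    string
		ApiVersion string
		Os         string
		Arch       string
	}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("daemon %s (API %s) on %s/%s\n", v.Version, v.ApiVersion, v.Os, v.Arch)
}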
- SystemInfo:
- type: "object"
- properties:
- ID:
- description: |
- Unique identifier of the daemon.
-
- <p><br /></p>
-
- > **Note**: The format of the ID itself is not part of the API, and
- > should not be considered stable.
- type: "string"
- example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS"
- Containers:
- description: "Total number of containers on the host."
- type: "integer"
- example: 14
- ContainersRunning:
- description: |
- Number of containers with status `"running"`.
- type: "integer"
- example: 3
- ContainersPaused:
- description: |
- Number of containers with status `"paused"`.
- type: "integer"
- example: 1
- ContainersStopped:
- description: |
- Number of containers with status `"stopped"`.
- type: "integer"
- example: 10
- Images:
- description: |
- Total number of images on the host.
-
- Both _tagged_ and _untagged_ (dangling) images are counted.
- type: "integer"
- example: 508
- Driver:
- description: "Name of the storage driver in use."
- type: "string"
- example: "overlay2"
- DriverStatus:
- description: |
- Information specific to the storage driver, provided as
- "label" / "value" pairs.
-
- This information is provided by the storage driver, and formatted
- in a way consistent with the output of `docker info` on the command
- line.
-
- <p><br /></p>
-
- > **Note**: The information returned in this field, including the
- > formatting of values and labels, should not be considered stable,
- > and may change without notice.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- example:
- - ["Backing Filesystem", "extfs"]
- - ["Supports d_type", "true"]
- - ["Native Overlay Diff", "true"]
- DockerRootDir:
- description: |
- Root directory of persistent Docker state.
-
- Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker`
- on Windows.
- type: "string"
- example: "/var/lib/docker"
- Plugins:
- $ref: "#/definitions/PluginsInfo"
- MemoryLimit:
- description: "Indicates if the host has memory limit support enabled."
- type: "boolean"
- example: true
- SwapLimit:
- description: "Indicates if the host has memory swap limit support enabled."
- type: "boolean"
- example: true
- KernelMemory:
- description: |
- Indicates if the host has kernel memory limit support enabled.
-
- <p><br /></p>
-
- > **Deprecated**: This field is deprecated because kernel 5.4 deprecated
- > `kmem.limit_in_bytes`.
- type: "boolean"
- example: true
- CpuCfsPeriod:
- description: |
- Indicates if CPU CFS (Completely Fair Scheduler) period is supported by
- the host.
- type: "boolean"
- example: true
- CpuCfsQuota:
- description: |
- Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by
- the host.
- type: "boolean"
- example: true
- CPUShares:
- description: |
- Indicates if CPU Shares limiting is supported by the host.
- type: "boolean"
- example: true
- CPUSet:
- description: |
- Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host.
-
- See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt)
- type: "boolean"
- example: true
- PidsLimit:
- description: "Indicates if the host kernel has PID limit support enabled."
- type: "boolean"
- example: true
- OomKillDisable:
- description: "Indicates if OOM killer disable is supported on the host."
- type: "boolean"
- IPv4Forwarding:
- description: "Indicates IPv4 forwarding is enabled."
- type: "boolean"
- example: true
- BridgeNfIptables:
- description: "Indicates if `bridge-nf-call-iptables` is available on the host."
- type: "boolean"
- example: true
- BridgeNfIp6tables:
- description: "Indicates if `bridge-nf-call-ip6tables` is available on the host."
- type: "boolean"
- example: true
- Debug:
- description: |
- Indicates if the daemon is running in debug-mode / with debug-level
- logging enabled.
- type: "boolean"
- example: true
- NFd:
- description: |
- The total number of file descriptors in use by the daemon process.
-
- This information is only returned if debug-mode is enabled.
- type: "integer"
- example: 64
- NGoroutines:
- description: |
- The number of goroutines that currently exist.
-
- This information is only returned if debug-mode is enabled.
- type: "integer"
- example: 174
- SystemTime:
- description: |
- Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt)
- format with nano-seconds.
- type: "string"
- example: "2017-08-08T20:28:29.06202363Z"
- LoggingDriver:
- description: |
- The logging driver to use as a default for new containers.
- type: "string"
- CgroupDriver:
- description: |
- The driver to use for managing cgroups.
- type: "string"
- enum: ["cgroupfs", "systemd", "none"]
- default: "cgroupfs"
- example: "cgroupfs"
- CgroupVersion:
- description: |
- The version of the cgroup.
- type: "string"
- enum: ["1", "2"]
- default: "1"
- example: "1"
- NEventsListener:
- description: "Number of event listeners subscribed."
- type: "integer"
- example: 30
- KernelVersion:
- description: |
- Kernel version of the host.
-
- On Linux, this information is obtained from `uname`. On Windows, this
- information is queried from the <kbd>HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\</kbd>
- registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_.
- type: "string"
- example: "4.9.38-moby"
- OperatingSystem:
- description: |
- Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS"
- or "Windows Server 2016 Datacenter"
- type: "string"
- example: "Alpine Linux v3.5"
- OSVersion:
- description: |
- Version of the host's operating system
-
- <p><br /></p>
-
- > **Note**: The information returned in this field, including its
- > very existence, and the formatting of values, should not be considered
- > stable, and may change without notice.
- type: "string"
- example: "16.04"
- OSType:
- description: |
- Generic type of the operating system of the host, as returned by the
- Go runtime (`GOOS`).
-
- Currently returned values are "linux" and "windows". A full list of
- possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
- type: "string"
- example: "linux"
- Architecture:
- description: |
- Hardware architecture of the host, as returned by the Go runtime
- (`GOARCH`).
-
- A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment).
- type: "string"
- example: "x86_64"
- NCPU:
- description: |
- The number of logical CPUs usable by the daemon.
-
- The number of available CPUs is checked by querying the operating
- system when the daemon starts. Changes to operating system CPU
- allocation after the daemon is started are not reflected.
- type: "integer"
- example: 4
- MemTotal:
- description: |
- Total amount of physical memory available on the host, in bytes.
- type: "integer"
- format: "int64"
- example: 2095882240
-
- IndexServerAddress:
- description: |
- Address / URL of the index server that is used for image search,
- and as a default for user authentication for Docker Hub and Docker Cloud.
- default: "https://index.docker.io/v1/"
- type: "string"
- example: "https://index.docker.io/v1/"
- RegistryConfig:
- $ref: "#/definitions/RegistryServiceConfig"
- GenericResources:
- $ref: "#/definitions/GenericResources"
- HttpProxy:
- description: |
- HTTP-proxy configured for the daemon. This value is obtained from the
- [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
- Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
- are masked in the API response.
-
- Containers do not automatically inherit this configuration.
- type: "string"
- example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080"
- HttpsProxy:
- description: |
- HTTPS-proxy configured for the daemon. This value is obtained from the
- [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable.
- Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL
- are masked in the API response.
-
- Containers do not automatically inherit this configuration.
- type: "string"
- example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443"
- NoProxy:
- description: |
- Comma-separated list of domain extensions for which no proxy should be
- used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html)
- environment variable.
-
- Containers do not automatically inherit this configuration.
- type: "string"
- example: "*.local, 169.254/16"
- Name:
- description: "Hostname of the host."
- type: "string"
- example: "node5.corp.example.com"
- Labels:
- description: |
- User-defined labels (key/value metadata) as set on the daemon.
-
- <p><br /></p>
-
- > **Note**: When part of a Swarm, nodes can both have _daemon_ labels,
- > set through the daemon configuration, and _node_ labels, set from a
- > manager node in the Swarm. Node labels are not included in this
- > field. Node labels can be retrieved using the `/nodes/(id)` endpoint
- > on a manager node in the Swarm.
- type: "array"
- items:
- type: "string"
- example: ["storage=ssd", "production"]
- ExperimentalBuild:
- description: |
- Indicates if experimental features are enabled on the daemon.
- type: "boolean"
- example: true
- ServerVersion:
- description: |
- Version string of the daemon.
-
- > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/)
- > returns the Swarm version instead of the daemon version, for example
- > `swarm/1.2.8`.
- type: "string"
- example: "17.06.0-ce"
- ClusterStore:
- description: |
- URL of the distributed storage backend.
-
-
- The storage backend is used for multihost networking (to store
- network and endpoint information) and by the node discovery mechanism.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when using standalone Swarm
- > mode, and overlay networking using an external k/v store. Overlay
- > networks with Swarm mode enabled use the built-in raft store, and
- > this field will be empty.
- type: "string"
- example: "consul://consul.corp.example.com:8600/some/path"
- ClusterAdvertise:
- description: |
- The network endpoint that the Engine advertises for the purpose of
- node discovery. ClusterAdvertise is a `host:port` combination on which
- the daemon is reachable by other hosts.
-
- <p><br /></p>
-
- > **Deprecated**: This field is only propagated when using standalone Swarm
- > mode, and overlay networking using an external k/v store. Overlay
- > networks with Swarm mode enabled use the built-in raft store, and
- > this field will be empty.
- type: "string"
- example: "node5.corp.example.com:8000"
- Runtimes:
- description: |
- List of [OCI compliant](https://github.com/opencontainers/runtime-spec)
- runtimes configured on the daemon. Keys hold the "name" used to
- reference the runtime.
-
- The Docker daemon relies on an OCI compliant runtime (invoked via the
- `containerd` daemon) as its interface to the Linux kernel namespaces,
- cgroups, and SELinux.
-
- The default runtime is `runc`, which is configured automatically. Additional
- runtimes can be configured by the user and will be listed here.
- type: "object"
- additionalProperties:
- $ref: "#/definitions/Runtime"
- default:
- runc:
- path: "runc"
- example:
- runc:
- path: "runc"
- runc-master:
- path: "/go/bin/runc"
- custom:
- path: "/usr/local/bin/my-oci-runtime"
- runtimeArgs: ["--debug", "--systemd-cgroup=false"]
- DefaultRuntime:
- description: |
- Name of the default OCI runtime that is used when starting containers.
-
- The default can be overridden per-container at create time.
- type: "string"
- default: "runc"
- example: "runc"
- Swarm:
- $ref: "#/definitions/SwarmInfo"
- LiveRestoreEnabled:
- description: |
- Indicates if live restore is enabled.
-
- If enabled, containers are kept running when the daemon is shut down
- or upon daemon start if running containers are detected.
- type: "boolean"
- default: false
- example: false
- Isolation:
- description: |
- Represents the isolation technology to use as a default for containers.
- The supported values are platform-specific.
-
- If no isolation value is specified on daemon start, the default is
- `hyperv` on Windows client, and `process` on Windows Server.
-
- This option is currently not used on other platforms.
- default: "default"
- type: "string"
- enum:
- - "default"
- - "hyperv"
- - "process"
- InitBinary:
- description: |
- Name and, optionally, path of the `docker-init` binary.
-
- If the path is omitted, the daemon searches the host's `$PATH` for the
- binary and uses the first result.
- type: "string"
- example: "docker-init"
- ContainerdCommit:
- $ref: "#/definitions/Commit"
- RuncCommit:
- $ref: "#/definitions/Commit"
- InitCommit:
- $ref: "#/definitions/Commit"
- SecurityOptions:
- description: |
- List of security features that are enabled on the daemon, such as
- apparmor, seccomp, SELinux, user-namespaces (userns), and rootless.
-
- Additional configuration options for each security feature may
- be present, and are included as a comma-separated list of key/value
- pairs.
- type: "array"
- items:
- type: "string"
- example:
- - "name=apparmor"
- - "name=seccomp,profile=default"
- - "name=selinux"
- - "name=userns"
- - "name=rootless"
- ProductLicense:
- description: |
- Reports a summary of the product license on the daemon.
-
- If a commercial license has been applied to the daemon, information
- such as number of nodes, and expiration are included.
- type: "string"
- example: "Community Engine"
- DefaultAddressPools:
- description: |
- List of custom default address pools for local networks, which can be
- specified in the daemon.json file or dockerd option.
-
- Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256
- 10.10.[0-255].0/24 address pools.
- type: "array"
- items:
- type: "object"
- properties:
- Base:
- description: "The network address in CIDR format"
- type: "string"
- example: "10.10.0.0/16"
- Size:
- description: "The network pool size"
- type: "integer"
- example: "24"
- Warnings:
- description: |
- List of warnings / informational messages about missing features, or
- issues related to the daemon configuration.
-
- These messages can be printed by the client as information to the user.
- type: "array"
- items:
- type: "string"
- example:
- - "WARNING: No memory limit support"
- - "WARNING: bridge-nf-call-iptables is disabled"
- - "WARNING: bridge-nf-call-ip6tables is disabled"
-
-
- # PluginsInfo is a temp struct holding Plugins name
- # registered with docker daemon. It is used by Info struct
- PluginsInfo:
- description: |
- Available plugins per type.
-
- <p><br /></p>
-
- > **Note**: Only unmanaged (V1) plugins are included in this list.
- > V1 plugins are "lazily" loaded, and are not returned in this list
- > if there is no resource using the plugin.
- type: "object"
- properties:
- Volume:
- description: "Names of available volume-drivers, and network-driver plugins."
- type: "array"
- items:
- type: "string"
- example: ["local"]
- Network:
- description: "Names of available network-drivers, and network-driver plugins."
- type: "array"
- items:
- type: "string"
- example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"]
- Authorization:
- description: "Names of available authorization plugins."
- type: "array"
- items:
- type: "string"
- example: ["img-authz-plugin", "hbm"]
- Log:
- description: "Names of available logging-drivers, and logging-driver plugins."
- type: "array"
- items:
- type: "string"
- example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"]
-
-
- RegistryServiceConfig:
- description: |
- RegistryServiceConfig stores daemon registry services configuration.
- type: "object"
- x-nullable: true
- properties:
- AllowNondistributableArtifactsCIDRs:
- description: |
- List of IP ranges to which nondistributable artifacts can be pushed,
- using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632).
-
- Some images (for example, Windows base images) contain artifacts
- whose distribution is restricted by license. When these images are
- pushed to a registry, restricted artifacts are not included.
-
- This configuration overrides this behavior, and enables the daemon to
- push nondistributable artifacts to all registries whose resolved IP
- address is within the subnet described by the CIDR syntax.
-
- This option is useful when pushing images containing
- nondistributable artifacts to a registry on an air-gapped network so
- hosts on that network can pull the images without connecting to
- another server.
-
- > **Warning**: Nondistributable artifacts typically have restrictions
- > on how and where they can be distributed and shared. Only use this
- > feature to push artifacts to private registries and ensure that you
- > are in compliance with any terms that cover redistributing
- > nondistributable artifacts.
-
- type: "array"
- items:
- type: "string"
- example: ["::1/128", "127.0.0.0/8"]
- AllowNondistributableArtifactsHostnames:
- description: |
- List of registry hostnames to which nondistributable artifacts can be
- pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`.
-
- Some images (for example, Windows base images) contain artifacts
- whose distribution is restricted by license. When these images are
- pushed to a registry, restricted artifacts are not included.
-
- This configuration overrides this behavior for the specified
- registries.
-
- This option is useful when pushing images containing
- nondistributable artifacts to a registry on an air-gapped network so
- hosts on that network can pull the images without connecting to
- another server.
-
- > **Warning**: Nondistributable artifacts typically have restrictions
- > on how and where they can be distributed and shared. Only use this
- > feature to push artifacts to private registries and ensure that you
- > are in compliance with any terms that cover redistributing
- > nondistributable artifacts.
- type: "array"
- items:
- type: "string"
- example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"]
- InsecureRegistryCIDRs:
- description: |
- List of IP ranges of insecure registries, using the CIDR syntax
- ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries
- accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates
- from unknown CAs) communication.
-
- By default, local registries (`127.0.0.0/8`) are configured as
- insecure. All other registries are secure. Communicating with an
- insecure registry is not possible if the daemon assumes that registry
- is secure.
-
- This configuration overrides this behavior, and enables insecure
- communication with registries whose resolved IP address is within the
- subnet described by the CIDR syntax.
-
- Registries can also be marked insecure by hostname. Those registries
- are listed under `IndexConfigs` and have their `Secure` field set to
- `false`.
-
- > **Warning**: Using this option can be useful when running a local
- > registry, but introduces security vulnerabilities. This option
- > should therefore ONLY be used for testing purposes. For increased
- > security, users should add their CA to their system's list of trusted
- > CAs instead of enabling this option.
- type: "array"
- items:
- type: "string"
- example: ["::1/128", "127.0.0.0/8"]
- IndexConfigs:
- type: "object"
- additionalProperties:
- $ref: "#/definitions/IndexInfo"
- example:
- "127.0.0.1:5000":
- "Name": "127.0.0.1:5000"
- "Mirrors": []
- "Secure": false
- "Official": false
- "[2001:db8:a0b:12f0::1]:80":
- "Name": "[2001:db8:a0b:12f0::1]:80"
- "Mirrors": []
- "Secure": false
- "Official": false
- "docker.io":
- Name: "docker.io"
- Mirrors: ["https://hub-mirror.corp.example.com:5000/"]
- Secure: true
- Official: true
- "registry.internal.corp.example.com:3000":
- Name: "registry.internal.corp.example.com:3000"
- Mirrors: []
- Secure: false
- Official: false
- Mirrors:
- description: |
- List of registry URLs that act as a mirror for the official
- (`docker.io`) registry.
-
- type: "array"
- items:
- type: "string"
- example:
- - "https://hub-mirror.corp.example.com:5000/"
- - "https://[2001:db8:a0b:12f0::1]/"
-
- IndexInfo:
- description:
- IndexInfo contains information about a registry.
- type: "object"
- x-nullable: true
- properties:
- Name:
- description: |
- Name of the registry, such as "docker.io".
- type: "string"
- example: "docker.io"
- Mirrors:
- description: |
- List of mirrors, expressed as URIs.
- type: "array"
- items:
- type: "string"
- example:
- - "https://hub-mirror.corp.example.com:5000/"
- - "https://registry-2.docker.io/"
- - "https://registry-3.docker.io/"
- Secure:
- description: |
- Indicates whether the registry is secure, i.e. not part of the list
- of insecure registries.
-
- If `false`, the registry is insecure. Insecure registries accept
- un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from
- unknown CAs) communication.
-
- > **Warning**: Insecure registries can be useful when running a local
- > registry. However, because its use creates security vulnerabilities
- > it should ONLY be enabled for testing purposes. For increased
- > security, users should add their CA to their system's list of
- > trusted CAs instead of enabling this option.
- type: "boolean"
- example: true
- Official:
- description: |
- Indicates whether this is an official registry (i.e., Docker Hub / docker.io)
- type: "boolean"
- example: true
-
- Runtime:
- description: |
- Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec)
- runtime.
-
- The runtime is invoked by the daemon via the `containerd` daemon. OCI
- runtimes act as an interface to the Linux kernel namespaces, cgroups,
- and SELinux.
- type: "object"
- properties:
- path:
- description: |
- Name and, optionally, path of the OCI executable binary.
-
- If the path is omitted, the daemon searches the host's `$PATH` for the
- binary and uses the first result.
- type: "string"
- example: "/usr/local/bin/my-oci-runtime"
- runtimeArgs:
- description: |
- List of command-line arguments to pass to the runtime when invoked.
- type: "array"
- x-nullable: true
- items:
- type: "string"
- example: ["--debug", "--systemd-cgroup=false"]
-
- Commit:
- description: |
- Commit holds the Git-commit (SHA1) that a binary was built from, as
- reported in the version-string of external tools, such as `containerd`,
- or `runC`.
- type: "object"
- properties:
- ID:
- description: "Actual commit ID of external tool."
- type: "string"
- example: "cfb82a876ecc11b5ca0977d1733adbe58599088a"
- Expected:
- description: |
- Commit ID of external tool expected by dockerd as set at build time.
- type: "string"
- example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4"
-
- SwarmInfo:
- description: |
- Represents generic information about swarm.
- type: "object"
- properties:
- NodeID:
- description: "Unique identifier of for this node in the swarm."
- type: "string"
- default: ""
- example: "k67qz4598weg5unwwffg6z1m1"
- NodeAddr:
- description: |
- IP address at which this node can be reached by other nodes in the
- swarm.
- type: "string"
- default: ""
- example: "10.0.0.46"
- LocalNodeState:
- $ref: "#/definitions/LocalNodeState"
- ControlAvailable:
- type: "boolean"
- default: false
- example: true
- Error:
- type: "string"
- default: ""
- RemoteManagers:
- description: |
- List of IDs and addresses of other managers in the swarm.
- type: "array"
- default: null
- x-nullable: true
- items:
- $ref: "#/definitions/PeerNode"
- example:
- - NodeID: "71izy0goik036k48jg985xnds"
- Addr: "10.0.0.158:2377"
- - NodeID: "79y6h1o4gv8n120drcprv5nmc"
- Addr: "10.0.0.159:2377"
- - NodeID: "k67qz4598weg5unwwffg6z1m1"
- Addr: "10.0.0.46:2377"
- Nodes:
- description: "Total number of nodes in the swarm."
- type: "integer"
- x-nullable: true
- example: 4
- Managers:
- description: "Total number of managers in the swarm."
- type: "integer"
- x-nullable: true
- example: 3
- Cluster:
- $ref: "#/definitions/ClusterInfo"
-
- LocalNodeState:
- description: "Current local status of this node."
- type: "string"
- default: ""
- enum:
- - ""
- - "inactive"
- - "pending"
- - "active"
- - "error"
- - "locked"
- example: "active"
-
- PeerNode:
- description: "Represents a peer-node in the swarm"
- properties:
- NodeID:
- description: "Unique identifier of for this node in the swarm."
- type: "string"
- Addr:
- description: |
- IP address and ports at which this node can be reached.
- type: "string"
-
- NetworkAttachmentConfig:
- description: |
- Specifies how a service should be attached to a particular network.
- type: "object"
- properties:
- Target:
- description: |
- The target network for attachment. Must be a network name or ID.
- type: "string"
- Aliases:
- description: |
- Discoverable alternate names for the service on this network.
- type: "array"
- items:
- type: "string"
- DriverOpts:
- description: |
- Driver attachment options for the network target.
- type: "object"
- additionalProperties:
- type: "string"
-
-paths:
- /containers/json:
- get:
- summary: "List containers"
- description: |
- Returns a list of containers. For details on the format, see the
- [inspect endpoint](#operation/ContainerInspect).
-
- Note that it uses a different, smaller representation of a container
- than inspecting a single container. For example, the list of linked
- containers is not propagated.
- operationId: "ContainerList"
- produces:
- - "application/json"
- parameters:
- - name: "all"
- in: "query"
- description: |
- Return all containers. By default, only running containers are shown.
- type: "boolean"
- default: false
- - name: "limit"
- in: "query"
- description: |
- Return this number of most recently created containers, including
- non-running ones.
- type: "integer"
- - name: "size"
- in: "query"
- description: |
- Return the size of the container as the fields `SizeRw` and `SizeRootFs`.
- type: "boolean"
- default: false
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the container list, encoded as JSON (a
- `map[string][]string`). For example, `{"status": ["paused"]}` will
- only return paused containers.
-
- Available filters:
-
- - `ancestor`=(`<image-name>[:<tag>]`, `<image id>`, or `<image@digest>`)
- - `before`=(`<container id>` or `<container name>`)
- - `expose`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
- - `exited=<int>` containers with exit code of `<int>`
- - `health`=(`starting`|`healthy`|`unhealthy`|`none`)
- - `id=<ID>` a container's ID
- - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only)
- - `is-task=`(`true`|`false`)
- - `label=key` or `label="key=value"` of a container label
- - `name=<name>` a container's name
- - `network`=(`<network id>` or `<network name>`)
- - `publish`=(`<port>[/<proto>]`|`<startport-endport>/[<proto>]`)
- - `since`=(`<container id>` or `<container name>`)
- - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`)
- - `volume`=(`<volume name>` or `<mount point destination>`)
- type: "string"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/ContainerSummary"
- examples:
- application/json:
- - Id: "8dfafdbc3a40"
- Names:
- - "/boring_feynman"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 1"
- Created: 1367854155
- State: "Exited"
- Status: "Exit 0"
- Ports:
- - PrivatePort: 2222
- PublicPort: 3333
- Type: "tcp"
- Labels:
- com.example.vendor: "Acme"
- com.example.license: "GPL"
- com.example.version: "1.0"
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.2"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:02"
- Mounts:
- - Name: "fac362...80535"
- Source: "/data"
- Destination: "/data"
- Driver: "local"
- Mode: "ro,Z"
- RW: false
- Propagation: ""
- - Id: "9cd87474be90"
- Names:
- - "/coolName"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 222222"
- Created: 1367854155
- State: "Exited"
- Status: "Exit 0"
- Ports: []
- Labels: {}
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.8"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:08"
- Mounts: []
- - Id: "3176a2479c92"
- Names:
- - "/sleepy_dog"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 3333333333333333"
- Created: 1367854154
- State: "Exited"
- Status: "Exit 0"
- Ports: []
- Labels: {}
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.6"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:06"
- Mounts: []
- - Id: "4cb07b47f9fb"
- Names:
- - "/running_cat"
- Image: "ubuntu:latest"
- ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82"
- Command: "echo 444444444444444444444444444444444"
- Created: 1367854152
- State: "Exited"
- Status: "Exit 0"
- Ports: []
- Labels: {}
- SizeRw: 12288
- SizeRootFs: 0
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.5"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:11:00:05"
- Mounts: []
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Container"]
- /containers/create:
- post:
- summary: "Create a container"
- operationId: "ContainerCreate"
- consumes:
- - "application/json"
- - "application/octet-stream"
- produces:
- - "application/json"
- parameters:
- - name: "name"
- in: "query"
- description: |
- Assign the specified name to the container. Must match
- `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
- type: "string"
- pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
- - name: "body"
- in: "body"
- description: "Container to create"
- schema:
- allOf:
- - $ref: "#/definitions/ContainerConfig"
- - type: "object"
- properties:
- HostConfig:
- $ref: "#/definitions/HostConfig"
- NetworkingConfig:
- $ref: "#/definitions/NetworkingConfig"
- example:
- Hostname: ""
- Domainname: ""
- User: ""
- AttachStdin: false
- AttachStdout: true
- AttachStderr: true
- Tty: false
- OpenStdin: false
- StdinOnce: false
- Env:
- - "FOO=bar"
- - "BAZ=quux"
- Cmd:
- - "date"
- Entrypoint: ""
- Image: "ubuntu"
- Labels:
- com.example.vendor: "Acme"
- com.example.license: "GPL"
- com.example.version: "1.0"
- Volumes:
- /volumes/data: {}
- WorkingDir: ""
- NetworkDisabled: false
- MacAddress: "12:34:56:78:9a:bc"
- ExposedPorts:
- 22/tcp: {}
- StopSignal: "SIGTERM"
- StopTimeout: 10
- HostConfig:
- Binds:
- - "/tmp:/tmp"
- Links:
- - "redis3:redis"
- Memory: 0
- MemorySwap: 0
- MemoryReservation: 0
- KernelMemory: 0
- NanoCpus: 500000
- CpuPercent: 80
- CpuShares: 512
- CpuPeriod: 100000
- CpuRealtimePeriod: 1000000
- CpuRealtimeRuntime: 10000
- CpuQuota: 50000
- CpusetCpus: "0,1"
- CpusetMems: "0,1"
- MaximumIOps: 0
- MaximumIOBps: 0
- BlkioWeight: 300
- BlkioWeightDevice:
- - {}
- BlkioDeviceReadBps:
- - {}
- BlkioDeviceReadIOps:
- - {}
- BlkioDeviceWriteBps:
- - {}
- BlkioDeviceWriteIOps:
- - {}
- DeviceRequests:
- - Driver: "nvidia"
- Count: -1
- DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
- Capabilities: [["gpu", "nvidia", "compute"]]
- Options:
- property1: "string"
- property2: "string"
- MemorySwappiness: 60
- OomKillDisable: false
- OomScoreAdj: 500
- PidMode: ""
- PidsLimit: 0
- PortBindings:
- 22/tcp:
- - HostPort: "11022"
- PublishAllPorts: false
- Privileged: false
- ReadonlyRootfs: false
- Dns:
- - "8.8.8.8"
- DnsOptions:
- - ""
- DnsSearch:
- - ""
- VolumesFrom:
- - "parent"
- - "other:ro"
- CapAdd:
- - "NET_ADMIN"
- CapDrop:
- - "MKNOD"
- GroupAdd:
- - "newgroup"
- RestartPolicy:
- Name: ""
- MaximumRetryCount: 0
- AutoRemove: true
- NetworkMode: "bridge"
- Devices: []
- Ulimits:
- - {}
- LogConfig:
- Type: "json-file"
- Config: {}
- SecurityOpt: []
- StorageOpt: {}
- CgroupParent: ""
- VolumeDriver: ""
- ShmSize: 67108864
- NetworkingConfig:
- EndpointsConfig:
- isolated_nw:
- IPAMConfig:
- IPv4Address: "172.20.30.33"
- IPv6Address: "2001:db8:abcd::3033"
- LinkLocalIPs:
- - "169.254.34.68"
- - "fe80::3468"
- Links:
- - "container_1"
- - "container_2"
- Aliases:
- - "server_x"
- - "server_y"
-
- required: true
- responses:
- 201:
- description: "Container created successfully"
- schema:
- type: "object"
- title: "ContainerCreateResponse"
- description: "OK response to ContainerCreate operation"
- required: [Id, Warnings]
- properties:
- Id:
- description: "The ID of the created container"
- type: "string"
- x-nullable: false
- Warnings:
- description: "Warnings encountered when creating the container"
- type: "array"
- x-nullable: false
- items:
- type: "string"
- examples:
- application/json:
- Id: "e90e34656806"
- Warnings: []
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such image: c2ada9df5af8"
- 409:
- description: "conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Container"]
- /containers/{id}/json:
- get:
- summary: "Inspect a container"
- description: "Return low-level information about a container."
- operationId: "ContainerInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "ContainerInspectResponse"
- properties:
- Id:
- description: "The ID of the container"
- type: "string"
- Created:
- description: "The time the container was created"
- type: "string"
- Path:
- description: "The path to the command being run"
- type: "string"
- Args:
- description: "The arguments to the command being run"
- type: "array"
- items:
- type: "string"
- State:
- x-nullable: true
- $ref: "#/definitions/ContainerState"
- Image:
- description: "The container's image ID"
- type: "string"
- ResolvConfPath:
- type: "string"
- HostnamePath:
- type: "string"
- HostsPath:
- type: "string"
- LogPath:
- type: "string"
- Name:
- type: "string"
- RestartCount:
- type: "integer"
- Driver:
- type: "string"
- Platform:
- type: "string"
- MountLabel:
- type: "string"
- ProcessLabel:
- type: "string"
- AppArmorProfile:
- type: "string"
- ExecIDs:
- description: "IDs of exec instances that are running in the container."
- type: "array"
- items:
- type: "string"
- x-nullable: true
- HostConfig:
- $ref: "#/definitions/HostConfig"
- GraphDriver:
- $ref: "#/definitions/GraphDriverData"
- SizeRw:
- description: |
- The size of files that have been created or changed by this
- container.
- type: "integer"
- format: "int64"
- SizeRootFs:
- description: "The total size of all the files in this container."
- type: "integer"
- format: "int64"
- Mounts:
- type: "array"
- items:
- $ref: "#/definitions/MountPoint"
- Config:
- $ref: "#/definitions/ContainerConfig"
- NetworkSettings:
- $ref: "#/definitions/NetworkSettings"
- examples:
- application/json:
- AppArmorProfile: ""
- Args:
- - "-c"
- - "exit 9"
- Config:
- AttachStderr: true
- AttachStdin: false
- AttachStdout: true
- Cmd:
- - "/bin/sh"
- - "-c"
- - "exit 9"
- Domainname: ""
- Env:
- - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- Healthcheck:
- Test: ["CMD-SHELL", "exit 0"]
- Hostname: "ba033ac44011"
- Image: "ubuntu"
- Labels:
- com.example.vendor: "Acme"
- com.example.license: "GPL"
- com.example.version: "1.0"
- MacAddress: ""
- NetworkDisabled: false
- OpenStdin: false
- StdinOnce: false
- Tty: false
- User: ""
- Volumes:
- /volumes/data: {}
- WorkingDir: ""
- StopSignal: "SIGTERM"
- StopTimeout: 10
- Created: "2015-01-06T15:47:31.485331387Z"
- Driver: "devicemapper"
- ExecIDs:
- - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca"
- - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4"
- HostConfig:
- MaximumIOps: 0
- MaximumIOBps: 0
- BlkioWeight: 0
- BlkioWeightDevice:
- - {}
- BlkioDeviceReadBps:
- - {}
- BlkioDeviceWriteBps:
- - {}
- BlkioDeviceReadIOps:
- - {}
- BlkioDeviceWriteIOps:
- - {}
- ContainerIDFile: ""
- CpusetCpus: ""
- CpusetMems: ""
- CpuPercent: 80
- CpuShares: 0
- CpuPeriod: 100000
- CpuRealtimePeriod: 1000000
- CpuRealtimeRuntime: 10000
- Devices: []
- DeviceRequests:
- - Driver: "nvidia"
- Count: -1
- DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"]
- Capabilities: [["gpu", "nvidia", "compute"]]
- Options:
- property1: "string"
- property2: "string"
- IpcMode: ""
- LxcConf: []
- Memory: 0
- MemorySwap: 0
- MemoryReservation: 0
- KernelMemory: 0
- OomKillDisable: false
- OomScoreAdj: 500
- NetworkMode: "bridge"
- PidMode: ""
- PortBindings: {}
- Privileged: false
- ReadonlyRootfs: false
- PublishAllPorts: false
- RestartPolicy:
- MaximumRetryCount: 2
- Name: "on-failure"
- LogConfig:
- Type: "json-file"
- Sysctls:
- net.ipv4.ip_forward: "1"
- Ulimits:
- - {}
- VolumeDriver: ""
- ShmSize: 67108864
- HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname"
- HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts"
- LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log"
- Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39"
- Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2"
- MountLabel: ""
- Name: "/boring_euclid"
- NetworkSettings:
- Bridge: ""
- SandboxID: ""
- HairpinMode: false
- LinkLocalIPv6Address: ""
- LinkLocalIPv6PrefixLen: 0
- SandboxKey: ""
- EndpointID: ""
- Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- IPAddress: ""
- IPPrefixLen: 0
- IPv6Gateway: ""
- MacAddress: ""
- Networks:
- bridge:
- NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812"
- EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d"
- Gateway: "172.17.0.1"
- IPAddress: "172.17.0.2"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:12:00:02"
- Path: "/bin/sh"
- ProcessLabel: ""
- ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf"
- RestartCount: 1
- State:
- Error: ""
- ExitCode: 9
- FinishedAt: "2015-01-06T15:47:32.080254511Z"
- Health:
- Status: "healthy"
- FailingStreak: 0
- Log:
- - Start: "2019-12-22T10:59:05.6385933Z"
- End: "2019-12-22T10:59:05.8078452Z"
- ExitCode: 0
- Output: ""
- OOMKilled: false
- Dead: false
- Paused: false
- Pid: 0
- Restarting: false
- Running: true
- StartedAt: "2015-01-06T15:47:32.072697474Z"
- Status: "running"
- Mounts:
- - Name: "fac362...80535"
- Source: "/data"
- Destination: "/data"
- Driver: "local"
- Mode: "ro,Z"
- RW: false
- Propagation: ""
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "size"
- in: "query"
- type: "boolean"
- default: false
- description: "Return the size of container as fields `SizeRw` and `SizeRootFs`"
- tags: ["Container"]
- /containers/{id}/top:
- get:
- summary: "List processes running inside a container"
- description: |
- On Unix systems, this is done by running the `ps` command. This endpoint
- is not supported on Windows.
- operationId: "ContainerTop"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "ContainerTopResponse"
- description: "OK response to ContainerTop operation"
- properties:
- Titles:
- description: "The ps column titles"
- type: "array"
- items:
- type: "string"
- Processes:
- description: |
- Each process running in the container, where each process
- is an array of values corresponding to the titles.
- type: "array"
- items:
- type: "array"
- items:
- type: "string"
- examples:
- application/json:
- Titles:
- - "UID"
- - "PID"
- - "PPID"
- - "C"
- - "STIME"
- - "TTY"
- - "TIME"
- - "CMD"
- Processes:
- -
- - "root"
- - "13642"
- - "882"
- - "0"
- - "17:03"
- - "pts/0"
- - "00:00:00"
- - "/bin/bash"
- -
- - "root"
- - "13735"
- - "13642"
- - "0"
- - "17:06"
- - "pts/0"
- - "00:00:00"
- - "sleep 10"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "ps_args"
- in: "query"
- description: "The arguments to pass to `ps`. For example, `aux`"
- type: "string"
- default: "-ef"
- tags: ["Container"]
- /containers/{id}/logs:
- get:
- summary: "Get container logs"
- description: |
- Get `stdout` and `stderr` logs from a container.
-
- Note: This endpoint works only for containers with the `json-file` or
- `journald` logging driver.
- operationId: "ContainerLogs"
- responses:
- 200:
- description: |
- Logs returned as a stream in the response body.
- For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach).
- Note that unlike the attach endpoint, the logs endpoint does not
- upgrade the connection and does not set Content-Type.
- schema:
- type: "string"
- format: "binary"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "follow"
- in: "query"
- description: "Keep connection after returning logs."
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Return logs from `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Return logs from `stderr`"
- type: "boolean"
- default: false
- - name: "since"
- in: "query"
- description: "Only return logs since this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "until"
- in: "query"
- description: "Only return logs before this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "timestamps"
- in: "query"
- description: "Add timestamps to every log line"
- type: "boolean"
- default: false
- - name: "tail"
- in: "query"
- description: |
- Only return this number of log lines from the end of the logs.
- Specify as an integer or `all` to output all log lines.
- type: "string"
- default: "all"
- tags: ["Container"]
- /containers/{id}/changes:
- get:
- summary: "Get changes on a container’s filesystem"
- description: |
- Returns which files in a container's filesystem have been added, deleted,
- or modified. The `Kind` of modification can be one of:
-
- - `0`: Modified
- - `1`: Added
- - `2`: Deleted
- operationId: "ContainerChanges"
- produces: ["application/json"]
- responses:
- 200:
- description: "The list of changes"
- schema:
- type: "array"
- items:
- type: "object"
- x-go-name: "ContainerChangeResponseItem"
- title: "ContainerChangeResponseItem"
- description: "change item in response to ContainerChanges operation"
- required: [Path, Kind]
- properties:
- Path:
- description: "Path to file that has changed"
- type: "string"
- x-nullable: false
- Kind:
- description: "Kind of change"
- type: "integer"
- format: "uint8"
- enum: [0, 1, 2]
- x-nullable: false
- examples:
- application/json:
- - Path: "/dev"
- Kind: 0
- - Path: "/dev/kmsg"
- Kind: 1
- - Path: "/test"
- Kind: 1
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/export:
- get:
- summary: "Export a container"
- description: "Export the contents of a container as a tarball."
- operationId: "ContainerExport"
- produces:
- - "application/octet-stream"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/stats:
- get:
- summary: "Get container stats based on resource usage"
- description: |
- This endpoint returns a live stream of a container’s resource usage
- statistics.
-
- The `precpu_stats` is the CPU statistic of the *previous* read, and is
- used to calculate the CPU usage percentage. It is not an exact copy
- of the `cpu_stats` field.
-
- If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is
- nil, then for compatibility with older daemons the length of the
- corresponding `cpu_usage.percpu_usage` array should be used instead.
-
- On a cgroup v2 host, the following fields are not set:
- * `blkio_stats`: all fields other than `io_service_bytes_recursive`
- * `cpu_stats`: `cpu_usage.percpu_usage`
- * `memory_stats`: `max_usage` and `failcnt`
- Also, `memory_stats.stats` fields are incompatible with cgroup v1.
-
- To calculate the values shown by the `stats` command of the docker CLI tool,
- the following formulas can be used (a Go sketch follows the list):
- * used_memory = `memory_stats.usage - memory_stats.stats.cache`
- * available_memory = `memory_stats.limit`
- * Memory usage % = `(used_memory / available_memory) * 100.0`
- * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage`
- * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage`
- * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus`
- * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0`
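-
- A minimal Go sketch of these calculations, using only the fields that
- appear in the example response below (illustrative, not a complete
- client):
-
- ```go
- import "encoding/json"
-
- // statsSample holds just the fields needed by the formulas above.
- type statsSample struct {
-     CPUStats    cpuStats    `json:"cpu_stats"`
-     PreCPUStats cpuStats    `json:"precpu_stats"`
-     MemoryStats memoryStats `json:"memory_stats"`
- }
-
- type cpuStats struct {
-     CPUUsage struct {
-         TotalUsage  uint64   `json:"total_usage"`
-         PercpuUsage []uint64 `json:"percpu_usage"`
-     } `json:"cpu_usage"`
-     SystemCPUUsage uint64 `json:"system_cpu_usage"`
-     OnlineCPUs     uint64 `json:"online_cpus"`
- }
-
- type memoryStats struct {
-     Usage uint64            `json:"usage"`
-     Limit uint64            `json:"limit"`
-     Stats map[string]uint64 `json:"stats"`
- }
-
- // usagePercentages decodes one stats sample and applies the formulas above.
- func usagePercentages(raw []byte) (cpuPct, memPct float64, err error) {
-     var s statsSample
-     if err = json.Unmarshal(raw, &s); err != nil {
-         return 0, 0, err
-     }
-     cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
-     systemDelta := float64(s.CPUStats.SystemCPUUsage) - float64(s.PreCPUStats.SystemCPUUsage)
-     numCPUs := float64(s.CPUStats.OnlineCPUs)
-     if numCPUs == 0 { // fall back for older daemons without online_cpus
-         numCPUs = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
-     }
-     if systemDelta > 0 {
-         cpuPct = (cpuDelta / systemDelta) * numCPUs * 100.0
-     }
-     usedMemory := s.MemoryStats.Usage - s.MemoryStats.Stats["cache"] // cache is absent on cgroup v2
-     if s.MemoryStats.Limit > 0 {
-         memPct = float64(usedMemory) / float64(s.MemoryStats.Limit) * 100.0
-     }
-     return cpuPct, memPct, nil
- }
- ```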
- operationId: "ContainerStats"
- produces: ["application/json"]
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- examples:
- application/json:
- read: "2015-01-08T22:57:31.547920715Z"
- pids_stats:
- current: 3
- networks:
- eth0:
- rx_bytes: 5338
- rx_dropped: 0
- rx_errors: 0
- rx_packets: 36
- tx_bytes: 648
- tx_dropped: 0
- tx_errors: 0
- tx_packets: 8
- eth5:
- rx_bytes: 4641
- rx_dropped: 0
- rx_errors: 0
- rx_packets: 26
- tx_bytes: 690
- tx_dropped: 0
- tx_errors: 0
- tx_packets: 9
- memory_stats:
- stats:
- total_pgmajfault: 0
- cache: 0
- mapped_file: 0
- total_inactive_file: 0
- pgpgout: 414
- rss: 6537216
- total_mapped_file: 0
- writeback: 0
- unevictable: 0
- pgpgin: 477
- total_unevictable: 0
- pgmajfault: 0
- total_rss: 6537216
- total_rss_huge: 6291456
- total_writeback: 0
- total_inactive_anon: 0
- rss_huge: 6291456
- hierarchical_memory_limit: 67108864
- total_pgfault: 964
- total_active_file: 0
- active_anon: 6537216
- total_active_anon: 6537216
- total_pgpgout: 414
- total_cache: 0
- inactive_anon: 0
- active_file: 0
- pgfault: 964
- inactive_file: 0
- total_pgpgin: 477
- max_usage: 6651904
- usage: 6537216
- failcnt: 0
- limit: 67108864
- blkio_stats: {}
- cpu_stats:
- cpu_usage:
- percpu_usage:
- - 8646879
- - 24472255
- - 36438778
- - 30657443
- usage_in_usermode: 50000000
- total_usage: 100215355
- usage_in_kernelmode: 30000000
- system_cpu_usage: 739306590000000
- online_cpus: 4
- throttling_data:
- periods: 0
- throttled_periods: 0
- throttled_time: 0
- precpu_stats:
- cpu_usage:
- percpu_usage:
- - 8646879
- - 24350896
- - 36438778
- - 30657443
- usage_in_usermode: 50000000
- total_usage: 100093996
- usage_in_kernelmode: 30000000
- system_cpu_usage: 9492140000000
- online_cpus: 4
- throttling_data:
- periods: 0
- throttled_periods: 0
- throttled_time: 0
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "stream"
- in: "query"
- description: |
- Stream the output. If false, the stats will be output once and the
- connection will then be closed.
- type: "boolean"
- default: true
- - name: "one-shot"
- in: "query"
- description: |
- Only get a single stat instead of waiting for 2 cycles. Must be used
- with `stream=false`.
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/resize:
- post:
- summary: "Resize a container TTY"
- description: "Resize the TTY for a container."
- operationId: "ContainerResize"
- consumes:
- - "application/octet-stream"
- produces:
- - "text/plain"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "cannot resize container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "h"
- in: "query"
- description: "Height of the TTY session in characters"
- type: "integer"
- - name: "w"
- in: "query"
- description: "Width of the TTY session in characters"
- type: "integer"
- tags: ["Container"]
- /containers/{id}/start:
- post:
- summary: "Start a container"
- operationId: "ContainerStart"
- responses:
- 204:
- description: "no error"
- 304:
- description: "container already started"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "detachKeys"
- in: "query"
- description: |
- Override the key sequence for detaching a container. Format is a
- single character `[a-Z]` or `ctrl-<value>` where `<value>` is one
- of: `a-z`, `@`, `^`, `[`, `,` or `_`.
- type: "string"
- tags: ["Container"]
- /containers/{id}/stop:
- post:
- summary: "Stop a container"
- operationId: "ContainerStop"
- responses:
- 204:
- description: "no error"
- 304:
- description: "container already stopped"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "t"
- in: "query"
- description: "Number of seconds to wait before killing the container"
- type: "integer"
- tags: ["Container"]
- /containers/{id}/restart:
- post:
- summary: "Restart a container"
- operationId: "ContainerRestart"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "t"
- in: "query"
- description: "Number of seconds to wait before killing the container"
- type: "integer"
- tags: ["Container"]
- /containers/{id}/kill:
- post:
- summary: "Kill a container"
- description: |
- Send a POSIX signal to a container, defaulting to killing the
- container.
- operationId: "ContainerKill"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "container is not running"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "signal"
- in: "query"
- description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)"
- type: "string"
- default: "SIGKILL"
- tags: ["Container"]
- /containers/{id}/update:
- post:
- summary: "Update a container"
- description: |
- Change various configuration options of a container without having to
- recreate it.
- operationId: "ContainerUpdate"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 200:
- description: "The container has been updated."
- schema:
- type: "object"
- title: "ContainerUpdateResponse"
- description: "OK response to ContainerUpdate operation"
- properties:
- Warnings:
- type: "array"
- items:
- type: "string"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "update"
- in: "body"
- required: true
- schema:
- allOf:
- - $ref: "#/definitions/Resources"
- - type: "object"
- properties:
- RestartPolicy:
- $ref: "#/definitions/RestartPolicy"
- example:
- BlkioWeight: 300
- CpuShares: 512
- CpuPeriod: 100000
- CpuQuota: 50000
- CpuRealtimePeriod: 1000000
- CpuRealtimeRuntime: 10000
- CpusetCpus: "0,1"
- CpusetMems: "0"
- Memory: 314572800
- MemorySwap: 514288000
- MemoryReservation: 209715200
- KernelMemory: 52428800
- RestartPolicy:
- MaximumRetryCount: 4
- Name: "on-failure"
- tags: ["Container"]
- /containers/{id}/rename:
- post:
- summary: "Rename a container"
- operationId: "ContainerRename"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "name already in use"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "name"
- in: "query"
- required: true
- description: "New name for the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/pause:
- post:
- summary: "Pause a container"
- description: |
- Use the freezer cgroup to suspend all processes in a container.
-
- Traditionally, when suspending a process the `SIGSTOP` signal is used,
- which is observable by the process being suspended. With the freezer
- cgroup the process is unaware of, and unable to intercept, the fact
- that it is being suspended and subsequently resumed.
- operationId: "ContainerPause"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/unpause:
- post:
- summary: "Unpause a container"
- description: "Resume a container which has been paused."
- operationId: "ContainerUnpause"
- responses:
- 204:
- description: "no error"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- tags: ["Container"]
- /containers/{id}/attach:
- post:
- summary: "Attach to a container"
- description: |
- Attach to a container to read its output or send it input. You can attach
- to the same container multiple times and you can reattach to containers
- that have been detached.
-
- Either the `stream` or `logs` parameter must be `true` for this endpoint
- to do anything.
-
- See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/)
- for more details.
-
- ### Hijacking
-
- This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`,
- and `stderr` on the same socket.
-
- This is the response from the daemon for an attach request:
-
- ```
- HTTP/1.1 200 OK
- Content-Type: application/vnd.docker.raw-stream
-
- [STREAM]
- ```
-
- After the headers and two new lines, the TCP connection can now be used
- for raw, bidirectional communication between the client and server.
-
- To hint potential proxies about connection hijacking, the Docker client
- can also optionally send connection upgrade headers.
-
- For example, the client sends this request to upgrade the connection:
-
- ```
- POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1
- Upgrade: tcp
- Connection: Upgrade
- ```
-
- The Docker daemon will respond with a `101 UPGRADED` response, and will
- similarly follow with the raw stream:
-
- ```
- HTTP/1.1 101 UPGRADED
- Content-Type: application/vnd.docker.raw-stream
- Connection: Upgrade
- Upgrade: tcp
-
- [STREAM]
- ```
-
- ### Stream format
-
- When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate),
- the stream over the hijacked connection is multiplexed to separate out
- `stdout` and `stderr`. The stream consists of a series of frames, each
- containing a header and a payload.
-
- The header indicates which stream the payload belongs to (`stdout` or
- `stderr`). It also contains the size of the associated frame, encoded as
- a `uint32` in the last four bytes.
-
- It is encoded on the first eight bytes like this:
-
- ```go
- header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}
- ```
-
- `STREAM_TYPE` can be:
-
- - 0: `stdin` (is written on `stdout`)
- - 1: `stdout`
- - 2: `stderr`
-
- `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size
- encoded as big endian.
-
- Following the header is the payload, which consists of the specified
- number of bytes belonging to the stream indicated by `STREAM_TYPE`.
-
- The simplest way to implement this protocol is the following (sketched in Go after the list):
-
- 1. Read 8 bytes.
- 2. Choose `stdout` or `stderr` depending on the first byte.
- 3. Extract the frame size from the last four bytes.
- 4. Read the extracted size and output it on the correct output.
- 5. Goto 1.
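-
- A sketch of this loop in Go (`conn` is the hijacked connection; this is
- illustrative only):
-
- ```go
- import (
-     "encoding/binary"
-     "io"
-     "os"
- )
-
- func demux(conn io.Reader) error {
-     header := make([]byte, 8)
-     for {
-         // 1. Read the 8-byte header.
-         if _, err := io.ReadFull(conn, header); err != nil {
-             if err == io.EOF {
-                 return nil
-             }
-             return err
-         }
-         // 2. Choose the destination from the first byte (STREAM_TYPE).
-         var dst io.Writer = os.Stdout
-         if header[0] == 2 {
-             dst = os.Stderr
-         }
-         // 3. The last four bytes are the big-endian frame size.
-         size := binary.BigEndian.Uint32(header[4:8])
-         // 4. Copy exactly that many payload bytes to the chosen output.
-         if _, err := io.CopyN(dst, conn, int64(size)); err != nil {
-             return err
-         }
-         // 5. Loop back to read the next frame.
-     }
- }
- ```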
-
- ### Stream format when using a TTY
-
- When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
- the stream is not multiplexed. The data exchanged over the hijacked
- connection is simply the raw data from the process PTY and client's
- `stdin`.
-
- operationId: "ContainerAttach"
- produces:
- - "application/vnd.docker.raw-stream"
- responses:
- 101:
- description: "no error, hints proxy about hijacking"
- 200:
- description: "no error, no upgrade header found"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "detachKeys"
- in: "query"
- description: |
- Override the key sequence for detaching a container. Format is a single
- character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
- `@`, `^`, `[`, `,` or `_`.
- type: "string"
- - name: "logs"
- in: "query"
- description: |
- Replay previous logs from the container.
-
- This is useful when attaching to a container that has already started
- and you want to output everything since the container started.
-
- If `stream` is also enabled, once all the previous output has been
- returned, it will seamlessly transition into streaming current
- output.
- type: "boolean"
- default: false
- - name: "stream"
- in: "query"
- description: |
- Stream attached streams from the time the request was made onwards.
- type: "boolean"
- default: false
- - name: "stdin"
- in: "query"
- description: "Attach to `stdin`"
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Attach to `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Attach to `stderr`"
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/attach/ws:
- get:
- summary: "Attach to a container via a websocket"
- operationId: "ContainerAttachWebsocket"
- responses:
- 101:
- description: "no error, hints proxy about hijacking"
- 200:
- description: "no error, no upgrade header found"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "detachKeys"
- in: "query"
- description: |
- Override the key sequence for detaching a container. Format is a single
- character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
- `@`, `^`, `[`, `,`, or `_`.
- type: "string"
- - name: "logs"
- in: "query"
- description: "Return logs"
- type: "boolean"
- default: false
- - name: "stream"
- in: "query"
- description: "Return stream"
- type: "boolean"
- default: false
- - name: "stdin"
- in: "query"
- description: "Attach to `stdin`"
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Attach to `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Attach to `stderr`"
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/wait:
- post:
- summary: "Wait for a container"
- description: "Block until a container stops, then returns the exit code."
- operationId: "ContainerWait"
- produces: ["application/json"]
- responses:
- 200:
- description: "The container has exit."
- schema:
- type: "object"
- title: "ContainerWaitResponse"
- description: "OK response to ContainerWait operation"
- required: [StatusCode]
- properties:
- StatusCode:
- description: "Exit code of the container"
- type: "integer"
- x-nullable: false
- Error:
- description: "container waiting error, if any"
- type: "object"
- properties:
- Message:
- description: "Details of an error"
- type: "string"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "condition"
- in: "query"
- description: |
- Wait until a container state reaches the given condition, either
- 'not-running' (default), 'next-exit', or 'removed'.
- type: "string"
- default: "not-running"
- tags: ["Container"]
- /containers/{id}:
- delete:
- summary: "Remove a container"
- operationId: "ContainerDelete"
- responses:
- 204:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: |
- You cannot remove a running container: c2ada9df5af8. Stop the
- container before attempting removal or force remove
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "v"
- in: "query"
- description: "Remove anonymous volumes associated with the container."
- type: "boolean"
- default: false
- - name: "force"
- in: "query"
- description: "If the container is running, kill it before removing it."
- type: "boolean"
- default: false
- - name: "link"
- in: "query"
- description: "Remove the specified link associated with the container."
- type: "boolean"
- default: false
- tags: ["Container"]
- /containers/{id}/archive:
- head:
- summary: "Get information about files in a container"
- description: |
- A response header `X-Docker-Container-Path-Stat` is returned, containing
- a base64-encoded JSON object with some filesystem header information
- about the path.
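-
- A minimal Go sketch of decoding this header (standard base64 is assumed
- here, and the object is decoded into a generic map since its fields are
- not enumerated above):
-
- ```go
- import (
-     "encoding/base64"
-     "encoding/json"
- )
-
- // decodePathStat decodes the X-Docker-Container-Path-Stat header value.
- func decodePathStat(header string) (map[string]interface{}, error) {
-     raw, err := base64.StdEncoding.DecodeString(header)
-     if err != nil {
-         return nil, err
-     }
-     var stat map[string]interface{}
-     err = json.Unmarshal(raw, &stat)
-     return stat, err
- }
- ```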
- operationId: "ContainerArchiveInfo"
- responses:
- 200:
- description: "no error"
- headers:
- X-Docker-Container-Path-Stat:
- type: "string"
- description: |
- A base64-encoded JSON object with some filesystem header
- information about the path
- 400:
- description: "Bad parameter"
- schema:
- allOf:
- - $ref: "#/definitions/ErrorResponse"
- - type: "object"
- properties:
- message:
- description: |
- The error message. Either "must specify path parameter"
- (path cannot be empty) or "not a directory" (path was
- asserted to be a directory but exists as a file).
- type: "string"
- x-nullable: false
- 404:
- description: "Container or path does not exist"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "path"
- in: "query"
- required: true
- description: "Resource in the container’s filesystem to archive."
- type: "string"
- tags: ["Container"]
- get:
- summary: "Get an archive of a filesystem resource in a container"
- description: "Get a tar archive of a resource in the filesystem of container id."
- operationId: "ContainerArchive"
- produces: ["application/x-tar"]
- responses:
- 200:
- description: "no error"
- 400:
- description: "Bad parameter"
- schema:
- allOf:
- - $ref: "#/definitions/ErrorResponse"
- - type: "object"
- properties:
- message:
- description: |
- The error message. Either "must specify path parameter"
- (path cannot be empty) or "not a directory" (path was
- asserted to be a directory but exists as a file).
- type: "string"
- x-nullable: false
- 404:
- description: "Container or path does not exist"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "path"
- in: "query"
- required: true
- description: "Resource in the container’s filesystem to archive."
- type: "string"
- tags: ["Container"]
- put:
- summary: "Extract an archive of files or folders to a directory in a container"
- description: "Upload a tar archive to be extracted to a path in the filesystem of container id."
- operationId: "PutContainerArchive"
- consumes: ["application/x-tar", "application/octet-stream"]
- responses:
- 200:
- description: "The content was extracted successfully"
- 400:
- description: "Bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 403:
- description: "Permission denied, the volume or container rootfs is marked as read-only."
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "No such container or path does not exist inside the container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the container"
- type: "string"
- - name: "path"
- in: "query"
- required: true
- description: "Path to a directory in the container to extract the archive’s contents into. "
- type: "string"
- - name: "noOverwriteDirNonDir"
- in: "query"
- description: |
- If `1`, `true`, or `True` then it will be an error if unpacking the
- given content would cause an existing directory to be replaced with
- a non-directory and vice versa.
- type: "string"
- - name: "copyUIDGID"
- in: "query"
- description: |
- If `1` or `true`, UID/GID maps are copied to the destination file
- or directory.
- type: "string"
- - name: "inputStream"
- in: "body"
- required: true
- description: |
- The input stream must be a tar archive compressed with one of the
- following algorithms: `identity` (no compression), `gzip`, `bzip2`,
- or `xz`.
- schema:
- type: "string"
- format: "binary"
- tags: ["Container"]
- /containers/prune:
- post:
- summary: "Delete stopped containers"
- produces:
- - "application/json"
- operationId: "ContainerPrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `until=<timestamp>` Prune containers created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune containers with (or without, in case `label!=...` is used) the specified labels.
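-
- A sketch of building this parameter in Go (the label value is purely
- illustrative):
-
- ```go
- import (
-     "encoding/json"
-     "net/url"
- )
-
- // pruneQuery returns a request path with the filters parameter encoded
- // as the JSON map[string][]string described above.
- func pruneQuery() (string, error) {
-     filters := map[string][]string{
-         "until": {"10m"},                  // containers created more than 10 minutes ago
-         "label": {"com.example.env=test"}, // hypothetical label used for illustration
-     }
-     raw, err := json.Marshal(filters)
-     if err != nil {
-         return "", err
-     }
-     v := url.Values{}
-     v.Set("filters", string(raw))
-     return "/containers/prune?" + v.Encode(), nil
- }
- ```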
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "ContainerPruneResponse"
- properties:
- ContainersDeleted:
- description: "Container IDs that were deleted"
- type: "array"
- items:
- type: "string"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Container"]
- /images/json:
- get:
- summary: "List Images"
- description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
- operationId: "ImageList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "Summary image data for the images matching the query"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/ImageSummary"
- examples:
- application/json:
- - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
- ParentId: ""
- RepoTags:
- - "ubuntu:12.04"
- - "ubuntu:precise"
- RepoDigests:
- - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
- Created: 1474925151
- Size: 103579269
- VirtualSize: 103579269
- SharedSize: 0
- Labels: {}
- Containers: 2
- - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
- ParentId: ""
- RepoTags:
- - "ubuntu:12.10"
- - "ubuntu:quantal"
- RepoDigests:
- - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
- - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
- Created: 1403128455
- Size: 172064416
- VirtualSize: 172064416
- SharedSize: 0
- Labels: {}
- Containers: 5
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "all"
- in: "query"
- description: "Show all images. Only images from a final layer (no children) are shown by default."
- type: "boolean"
- default: false
- - name: "filters"
- in: "query"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the images list.
-
- Available filters:
-
- - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- - `dangling=true`
- - `label=key` or `label="key=value"` of an image label
- - `reference`=(`<image-name>[:<tag>]`)
- - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
- type: "string"
- - name: "digests"
- in: "query"
- description: "Show digest information as a `RepoDigests` field on each image."
- type: "boolean"
- default: false
- tags: ["Image"]
- /build:
- post:
- summary: "Build an image"
- description: |
- Build an image from a tar archive with a `Dockerfile` in it.
-
- The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
-
- The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
-
- The build is canceled if the client drops the connection by quitting or being killed.
- operationId: "ImageBuild"
- consumes:
- - "application/octet-stream"
- produces:
- - "application/json"
- parameters:
- - name: "inputStream"
- in: "body"
- description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
- schema:
- type: "string"
- format: "binary"
- - name: "dockerfile"
- in: "query"
- description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
- type: "string"
- default: "Dockerfile"
- - name: "t"
- in: "query"
- description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
- type: "string"
- - name: "extrahosts"
- in: "query"
- description: "Extra hosts to add to /etc/hosts"
- type: "string"
- - name: "remote"
- in: "query"
- description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
- type: "string"
- - name: "q"
- in: "query"
- description: "Suppress verbose build output."
- type: "boolean"
- default: false
- - name: "nocache"
- in: "query"
- description: "Do not use the cache when building the image."
- type: "boolean"
- default: false
- - name: "cachefrom"
- in: "query"
- description: "JSON array of images used for build cache resolution."
- type: "string"
- - name: "pull"
- in: "query"
- description: "Attempt to pull the image even if an older image exists locally."
- type: "string"
- - name: "rm"
- in: "query"
- description: "Remove intermediate containers after a successful build."
- type: "boolean"
- default: true
- - name: "forcerm"
- in: "query"
- description: "Always remove intermediate containers, even upon failure."
- type: "boolean"
- default: false
- - name: "memory"
- in: "query"
- description: "Set memory limit for build."
- type: "integer"
- - name: "memswap"
- in: "query"
- description: "Total memory (memory + swap). Set as `-1` to disable swap."
- type: "integer"
- - name: "cpushares"
- in: "query"
- description: "CPU shares (relative weight)."
- type: "integer"
- - name: "cpusetcpus"
- in: "query"
- description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
- type: "string"
- - name: "cpuperiod"
- in: "query"
- description: "The length of a CPU period in microseconds."
- type: "integer"
- - name: "cpuquota"
- in: "query"
- description: "Microseconds of CPU time that the container can get in a CPU period."
- type: "integer"
- - name: "buildargs"
- in: "query"
- description: >
- JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
- uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
- instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
- passing secret values.
-
-
- For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
- query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
-
-
- [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
- type: "string"
- - name: "shmsize"
- in: "query"
- description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
- type: "integer"
- - name: "squash"
- in: "query"
- description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
- type: "boolean"
- - name: "labels"
- in: "query"
- description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
- type: "string"
- - name: "networkmode"
- in: "query"
- description: |
- Sets the networking mode for the run commands during build. Supported
- standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
- Any other value is taken as a custom network's name or ID to which this
- container should connect.
- type: "string"
- - name: "Content-type"
- in: "header"
- type: "string"
- enum:
- - "application/x-tar"
- default: "application/x-tar"
- - name: "X-Registry-Config"
- in: "header"
- description: |
- This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
-
- The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
-
- ```
- {
- "docker.example.com": {
- "username": "janedoe",
- "password": "hunter2"
- },
- "https://index.docker.io/v1/": {
- "username": "mobydock",
- "password": "conta1n3rize14"
- }
- }
- ```
-
- Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
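-
- A sketch of producing this header value in Go (standard base64 is
- assumed here; the credentials are the placeholders from the example
- above):
-
- ```go
- import (
-     "encoding/base64"
-     "encoding/json"
- )
-
- // registryConfigHeader encodes a map of registry URL to auth
- // configuration as the X-Registry-Config header value.
- func registryConfigHeader() (string, error) {
-     cfg := map[string]map[string]string{
-         "docker.example.com": {
-             "username": "janedoe", // placeholder credentials
-             "password": "hunter2",
-         },
-     }
-     raw, err := json.Marshal(cfg)
-     if err != nil {
-         return "", err
-     }
-     return base64.StdEncoding.EncodeToString(raw), nil
- }
- ```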
- type: "string"
- - name: "platform"
- in: "query"
- description: "Platform in the format os[/arch[/variant]]"
- type: "string"
- default: ""
- - name: "target"
- in: "query"
- description: "Target build stage"
- type: "string"
- default: ""
- - name: "outputs"
- in: "query"
- description: "BuildKit output configuration"
- type: "string"
- default: ""
- responses:
- 200:
- description: "no error"
- 400:
- description: "Bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Image"]
- /build/prune:
- post:
- summary: "Delete builder cache"
- produces:
- - "application/json"
- operationId: "BuildPrune"
- parameters:
- - name: "keep-storage"
- in: "query"
- description: "Amount of disk space in bytes to keep for cache"
- type: "integer"
- format: "int64"
- - name: "all"
- in: "query"
- type: "boolean"
- description: "Remove all types of build cache"
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the list of build cache objects.
-
- Available filters:
-
- - `until=<duration>`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
- - `id=<id>`
- - `parent=<id>`
- - `type=<string>`
- - `description=<string>`
- - `inuse`
- - `shared`
- - `private`
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "BuildPruneResponse"
- properties:
- CachesDeleted:
- type: "array"
- items:
- description: "ID of build cache object"
- type: "string"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Image"]
- /images/create:
- post:
- summary: "Create an image"
- description: "Create an image by either pulling it from a registry or importing it."
- operationId: "ImageCreate"
- consumes:
- - "text/plain"
- - "application/octet-stream"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- 404:
- description: "repository does not exist or no read access"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "fromImage"
- in: "query"
- description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed."
- type: "string"
- - name: "fromSrc"
- in: "query"
- description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image."
- type: "string"
- - name: "repo"
- in: "query"
- description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image."
- type: "string"
- - name: "tag"
- in: "query"
- description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled."
- type: "string"
- - name: "message"
- in: "query"
- description: "Set commit message for imported image."
- type: "string"
- - name: "inputImage"
- in: "body"
- description: "Image content if the value `-` has been specified in fromSrc query parameter"
- schema:
- type: "string"
- required: false
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- - name: "platform"
- in: "query"
- description: "Platform in the format os[/arch[/variant]]"
- type: "string"
- default: ""
- tags: ["Image"]
- /images/{name}/json:
- get:
- summary: "Inspect an image"
- description: "Return low-level information about an image."
- operationId: "ImageInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/Image"
- examples:
- application/json:
- Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c"
- Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a"
- Comment: ""
- Os: "linux"
- Architecture: "amd64"
- Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
- ContainerConfig:
- Tty: false
- Hostname: "e611e15f9c9d"
- Domainname: ""
- AttachStdout: false
- PublishService: ""
- AttachStdin: false
- OpenStdin: false
- StdinOnce: false
- NetworkDisabled: false
- OnBuild: []
- Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
- User: ""
- WorkingDir: ""
- MacAddress: ""
- AttachStderr: false
- Labels:
- com.example.license: "GPL"
- com.example.version: "1.0"
- com.example.vendor: "Acme"
- Env:
- - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- Cmd:
- - "/bin/sh"
- - "-c"
- - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0"
- DockerVersion: "1.9.0-dev"
- VirtualSize: 188359297
- Size: 0
- Author: ""
- Created: "2015-09-10T08:30:53.26995814Z"
- GraphDriver:
- Name: "aufs"
- Data: {}
- RepoDigests:
- - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"
- RepoTags:
- - "example:1.0"
- - "example:latest"
- - "example:stable"
- Config:
- Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"
- NetworkDisabled: false
- OnBuild: []
- StdinOnce: false
- PublishService: ""
- AttachStdin: false
- OpenStdin: false
- Domainname: ""
- AttachStdout: false
- Tty: false
- Hostname: "e611e15f9c9d"
- Cmd:
- - "/bin/bash"
- Env:
- - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- Labels:
- com.example.vendor: "Acme"
- com.example.version: "1.0"
- com.example.license: "GPL"
- MacAddress: ""
- AttachStderr: false
- WorkingDir: ""
- User: ""
- RootFS:
- Type: "layers"
- Layers:
- - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6"
- - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such image: someimage (tag: latest)"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or id"
- type: "string"
- required: true
- tags: ["Image"]
- /images/{name}/history:
- get:
- summary: "Get the history of an image"
- description: "Return parent layers of an image."
- operationId: "ImageHistory"
- produces: ["application/json"]
- responses:
- 200:
- description: "List of image layers"
- schema:
- type: "array"
- items:
- type: "object"
- x-go-name: HistoryResponseItem
- title: "HistoryResponseItem"
- description: "individual image layer information in response to ImageHistory operation"
- required: [Id, Created, CreatedBy, Tags, Size, Comment]
- properties:
- Id:
- type: "string"
- x-nullable: false
- Created:
- type: "integer"
- format: "int64"
- x-nullable: false
- CreatedBy:
- type: "string"
- x-nullable: false
- Tags:
- type: "array"
- items:
- type: "string"
- Size:
- type: "integer"
- format: "int64"
- x-nullable: false
- Comment:
- type: "string"
- x-nullable: false
- examples:
- application/json:
- - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710"
- Created: 1398108230
- CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /"
- Tags:
- - "ubuntu:lucid"
- - "ubuntu:10.04"
- Size: 182964289
- Comment: ""
- - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8"
- Created: 1398108222
- CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi <admwiggin@gmail.com> - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/"
- Tags: []
- Size: 0
- Comment: ""
- - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"
- Created: 1371157430
- CreatedBy: ""
- Tags:
- - "scratch12:latest"
- - "scratch:latest"
- Size: 0
- Comment: "Imported from -"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID"
- type: "string"
- required: true
- tags: ["Image"]
- /images/{name}/push:
- post:
- summary: "Push an image"
- description: |
- Push an image to a registry.
-
-        If you wish to push an image to a private registry, that image must
- already have a tag which references the registry. For example,
- `registry.example.com/myimage:latest`.
-
- The push is cancelled if the HTTP connection is closed.
- operationId: "ImagePush"
- consumes:
- - "application/octet-stream"
- responses:
- 200:
- description: "No error"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID."
- type: "string"
- required: true
- - name: "tag"
- in: "query"
- description: "The tag to associate with the image on the registry."
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- required: true
- tags: ["Image"]
- /images/{name}/tag:
- post:
- summary: "Tag an image"
- description: "Tag an image so that it becomes part of a repository."
- operationId: "ImageTag"
- responses:
- 201:
- description: "No error"
- 400:
- description: "Bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID to tag."
- type: "string"
- required: true
- - name: "repo"
- in: "query"
- description: "The repository to tag in. For example, `someuser/someimage`."
- type: "string"
- - name: "tag"
- in: "query"
- description: "The name of the new tag."
- type: "string"
- tags: ["Image"]
- /images/{name}:
- delete:
- summary: "Remove an image"
- description: |
- Remove an image, along with any untagged parent images that were
- referenced by that image.
-
- Images can't be removed if they have descendant images, are being
- used by a running container or are being used by a build.
- operationId: "ImageDelete"
- produces: ["application/json"]
- responses:
- 200:
- description: "The image was deleted successfully"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/ImageDeleteResponseItem"
- examples:
- application/json:
- - Untagged: "3e2f21a89f"
- - Deleted: "3e2f21a89f"
- - Deleted: "53b4f83ac9"
- 404:
- description: "No such image"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Conflict"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID"
- type: "string"
- required: true
- - name: "force"
- in: "query"
- description: "Remove the image even if it is being used by stopped containers or has other tags"
- type: "boolean"
- default: false
- - name: "noprune"
- in: "query"
- description: "Do not delete untagged parent images"
- type: "boolean"
- default: false
- tags: ["Image"]
- /images/search:
- get:
- summary: "Search images"
- description: "Search for an image on Docker Hub."
- operationId: "ImageSearch"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- type: "array"
- items:
- type: "object"
- title: "ImageSearchResponseItem"
- properties:
- description:
- type: "string"
- is_official:
- type: "boolean"
- is_automated:
- type: "boolean"
- name:
- type: "string"
- star_count:
- type: "integer"
- examples:
- application/json:
- - description: ""
- is_official: false
- is_automated: false
- name: "wma55/u1210sshd"
- star_count: 0
- - description: ""
- is_official: false
- is_automated: false
- name: "jdswinbank/sshd"
- star_count: 0
- - description: ""
- is_official: false
- is_automated: false
- name: "vgauthier/sshd"
- star_count: 0
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "term"
- in: "query"
- description: "Term to search"
- type: "string"
- required: true
- - name: "limit"
- in: "query"
- description: "Maximum number of results to return"
- type: "integer"
- - name: "filters"
- in: "query"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
-
- - `is-automated=(true|false)`
- - `is-official=(true|false)`
-            - `stars=<number>` Matches images that have at least 'number' stars.
- type: "string"
- tags: ["Image"]
- /images/prune:
- post:
- summary: "Delete unused images"
- produces:
- - "application/json"
- operationId: "ImagePrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters:
-
- - `dangling=<boolean>` When set to `true` (or `1`), prune only
- unused *and* untagged images. When set to `false`
- (or `0`), all unused images are pruned.
-            - `until=<timestamp>` Prune images created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune images with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "ImagePruneResponse"
- properties:
- ImagesDeleted:
- description: "Images that were deleted"
- type: "array"
- items:
- $ref: "#/definitions/ImageDeleteResponseItem"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Image"]
- /auth:
- post:
- summary: "Check auth configuration"
- description: |
- Validate credentials for a registry and, if available, get an identity
- token for accessing the registry without password.
- operationId: "SystemAuth"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 200:
- description: "An identity token was generated successfully."
- schema:
- type: "object"
- title: "SystemAuthResponse"
- required: [Status]
- properties:
- Status:
- description: "The status of the authentication"
- type: "string"
- x-nullable: false
- IdentityToken:
- description: "An opaque token used to authenticate a user after a successful login"
- type: "string"
- x-nullable: false
- examples:
- application/json:
- Status: "Login Succeeded"
- IdentityToken: "9cbaf023786cd7..."
- 204:
- description: "No error"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "authConfig"
- in: "body"
- description: "Authentication to check"
- schema:
- $ref: "#/definitions/AuthConfig"
- tags: ["System"]
- /info:
- get:
- summary: "Get system information"
- operationId: "SystemInfo"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/SystemInfo"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /version:
- get:
- summary: "Get version"
- description: "Returns the version of Docker that is running and various information about the system that Docker is running on."
- operationId: "SystemVersion"
- produces: ["application/json"]
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/SystemVersion"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /_ping:
- get:
- summary: "Ping"
- description: "This is a dummy endpoint you can use to test if the server is accessible."
- operationId: "SystemPing"
- produces: ["text/plain"]
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- example: "OK"
- headers:
- API-Version:
- type: "string"
- description: "Max API Version the server supports"
- Builder-Version:
- type: "string"
- description: "Default version of docker image builder"
- Docker-Experimental:
- type: "boolean"
- description: "If the server is running with experimental mode enabled"
- Cache-Control:
- type: "string"
- default: "no-cache, no-store, must-revalidate"
- Pragma:
- type: "string"
- default: "no-cache"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- headers:
- Cache-Control:
- type: "string"
- default: "no-cache, no-store, must-revalidate"
- Pragma:
- type: "string"
- default: "no-cache"
- tags: ["System"]
- head:
- summary: "Ping"
- description: "This is a dummy endpoint you can use to test if the server is accessible."
- operationId: "SystemPingHead"
- produces: ["text/plain"]
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- example: "(empty)"
- headers:
- API-Version:
- type: "string"
- description: "Max API Version the server supports"
- Builder-Version:
- type: "string"
- description: "Default version of docker image builder"
- Docker-Experimental:
- type: "boolean"
- description: "If the server is running with experimental mode enabled"
- Cache-Control:
- type: "string"
- default: "no-cache, no-store, must-revalidate"
- Pragma:
- type: "string"
- default: "no-cache"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /commit:
- post:
- summary: "Create a new image from a container"
- operationId: "ImageCommit"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "containerConfig"
- in: "body"
- description: "The container configuration"
- schema:
- $ref: "#/definitions/ContainerConfig"
- - name: "container"
- in: "query"
- description: "The ID or name of the container to commit"
- type: "string"
- - name: "repo"
- in: "query"
- description: "Repository name for the created image"
- type: "string"
- - name: "tag"
- in: "query"
- description: "Tag name for the create image"
- type: "string"
- - name: "comment"
- in: "query"
- description: "Commit message"
- type: "string"
- - name: "author"
- in: "query"
- description: "Author of the image (e.g., `John Hannibal Smith <hannibal@a-team.com>`)"
- type: "string"
- - name: "pause"
- in: "query"
- description: "Whether to pause the container before committing"
- type: "boolean"
- default: true
- - name: "changes"
- in: "query"
- description: "`Dockerfile` instructions to apply while committing"
- type: "string"
- tags: ["Image"]
- /events:
- get:
- summary: "Monitor events"
- description: |
- Stream real-time events from the server.
-
- Various objects within Docker report events when something happens to them.
-
- Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune`
-
- Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune`
-
- Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune`
-
- Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune`
-
- The Docker daemon reports these events: `reload`
-
- Services report these events: `create`, `update`, and `remove`
-
- Nodes report these events: `create`, `update`, and `remove`
-
- Secrets report these events: `create`, `update`, and `remove`
-
- Configs report these events: `create`, `update`, and `remove`
-
- The Builder reports `prune` events
-
- operationId: "SystemEvents"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "SystemEventsResponse"
- properties:
- Type:
- description: "The type of object emitting the event"
- type: "string"
- Action:
- description: "The type of event"
- type: "string"
- Actor:
- type: "object"
- properties:
- ID:
- description: "The ID of the object emitting the event"
- type: "string"
- Attributes:
- description: "Various key/value attributes of the object, depending on its type"
- type: "object"
- additionalProperties:
- type: "string"
- time:
- description: "Timestamp of event"
- type: "integer"
- timeNano:
- description: "Timestamp of event, with nanosecond accuracy"
- type: "integer"
- format: "int64"
- examples:
- application/json:
- Type: "container"
- Action: "create"
- Actor:
- ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743"
- Attributes:
- com.example.some-label: "some-label-value"
- image: "alpine"
- name: "my-container"
- time: 1461943101
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "since"
- in: "query"
- description: "Show events created since this timestamp then stream new events."
- type: "string"
- - name: "until"
- in: "query"
- description: "Show events created until this timestamp then stop streaming."
- type: "string"
- - name: "filters"
- in: "query"
- description: |
- A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
-
- - `config=<string>` config name or ID
- - `container=<string>` container name or ID
- - `daemon=<string>` daemon name or ID
- - `event=<string>` event type
- - `image=<string>` image name or ID
- - `label=<string>` image or container label
- - `network=<string>` network name or ID
- - `node=<string>` node ID
-            - `plugin=<string>` plugin name or ID
-            - `scope=<string>` local or swarm
- - `secret=<string>` secret name or ID
- - `service=<string>` service name or ID
- - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
- - `volume=<string>` volume name
- type: "string"
- tags: ["System"]
- /system/df:
- get:
- summary: "Get data usage information"
- operationId: "SystemDataUsage"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "SystemDataUsageResponse"
- properties:
- LayersSize:
- type: "integer"
- format: "int64"
- Images:
- type: "array"
- items:
- $ref: "#/definitions/ImageSummary"
- Containers:
- type: "array"
- items:
- $ref: "#/definitions/ContainerSummary"
- Volumes:
- type: "array"
- items:
- $ref: "#/definitions/Volume"
- BuildCache:
- type: "array"
- items:
- $ref: "#/definitions/BuildCache"
- example:
- LayersSize: 1092588
- Images:
- -
- Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
- ParentId: ""
- RepoTags:
- - "busybox:latest"
- RepoDigests:
- - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
- Created: 1466724217
- Size: 1092588
- SharedSize: 0
- VirtualSize: 1092588
- Labels: {}
- Containers: 1
- Containers:
- -
- Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
- Names:
- - "/top"
- Image: "busybox"
- ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
- Command: "top"
- Created: 1472592424
- Ports: []
- SizeRootFs: 1092588
- Labels: {}
- State: "exited"
- Status: "Exited (0) 56 minutes ago"
- HostConfig:
- NetworkMode: "default"
- NetworkSettings:
- Networks:
- bridge:
- IPAMConfig: null
- Links: null
- Aliases: null
- NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
- EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
- Gateway: "172.18.0.1"
- IPAddress: "172.18.0.2"
- IPPrefixLen: 16
- IPv6Gateway: ""
- GlobalIPv6Address: ""
- GlobalIPv6PrefixLen: 0
- MacAddress: "02:42:ac:12:00:02"
- Mounts: []
- Volumes:
- -
- Name: "my-volume"
- Driver: "local"
- Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
- Labels: null
- Scope: "local"
- Options: null
- UsageData:
- Size: 10920104
- RefCount: 2
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["System"]
- /images/{name}/get:
- get:
- summary: "Export an image"
- description: |
- Get a tarball containing all images and metadata for a repository.
-
-        If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but the `repositories` file is excluded from the tarball, since no image names are referenced.
-
- ### Image tarball format
-
- An image tarball contains one directory per image layer (named using its long ID), each containing these files:
-
- - `VERSION`: currently `1.0` - the file format version
- - `json`: detailed layer information, similar to `docker inspect layer_id`
- - `layer.tar`: A tarfile containing the filesystem changes in this layer
-
- The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions.
-
- If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs.
-
- ```json
- {
- "hello-world": {
- "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1"
- }
- }
- ```
- operationId: "ImageGet"
- produces:
- - "application/x-tar"
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- format: "binary"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or ID"
- type: "string"
- required: true
- tags: ["Image"]
- /images/get:
- get:
- summary: "Export several images"
- description: |
- Get a tarball containing all images and metadata for several image
- repositories.
-
- For each value of the `names` parameter: if it is a specific name and
- tag (e.g. `ubuntu:latest`), then only that image (and its parents) are
- returned; if it is an image ID, similarly only that image (and its parents)
-        are returned, and no names are referenced in the `repositories`
- file for this image ID.
-
- For details on the format, see the [export image endpoint](#operation/ImageGet).
- operationId: "ImageGetAll"
- produces:
- - "application/x-tar"
- responses:
- 200:
- description: "no error"
- schema:
- type: "string"
- format: "binary"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "names"
- in: "query"
- description: "Image names to filter by"
- type: "array"
- items:
- type: "string"
- tags: ["Image"]
- /images/load:
- post:
- summary: "Import images"
- description: |
- Load a set of images and tags into a repository.
-
- For details on the format, see the [export image endpoint](#operation/ImageGet).
- operationId: "ImageLoad"
- consumes:
- - "application/x-tar"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "imagesTarball"
- in: "body"
- description: "Tar archive containing images"
- schema:
- type: "string"
- format: "binary"
- - name: "quiet"
- in: "query"
- description: "Suppress progress details during load."
- type: "boolean"
- default: false
- tags: ["Image"]
- /containers/{id}/exec:
- post:
- summary: "Create an exec instance"
- description: "Run a command inside a running container."
- operationId: "ContainerExec"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 404:
- description: "no such container"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such container: c2ada9df5af8"
- 409:
- description: "container is paused"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "execConfig"
- in: "body"
- description: "Exec configuration"
- schema:
- type: "object"
- properties:
- AttachStdin:
- type: "boolean"
- description: "Attach to `stdin` of the exec command."
- AttachStdout:
- type: "boolean"
- description: "Attach to `stdout` of the exec command."
- AttachStderr:
- type: "boolean"
- description: "Attach to `stderr` of the exec command."
- DetachKeys:
- type: "string"
- description: |
- Override the key sequence for detaching a container. Format is
- a single character `[a-Z]` or `ctrl-<value>` where `<value>`
- is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
- Tty:
- type: "boolean"
- description: "Allocate a pseudo-TTY."
- Env:
- description: |
- A list of environment variables in the form `["VAR=value", ...]`.
- type: "array"
- items:
- type: "string"
- Cmd:
- type: "array"
- description: "Command to run, as a string or array of strings."
- items:
- type: "string"
- Privileged:
- type: "boolean"
- description: "Runs the exec process with extended privileges."
- default: false
- User:
- type: "string"
- description: |
- The user, and optionally, group to run the exec process inside
- the container. Format is one of: `user`, `user:group`, `uid`,
- or `uid:gid`.
- WorkingDir:
- type: "string"
- description: |
- The working directory for the exec process inside the container.
- example:
- AttachStdin: false
- AttachStdout: true
- AttachStderr: true
- DetachKeys: "ctrl-p,ctrl-q"
- Tty: false
- Cmd:
- - "date"
- Env:
- - "FOO=bar"
- - "BAZ=quux"
- required: true
- - name: "id"
- in: "path"
- description: "ID or name of container"
- type: "string"
- required: true
- tags: ["Exec"]
- /exec/{id}/start:
- post:
- summary: "Start an exec instance"
- description: |
- Starts a previously set up exec instance. If detach is true, this endpoint
- returns immediately after starting the command. Otherwise, it sets up an
- interactive session with the command.
- operationId: "ExecStart"
- consumes:
- - "application/json"
- produces:
- - "application/vnd.docker.raw-stream"
- responses:
- 200:
- description: "No error"
- 404:
- description: "No such exec instance"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Container is stopped or paused"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "execStartConfig"
- in: "body"
- schema:
- type: "object"
- properties:
- Detach:
- type: "boolean"
- description: "Detach from the command."
- Tty:
- type: "boolean"
- description: "Allocate a pseudo-TTY."
- example:
- Detach: false
- Tty: false
- - name: "id"
- in: "path"
- description: "Exec instance ID"
- required: true
- type: "string"
- tags: ["Exec"]
- /exec/{id}/resize:
- post:
- summary: "Resize an exec instance"
- description: |
- Resize the TTY session used by an exec instance. This endpoint only works
- if `tty` was specified as part of creating and starting the exec instance.
- operationId: "ExecResize"
- responses:
- 201:
- description: "No error"
- 404:
- description: "No such exec instance"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Exec instance ID"
- required: true
- type: "string"
- - name: "h"
- in: "query"
- description: "Height of the TTY session in characters"
- type: "integer"
- - name: "w"
- in: "query"
- description: "Width of the TTY session in characters"
- type: "integer"
- tags: ["Exec"]
- /exec/{id}/json:
- get:
- summary: "Inspect an exec instance"
- description: "Return low-level information about an exec instance."
- operationId: "ExecInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "ExecInspectResponse"
- properties:
- CanRemove:
- type: "boolean"
- DetachKeys:
- type: "string"
- ID:
- type: "string"
- Running:
- type: "boolean"
- ExitCode:
- type: "integer"
- ProcessConfig:
- $ref: "#/definitions/ProcessConfig"
- OpenStdin:
- type: "boolean"
- OpenStderr:
- type: "boolean"
- OpenStdout:
- type: "boolean"
- ContainerID:
- type: "string"
- Pid:
- type: "integer"
- description: "The system process ID for the exec process."
- examples:
- application/json:
- CanRemove: false
- ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126"
- DetachKeys: ""
- ExitCode: 2
- ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b"
- OpenStderr: true
- OpenStdin: true
- OpenStdout: true
- ProcessConfig:
- arguments:
- - "-c"
- - "exit 2"
- entrypoint: "sh"
- privileged: false
- tty: true
- user: "1000"
- Running: false
- Pid: 42000
- 404:
- description: "No such exec instance"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Exec instance ID"
- required: true
- type: "string"
- tags: ["Exec"]
-
- /volumes:
- get:
- summary: "List volumes"
- operationId: "VolumeList"
- produces: ["application/json"]
- responses:
- 200:
- description: "Summary volume data that matches the query"
- schema:
- type: "object"
- title: "VolumeListResponse"
- description: "Volume list response"
- required: [Volumes, Warnings]
- properties:
- Volumes:
- type: "array"
- x-nullable: false
- description: "List of volumes"
- items:
- $ref: "#/definitions/Volume"
- Warnings:
- type: "array"
- x-nullable: false
- description: |
- Warnings that occurred when fetching the list of volumes.
- items:
- type: "string"
-
- examples:
- application/json:
- Volumes:
- - CreatedAt: "2017-07-19T12:00:26Z"
- Name: "tardis"
- Driver: "local"
- Mountpoint: "/var/lib/docker/volumes/tardis"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Scope: "local"
- Options:
- device: "tmpfs"
- o: "size=100m,uid=1000"
- type: "tmpfs"
- Warnings: []
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- JSON encoded value of the filters (a `map[string][]string`) to
- process on the volumes list. Available filters:
-
- - `dangling=<boolean>` When set to `true` (or `1`), returns all
- volumes that are not in use by a container. When set to `false`
- (or `0`), only volumes that are in use by one or more
- containers are returned.
- - `driver=<volume-driver-name>` Matches volumes based on their driver.
- - `label=<key>` or `label=<key>:<value>` Matches volumes based on
- the presence of a `label` alone or a `label` and a value.
- - `name=<volume-name>` Matches all or part of a volume name.
- type: "string"
- format: "json"
- tags: ["Volume"]
-
- /volumes/create:
- post:
- summary: "Create a volume"
- operationId: "VolumeCreate"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 201:
- description: "The volume was created successfully"
- schema:
- $ref: "#/definitions/Volume"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "volumeConfig"
- in: "body"
- required: true
- description: "Volume configuration"
- schema:
- type: "object"
- description: "Volume configuration"
- title: "VolumeConfig"
- properties:
- Name:
- description: |
- The new volume's name. If not specified, Docker generates a name.
- type: "string"
- x-nullable: false
- Driver:
- description: "Name of the volume driver to use."
- type: "string"
- default: "local"
- x-nullable: false
- DriverOpts:
- description: |
- A mapping of driver options and values. These options are
- passed directly to the driver and are driver specific.
- type: "object"
- additionalProperties:
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- Name: "tardis"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- Driver: "custom"
- tags: ["Volume"]
-
- /volumes/{name}:
- get:
- summary: "Inspect a volume"
- operationId: "VolumeInspect"
- produces: ["application/json"]
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/Volume"
- 404:
- description: "No such volume"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- required: true
- description: "Volume name or ID"
- type: "string"
- tags: ["Volume"]
-
- delete:
- summary: "Remove a volume"
- description: "Instruct the driver to remove the volume."
- operationId: "VolumeDelete"
- responses:
- 204:
- description: "The volume was removed"
- 404:
- description: "No such volume or volume driver"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "Volume is in use and cannot be removed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- required: true
- description: "Volume name or ID"
- type: "string"
- - name: "force"
- in: "query"
- description: "Force the removal of the volume"
- type: "boolean"
- default: false
- tags: ["Volume"]
- /volumes/prune:
- post:
- summary: "Delete unused volumes"
- produces:
- - "application/json"
- operationId: "VolumePrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "VolumePruneResponse"
- properties:
- VolumesDeleted:
- description: "Volumes that were deleted"
- type: "array"
- items:
- type: "string"
- SpaceReclaimed:
- description: "Disk space reclaimed in bytes"
- type: "integer"
- format: "int64"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Volume"]
- /networks:
- get:
- summary: "List networks"
- description: |
- Returns a list of networks. For details on the format, see the
- [network inspect endpoint](#operation/NetworkInspect).
-
- Note that it uses a different, smaller representation of a network than
- inspecting a single network. For example, the list of containers attached
- to the network is not propagated in API versions 1.28 and up.
- operationId: "NetworkList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Network"
- examples:
- application/json:
- - Name: "bridge"
- Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566"
- Created: "2016-10-19T06:21:00.416543526Z"
- Scope: "local"
- Driver: "bridge"
- EnableIPv6: false
- Internal: false
- Attachable: false
- Ingress: false
- IPAM:
- Driver: "default"
- Config:
- -
- Subnet: "172.17.0.0/16"
- Options:
- com.docker.network.bridge.default_bridge: "true"
- com.docker.network.bridge.enable_icc: "true"
- com.docker.network.bridge.enable_ip_masquerade: "true"
- com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
- com.docker.network.bridge.name: "docker0"
- com.docker.network.driver.mtu: "1500"
- - Name: "none"
- Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794"
- Created: "0001-01-01T00:00:00Z"
- Scope: "local"
- Driver: "null"
- EnableIPv6: false
- Internal: false
- Attachable: false
- Ingress: false
- IPAM:
- Driver: "default"
- Config: []
- Containers: {}
- Options: {}
- - Name: "host"
- Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e"
- Created: "0001-01-01T00:00:00Z"
- Scope: "local"
- Driver: "host"
- EnableIPv6: false
- Internal: false
- Attachable: false
- Ingress: false
- IPAM:
- Driver: "default"
- Config: []
- Containers: {}
- Options: {}
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- JSON encoded value of the filters (a `map[string][]string`) to process
- on the networks list.
-
- Available filters:
-
- - `dangling=<boolean>` When set to `true` (or `1`), returns all
- networks that are not in use by a container. When set to `false`
- (or `0`), only networks that are in use by one or more
- containers are returned.
- - `driver=<driver-name>` Matches a network's driver.
- - `id=<network-id>` Matches all or part of a network ID.
-            - `label=<key>` or `label=<key>=<value>` Matches networks based on
-              the presence of a `label` alone or a `label` and a value.
- - `name=<network-name>` Matches all or part of a network name.
- - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`).
- - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks.
- type: "string"
- tags: ["Network"]
-
- /networks/{id}:
- get:
- summary: "Inspect a network"
- operationId: "NetworkInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "No error"
- schema:
- $ref: "#/definitions/Network"
- 404:
- description: "Network not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- - name: "verbose"
- in: "query"
- description: "Detailed inspect output for troubleshooting"
- type: "boolean"
- default: false
- - name: "scope"
- in: "query"
- description: "Filter the network by scope (swarm, global, or local)"
- type: "string"
- tags: ["Network"]
-
- delete:
- summary: "Remove a network"
- operationId: "NetworkDelete"
- responses:
- 204:
- description: "No error"
- 403:
- description: "operation not supported for pre-defined networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such network"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- tags: ["Network"]
-
- /networks/create:
- post:
- summary: "Create a network"
- operationId: "NetworkCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "No error"
- schema:
- type: "object"
- title: "NetworkCreateResponse"
- properties:
- Id:
- description: "The ID of the created network."
- type: "string"
- Warning:
- type: "string"
- example:
- Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30"
- Warning: ""
- 403:
- description: "operation not supported for pre-defined networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "plugin not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "networkConfig"
- in: "body"
- description: "Network configuration"
- required: true
- schema:
- type: "object"
- required: ["Name"]
- properties:
- Name:
- description: "The network's name."
- type: "string"
- CheckDuplicate:
- description: |
-                  Check for networks with duplicate names. Since a network is
-                  primarily keyed on a randomly generated ID rather than on its
-                  name, and a network name is strictly a user-friendly alias to
-                  the network's unique ID, there is no guaranteed way to check
-                  for duplicates. CheckDuplicate provides best-effort checking
-                  for networks with the same name, but it is not guaranteed to
-                  catch all name collisions.
- type: "boolean"
- Driver:
- description: "Name of the network driver plugin to use."
- type: "string"
- default: "bridge"
- Internal:
- description: "Restrict external access to the network."
- type: "boolean"
- Attachable:
- description: |
- Globally scoped network is manually attachable by regular
- containers from workers in swarm mode.
- type: "boolean"
- Ingress:
- description: |
- Ingress network is the network which provides the routing-mesh
- in swarm mode.
- type: "boolean"
- IPAM:
- description: "Optional custom IP scheme for the network."
- $ref: "#/definitions/IPAM"
- EnableIPv6:
- description: "Enable IPv6 on the network."
- type: "boolean"
- Options:
- description: "Network specific options to be used by the drivers."
- type: "object"
- additionalProperties:
- type: "string"
- Labels:
- description: "User-defined key/value metadata."
- type: "object"
- additionalProperties:
- type: "string"
- example:
- Name: "isolated_nw"
- CheckDuplicate: false
- Driver: "bridge"
- EnableIPv6: true
- IPAM:
- Driver: "default"
- Config:
- - Subnet: "172.20.0.0/16"
- IPRange: "172.20.10.0/24"
- Gateway: "172.20.10.11"
- - Subnet: "2001:db8:abcd::/64"
- Gateway: "2001:db8:abcd::1011"
- Options:
- foo: "bar"
- Internal: true
- Attachable: false
- Ingress: false
- Options:
- com.docker.network.bridge.default_bridge: "true"
- com.docker.network.bridge.enable_icc: "true"
- com.docker.network.bridge.enable_ip_masquerade: "true"
- com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
- com.docker.network.bridge.name: "docker0"
- com.docker.network.driver.mtu: "1500"
- Labels:
- com.example.some-label: "some-value"
- com.example.some-other-label: "some-other-value"
- tags: ["Network"]
-
- /networks/{id}/connect:
- post:
- summary: "Connect a container to a network"
- operationId: "NetworkConnect"
- consumes:
- - "application/json"
- responses:
- 200:
- description: "No error"
- 403:
- description: "Operation not supported for swarm scoped networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "Network or container not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- - name: "container"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- Container:
- type: "string"
- description: "The ID or name of the container to connect to the network."
- EndpointConfig:
- $ref: "#/definitions/EndpointSettings"
- example:
- Container: "3613f73ba0e4"
- EndpointConfig:
- IPAMConfig:
- IPv4Address: "172.24.56.89"
- IPv6Address: "2001:db8::5689"
- tags: ["Network"]
-
- /networks/{id}/disconnect:
- post:
- summary: "Disconnect a container from a network"
- operationId: "NetworkDisconnect"
- consumes:
- - "application/json"
- responses:
- 200:
- description: "No error"
- 403:
- description: "Operation not supported for swarm scoped networks"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "Network or container not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "Network ID or name"
- required: true
- type: "string"
- - name: "container"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- Container:
- type: "string"
- description: |
- The ID or name of the container to disconnect from the network.
- Force:
- type: "boolean"
- description: |
- Force the container to disconnect from the network.
- tags: ["Network"]
- /networks/prune:
- post:
- summary: "Delete unused networks"
- produces:
- - "application/json"
- operationId: "NetworkPrune"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `until=<timestamp>` Prune networks created before this timestamp. The `<timestamp>` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time.
- - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels.
- type: "string"
- responses:
- 200:
- description: "No error"
- schema:
- type: "object"
- title: "NetworkPruneResponse"
- properties:
- NetworksDeleted:
- description: "Networks that were deleted"
- type: "array"
- items:
- type: "string"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Network"]
- /plugins:
- get:
- summary: "List plugins"
- operationId: "PluginList"
- description: "Returns information about installed plugins."
- produces: ["application/json"]
- responses:
- 200:
- description: "No error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Plugin"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the plugin list.
-
- Available filters:
-
- - `capability=<capability name>`
- - `enable=<true>|<false>`
- tags: ["Plugin"]
-
- /plugins/privileges:
- get:
- summary: "Get plugin privileges"
- operationId: "GetPluginPrivileges"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- description: |
- Describes a permission the user has to accept upon installing
- the plugin.
- type: "object"
- title: "PluginPrivilegeItem"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- example:
- - Name: "network"
- Description: ""
- Value:
- - "host"
- - Name: "mount"
- Description: ""
- Value:
- - "/data"
- - Name: "device"
- Description: ""
- Value:
- - "/dev/cpu_dma_latency"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "remote"
- in: "query"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- tags:
- - "Plugin"
-
- /plugins/pull:
- post:
- summary: "Install a plugin"
- operationId: "PluginPull"
- description: |
- Pulls and installs a plugin. After the plugin is installed, it can be
- enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable).
- produces:
- - "application/json"
- responses:
- 204:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "remote"
- in: "query"
- description: |
- Remote reference for plugin to install.
-
- The `:latest` tag is optional, and is used as the default if omitted.
- required: true
- type: "string"
- - name: "name"
- in: "query"
- description: |
- Local name for the pulled plugin.
-
- The `:latest` tag is optional, and is used as the default if omitted.
- required: false
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration to use when pulling a plugin
- from a registry.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- - name: "body"
- in: "body"
- schema:
- type: "array"
- items:
- description: |
- Describes a permission accepted by the user upon installing the
- plugin.
- type: "object"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- example:
- - Name: "network"
- Description: ""
- Value:
- - "host"
- - Name: "mount"
- Description: ""
- Value:
- - "/data"
- - Name: "device"
- Description: ""
- Value:
- - "/dev/cpu_dma_latency"
- tags: ["Plugin"]
- /plugins/{name}/json:
- get:
- summary: "Inspect a plugin"
- operationId: "PluginInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Plugin"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- tags: ["Plugin"]
- /plugins/{name}:
- delete:
- summary: "Remove a plugin"
- operationId: "PluginDelete"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Plugin"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "force"
- in: "query"
- description: |
- Disable the plugin before removing. This may result in issues if the
- plugin is in use by a container.
- type: "boolean"
- default: false
- tags: ["Plugin"]
- /plugins/{name}/enable:
- post:
- summary: "Enable a plugin"
- operationId: "PluginEnable"
- responses:
- 200:
- description: "no error"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "timeout"
- in: "query"
- description: "Set the HTTP client timeout (in seconds)"
- type: "integer"
- default: 0
- tags: ["Plugin"]
- /plugins/{name}/disable:
- post:
- summary: "Disable a plugin"
- operationId: "PluginDisable"
- responses:
- 200:
- description: "no error"
- 404:
- description: "plugin is not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- tags: ["Plugin"]
- /plugins/{name}/upgrade:
- post:
- summary: "Upgrade a plugin"
- operationId: "PluginUpgrade"
- responses:
- 204:
- description: "no error"
- 404:
- description: "plugin not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "remote"
- in: "query"
- description: |
- Remote reference to upgrade to.
-
- The `:latest` tag is optional, and is used as the default if omitted.
- required: true
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration to use when pulling a plugin
- from a registry.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- - name: "body"
- in: "body"
- schema:
- type: "array"
- items:
- description: |
- Describes a permission accepted by the user upon installing the
- plugin.
- type: "object"
- properties:
- Name:
- type: "string"
- Description:
- type: "string"
- Value:
- type: "array"
- items:
- type: "string"
- example:
- - Name: "network"
- Description: ""
- Value:
- - "host"
- - Name: "mount"
- Description: ""
- Value:
- - "/data"
- - Name: "device"
- Description: ""
- Value:
- - "/dev/cpu_dma_latency"
- tags: ["Plugin"]
- /plugins/create:
- post:
- summary: "Create a plugin"
- operationId: "PluginCreate"
- consumes:
- - "application/x-tar"
- responses:
- 204:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "query"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "tarContext"
- in: "body"
- description: "Path to tar containing plugin rootfs and manifest"
- schema:
- type: "string"
- format: "binary"
- tags: ["Plugin"]
- /plugins/{name}/push:
- post:
- summary: "Push a plugin"
- operationId: "PluginPush"
- description: |
- Push a plugin to the registry.
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- responses:
- 200:
- description: "no error"
- 404:
- description: "plugin not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Plugin"]
- /plugins/{name}/set:
- post:
- summary: "Configure a plugin"
- operationId: "PluginSet"
- consumes:
- - "application/json"
- parameters:
- - name: "name"
- in: "path"
- description: |
- The name of the plugin. The `:latest` tag is optional, and is the
- default if omitted.
- required: true
- type: "string"
- - name: "body"
- in: "body"
- schema:
- type: "array"
- items:
- type: "string"
- example: ["DEBUG=1"]
- responses:
- 204:
- description: "No error"
- 404:
- description: "Plugin not installed"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Plugin"]
- /nodes:
- get:
- summary: "List nodes"
- operationId: "NodeList"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Node"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- description: |
- Filters to process on the nodes list, encoded as JSON (a `map[string][]string`).
-
- Available filters:
- - `id=<node id>`
- - `label=<engine label>`
-            - `membership=(accepted|pending)`
- - `name=<node name>`
- - `node.label=<node label>`
-            - `role=(manager|worker)`
- type: "string"
- tags: ["Node"]
- /nodes/{id}:
- get:
- summary: "Inspect a node"
- operationId: "NodeInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Node"
- 404:
- description: "no such node"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the node"
- type: "string"
- required: true
- tags: ["Node"]
- delete:
- summary: "Delete a node"
- operationId: "NodeDelete"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such node"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the node"
- type: "string"
- required: true
- - name: "force"
- in: "query"
- description: "Force remove a node from the swarm"
- default: false
- type: "boolean"
- tags: ["Node"]
- /nodes/{id}/update:
- post:
- summary: "Update a node"
- operationId: "NodeUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such node"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID of the node"
- type: "string"
- required: true
- - name: "body"
- in: "body"
- schema:
- $ref: "#/definitions/NodeSpec"
- - name: "version"
- in: "query"
- description: |
- The version number of the node object being updated. This is required
- to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- tags: ["Node"]
- /swarm:
- get:
- summary: "Inspect swarm"
- operationId: "SwarmInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Swarm"
- 404:
- description: "no such swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Swarm"]
- /swarm/init:
- post:
- summary: "Initialize a new swarm"
- operationId: "SwarmInit"
- produces:
- - "application/json"
- - "text/plain"
- responses:
- 200:
- description: "no error"
- schema:
- description: "The node ID"
- type: "string"
- example: "7v2t30z9blmxuhnyo6s4cpenp"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is already part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- ListenAddr:
- description: |
- Listen address used for inter-manager communication, as well
- as determining the networking interface used for the VXLAN
- Tunnel Endpoint (VTEP). This can either be an address/port
- combination in the form `192.168.1.1:4567`, or an interface
- followed by a port number, like `eth0:4567`. If the port number
- is omitted, the default swarm listening port is used.
- type: "string"
- AdvertiseAddr:
- description: |
- Externally reachable address advertised to other nodes. This
- can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number,
- like `eth0:4567`. If the port number is omitted, the port
- number from the listen address is used. If `AdvertiseAddr` is
- not specified, it will be automatically detected when possible.
- type: "string"
- DataPathAddr:
- description: |
- Address or interface to use for data path traffic (format:
- `<ip|interface>`), for example, `192.168.1.1`, or an interface,
- like `eth0`. If `DataPathAddr` is unspecified, the same address
- as `AdvertiseAddr` is used.
-
- The `DataPathAddr` specifies the address that global scope
- network drivers will publish towards other nodes in order to
- reach the containers running on this node. Using this parameter
- it is possible to separate the container data traffic from the
- management traffic of the cluster.
- type: "string"
- DataPathPort:
- description: |
- DataPathPort specifies the data path port number for data traffic.
- Acceptable port range is 1024 to 49151.
- If no port is set, or it is set to 0, the default port 4789 is used.
- type: "integer"
- format: "uint32"
- DefaultAddrPool:
- description: |
- Default Address Pool specifies default subnet pools for global
- scope networks.
- type: "array"
- items:
- type: "string"
- example: ["10.10.0.0/16", "20.20.0.0/16"]
- ForceNewCluster:
- description: "Force creation of a new swarm."
- type: "boolean"
- SubnetSize:
- description: |
- SubnetSize specifies the subnet size of the networks created
- from the default subnet pool.
- type: "integer"
- format: "uint32"
- Spec:
- $ref: "#/definitions/SwarmSpec"
- example:
- ListenAddr: "0.0.0.0:2377"
- AdvertiseAddr: "192.168.1.1:2377"
- DataPathPort: 4789
- DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"]
- SubnetSize: 24
- ForceNewCluster: false
- Spec:
- Orchestration: {}
- Raft: {}
- Dispatcher: {}
- CAConfig: {}
- EncryptionConfig:
- AutoLockManagers: false
- tags: ["Swarm"]
- /swarm/join:
- post:
- summary: "Join an existing swarm"
- operationId: "SwarmJoin"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is already part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- ListenAddr:
- description: |
- Listen address used for inter-manager communication if the node
- gets promoted to manager, as well as determining the networking
- interface used for the VXLAN Tunnel Endpoint (VTEP).
- type: "string"
- AdvertiseAddr:
- description: |
- Externally reachable address advertised to other nodes. This
- can either be an address/port combination in the form
- `192.168.1.1:4567`, or an interface followed by a port number,
- like `eth0:4567`. If the port number is omitted, the port
- number from the listen address is used. If `AdvertiseAddr` is
- not specified, it will be automatically detected when possible.
- type: "string"
- DataPathAddr:
- description: |
- Address or interface to use for data path traffic (format:
- `<ip|interface>`), for example, `192.168.1.1`, or an interface,
- like `eth0`. If `DataPathAddr` is unspecified, the same address
- as `AdvertiseAddr` is used.
-
- The `DataPathAddr` specifies the address that global scope
- network drivers will publish towards other nodes in order to
- reach the containers running on this node. Using this parameter
- it is possible to separate the container data traffic from the
- management traffic of the cluster.
-
- type: "string"
- RemoteAddrs:
- description: |
- Addresses of manager nodes already participating in the swarm.
- type: "array"
- items:
- type: "string"
- JoinToken:
- description: "Secret token for joining this swarm."
- type: "string"
- example:
- ListenAddr: "0.0.0.0:2377"
- AdvertiseAddr: "192.168.1.1:2377"
- RemoteAddrs:
- - "node1:2377"
- JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
- tags: ["Swarm"]
- /swarm/leave:
- post:
- summary: "Leave a swarm"
- operationId: "SwarmLeave"
- responses:
- 200:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "force"
- description: |
- Force leave the swarm, even if this is the last manager or if leaving
- will break the cluster.
- in: "query"
- type: "boolean"
- default: false
- tags: ["Swarm"]
- /swarm/update:
- post:
- summary: "Update a swarm"
- operationId: "SwarmUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- $ref: "#/definitions/SwarmSpec"
- - name: "version"
- in: "query"
- description: |
- The version number of the swarm object being updated. This is
- required to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- - name: "rotateWorkerToken"
- in: "query"
- description: "Rotate the worker join token."
- type: "boolean"
- default: false
- - name: "rotateManagerToken"
- in: "query"
- description: "Rotate the manager join token."
- type: "boolean"
- default: false
- - name: "rotateManagerUnlockKey"
- in: "query"
- description: "Rotate the manager unlock key."
- type: "boolean"
- default: false
- tags: ["Swarm"]
- /swarm/unlockkey:
- get:
- summary: "Get the unlock key"
- operationId: "SwarmUnlockkey"
- consumes:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "object"
- title: "UnlockKeyResponse"
- properties:
- UnlockKey:
- description: "The swarm's unlock key."
- type: "string"
- example:
- UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Swarm"]
- /swarm/unlock:
- post:
- summary: "Unlock a locked manager"
- operationId: "SwarmUnlock"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- type: "object"
- properties:
- UnlockKey:
- description: "The swarm's unlock key."
- type: "string"
- example:
- UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8"
- responses:
- 200:
- description: "no error"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Swarm"]
- /services:
- get:
- summary: "List services"
- operationId: "ServiceList"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Service"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the services list.
-
- Available filters:
-
- - `id=<service id>`
- - `label=<service label>`
- - `mode=["replicated"|"global"]`
- - `name=<service name>`
- - name: "status"
- in: "query"
- type: "boolean"
- description: |
- Include service status, with count of running and desired tasks.
- tags: ["Service"]
- /services/create:
- post:
- summary: "Create a service"
- operationId: "ServiceCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- type: "object"
- title: "ServiceCreateResponse"
- properties:
- ID:
- description: "The ID of the created service."
- type: "string"
- Warning:
- description: "Optional warning message"
- type: "string"
- example:
- ID: "ak7w3gjqoa3kuz8xcpnyy0pvl"
- Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 403:
- description: "network is not eligible for services"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 409:
- description: "name conflicts with an existing service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- required: true
- schema:
- allOf:
- - $ref: "#/definitions/ServiceSpec"
- - type: "object"
- example:
- Name: "web"
- TaskTemplate:
- ContainerSpec:
- Image: "nginx:alpine"
- Mounts:
- -
- ReadOnly: true
- Source: "web-data"
- Target: "/usr/share/nginx/html"
- Type: "volume"
- VolumeOptions:
- DriverConfig: {}
- Labels:
- com.example.something: "something-value"
- Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"]
- User: "33"
- DNSConfig:
- Nameservers: ["8.8.8.8"]
- Search: ["example.org"]
- Options: ["timeout:3"]
- Secrets:
- -
- File:
- Name: "www.example.org.key"
- UID: "33"
- GID: "33"
- Mode: 384
- SecretID: "fpjqlhnwb19zds35k8wn80lq9"
- SecretName: "example_org_domain_key"
- LogDriver:
- Name: "json-file"
- Options:
- max-file: "3"
- max-size: "10M"
- Placement: {}
- Resources:
- Limits:
- MemoryBytes: 104857600
- Reservations: {}
- RestartPolicy:
- Condition: "on-failure"
- Delay: 10000000000
- MaxAttempts: 10
- Mode:
- Replicated:
- Replicas: 4
- UpdateConfig:
- Parallelism: 2
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- RollbackConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- EndpointSpec:
- Ports:
- -
- Protocol: "tcp"
- PublishedPort: 8080
- TargetPort: 80
- Labels:
- foo: "bar"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration for pulling from private
- registries.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
- tags: ["Service"]
- /services/{id}:
- get:
- summary: "Inspect a service"
- operationId: "ServiceInspect"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Service"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID or name of service."
- required: true
- type: "string"
- - name: "insertDefaults"
- in: "query"
- description: "Fill empty fields with default values."
- type: "boolean"
- default: false
- tags: ["Service"]
- delete:
- summary: "Delete a service"
- operationId: "ServiceDelete"
- responses:
- 200:
- description: "no error"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID or name of service."
- required: true
- type: "string"
- tags: ["Service"]
- /services/{id}/update:
- post:
- summary: "Update a service"
- operationId: "ServiceUpdate"
- consumes: ["application/json"]
- produces: ["application/json"]
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/ServiceUpdateResponse"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID or name of service."
- required: true
- type: "string"
- - name: "body"
- in: "body"
- required: true
- schema:
- allOf:
- - $ref: "#/definitions/ServiceSpec"
- - type: "object"
- example:
- Name: "top"
- TaskTemplate:
- ContainerSpec:
- Image: "busybox"
- Args:
- - "top"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ForceUpdate: 0
- Mode:
- Replicated:
- Replicas: 1
- UpdateConfig:
- Parallelism: 2
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- RollbackConfig:
- Parallelism: 1
- Delay: 1000000000
- FailureAction: "pause"
- Monitor: 15000000000
- MaxFailureRatio: 0.15
- EndpointSpec:
- Mode: "vip"
-
- - name: "version"
- in: "query"
- description: |
- The version number of the service object being updated. This is
- required to avoid conflicting writes.
- This version number should be the value as currently set on the
- service *before* the update. You can find the current version by
- calling `GET /services/{id}`
- required: true
- type: "integer"
- - name: "registryAuthFrom"
- in: "query"
- description: |
- If the `X-Registry-Auth` header is not specified, this parameter
- indicates where to find registry authorization credentials.
- type: "string"
- enum: ["spec", "previous-spec"]
- default: "spec"
- - name: "rollback"
- in: "query"
- description: |
- Set this parameter to `previous` to cause a server-side rollback
- to the previous service spec. The supplied spec will be ignored in
- this case.
- type: "string"
- - name: "X-Registry-Auth"
- in: "header"
- description: |
- A base64url-encoded auth configuration for pulling from private
- registries.
-
- Refer to the [authentication section](#section/Authentication) for
- details.
- type: "string"
-
- tags: ["Service"]
- /services/{id}/logs:
- get:
- summary: "Get service logs"
- description: |
- Get `stdout` and `stderr` logs from a service. See also
- [`/containers/{id}/logs`](#operation/ContainerLogs).
-
- **Note**: This endpoint works only for services with the `local`,
- `json-file` or `journald` logging drivers.
- operationId: "ServiceLogs"
- responses:
- 200:
- description: "logs returned as a stream in response body"
- schema:
- type: "string"
- format: "binary"
- 404:
- description: "no such service"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such service: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID or name of the service"
- type: "string"
- - name: "details"
- in: "query"
- description: "Show service context and extra details provided to logs."
- type: "boolean"
- default: false
- - name: "follow"
- in: "query"
- description: "Keep connection after returning logs."
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Return logs from `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Return logs from `stderr`"
- type: "boolean"
- default: false
- - name: "since"
- in: "query"
- description: "Only return logs since this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "timestamps"
- in: "query"
- description: "Add timestamps to every log line"
- type: "boolean"
- default: false
- - name: "tail"
- in: "query"
- description: |
- Only return this number of log lines from the end of the logs.
- Specify as an integer or `all` to output all log lines.
- type: "string"
- default: "all"
- tags: ["Service"]
- /tasks:
- get:
- summary: "List tasks"
- operationId: "TaskList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Task"
- example:
- - ID: "0kzzo1i0y4jz6027t0k7aezc7"
- Version:
- Index: 71
- CreatedAt: "2016-06-07T21:07:31.171892745Z"
- UpdatedAt: "2016-06-07T21:07:31.376370513Z"
- Spec:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Slot: 1
- NodeID: "60gvrl6tm78dmak4yl7srz94v"
- Status:
- Timestamp: "2016-06-07T21:07:31.290032978Z"
- State: "running"
- Message: "started"
- ContainerStatus:
- ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035"
- PID: 677
- DesiredState: "running"
- NetworksAttachments:
- - Network:
- ID: "4qvuz4ko70xaltuqbt8956gd1"
- Version:
- Index: 18
- CreatedAt: "2016-06-07T20:31:11.912919752Z"
- UpdatedAt: "2016-06-07T21:07:29.955277358Z"
- Spec:
- Name: "ingress"
- Labels:
- com.docker.swarm.internal: "true"
- DriverConfiguration: {}
- IPAMOptions:
- Driver: {}
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- DriverState:
- Name: "overlay"
- Options:
- com.docker.network.driver.overlay.vxlanid_list: "256"
- IPAMOptions:
- Driver:
- Name: "default"
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- Addresses:
- - "10.255.0.10/16"
- - ID: "1yljwbmlr8er2waf8orvqpwms"
- Version:
- Index: 30
- CreatedAt: "2016-06-07T21:07:30.019104782Z"
- UpdatedAt: "2016-06-07T21:07:30.231958098Z"
- Name: "hopeful_cori"
- Spec:
- ContainerSpec:
- Image: "redis"
- Resources:
- Limits: {}
- Reservations: {}
- RestartPolicy:
- Condition: "any"
- MaxAttempts: 0
- Placement: {}
- ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz"
- Slot: 1
- NodeID: "60gvrl6tm78dmak4yl7srz94v"
- Status:
- Timestamp: "2016-06-07T21:07:30.202183143Z"
- State: "shutdown"
- Message: "shutdown"
- ContainerStatus:
- ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"
- DesiredState: "shutdown"
- NetworksAttachments:
- - Network:
- ID: "4qvuz4ko70xaltuqbt8956gd1"
- Version:
- Index: 18
- CreatedAt: "2016-06-07T20:31:11.912919752Z"
- UpdatedAt: "2016-06-07T21:07:29.955277358Z"
- Spec:
- Name: "ingress"
- Labels:
- com.docker.swarm.internal: "true"
- DriverConfiguration: {}
- IPAMOptions:
- Driver: {}
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- DriverState:
- Name: "overlay"
- Options:
- com.docker.network.driver.overlay.vxlanid_list: "256"
- IPAMOptions:
- Driver:
- Name: "default"
- Configs:
- - Subnet: "10.255.0.0/16"
- Gateway: "10.255.0.1"
- Addresses:
- - "10.255.0.5/16"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the tasks list.
-
- Available filters:
-
- - `desired-state=(running | shutdown | accepted)`
- - `id=<task id>`
- - `label=key` or `label="key=value"`
- - `name=<task name>`
- - `node=<node id or name>`
- - `service=<service name>`
- tags: ["Task"]
- /tasks/{id}:
- get:
- summary: "Inspect a task"
- operationId: "TaskInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Task"
- 404:
- description: "no such task"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "ID of the task"
- required: true
- type: "string"
- tags: ["Task"]
- /tasks/{id}/logs:
- get:
- summary: "Get task logs"
- description: |
- Get `stdout` and `stderr` logs from a task.
- See also [`/containers/{id}/logs`](#operation/ContainerLogs).
-
- **Note**: This endpoint works only for services with the `local`,
- `json-file` or `journald` logging drivers.
- operationId: "TaskLogs"
- responses:
- 200:
- description: "logs returned as a stream in response body"
- schema:
- type: "string"
- format: "binary"
- 404:
- description: "no such task"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such task: c2ada9df5af8"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- description: "ID of the task"
- type: "string"
- - name: "details"
- in: "query"
- description: "Show task context and extra details provided to logs."
- type: "boolean"
- default: false
- - name: "follow"
- in: "query"
- description: "Keep connection after returning logs."
- type: "boolean"
- default: false
- - name: "stdout"
- in: "query"
- description: "Return logs from `stdout`"
- type: "boolean"
- default: false
- - name: "stderr"
- in: "query"
- description: "Return logs from `stderr`"
- type: "boolean"
- default: false
- - name: "since"
- in: "query"
- description: "Only return logs since this time, as a UNIX timestamp"
- type: "integer"
- default: 0
- - name: "timestamps"
- in: "query"
- description: "Add timestamps to every log line"
- type: "boolean"
- default: false
- - name: "tail"
- in: "query"
- description: |
- Only return this number of log lines from the end of the logs.
- Specify as an integer or `all` to output all log lines.
- type: "string"
- default: "all"
- tags: ["Task"]
- /secrets:
- get:
- summary: "List secrets"
- operationId: "SecretList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Secret"
- example:
- - ID: "blt1owaxmitz71s9v5zh81zun"
- Version:
- Index: 85
- CreatedAt: "2017-07-20T13:55:28.678958722Z"
- UpdatedAt: "2017-07-20T13:55:28.678958722Z"
- Spec:
- Name: "mysql-passwd"
- Labels:
- some.label: "some.value"
- Driver:
- Name: "secret-bucket"
- Options:
- OptionA: "value for driver option A"
- OptionB: "value for driver option B"
- - ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "app-dev.crt"
- Labels:
- foo: "bar"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the secrets list.
-
- Available filters:
-
- - `id=<secret id>`
- - `label=<key> or label=<key>=value`
- - `name=<secret name>`
- - `names=<secret name>`
- tags: ["Secret"]
- /secrets/create:
- post:
- summary: "Create a secret"
- operationId: "SecretCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 409:
- description: "name conflicts with an existing object"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- schema:
- allOf:
- - $ref: "#/definitions/SecretSpec"
- - type: "object"
- example:
- Name: "app-key.crt"
- Labels:
- foo: "bar"
- Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
- Driver:
- Name: "secret-bucket"
- Options:
- OptionA: "value for driver option A"
- OptionB: "value for driver option B"
- tags: ["Secret"]
- /secrets/{id}:
- get:
- summary: "Inspect a secret"
- operationId: "SecretInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Secret"
- examples:
- application/json:
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "app-dev.crt"
- Labels:
- foo: "bar"
- Driver:
- Name: "secret-bucket"
- Options:
- OptionA: "value for driver option A"
- OptionB: "value for driver option B"
-
- 404:
- description: "secret not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the secret"
- tags: ["Secret"]
- delete:
- summary: "Delete a secret"
- operationId: "SecretDelete"
- produces:
- - "application/json"
- responses:
- 204:
- description: "no error"
- 404:
- description: "secret not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the secret"
- tags: ["Secret"]
- /secrets/{id}/update:
- post:
- summary: "Update a Secret"
- operationId: "SecretUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such secret"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the secret"
- type: "string"
- required: true
- - name: "body"
- in: "body"
- schema:
- $ref: "#/definitions/SecretSpec"
- description: |
- The spec of the secret to update. Currently, only the Labels field
- can be updated. All other fields must remain unchanged from the
- [SecretInspect endpoint](#operation/SecretInspect) response values.
- - name: "version"
- in: "query"
- description: |
- The version number of the secret object being updated. This is
- required to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- tags: ["Secret"]
- /configs:
- get:
- summary: "List configs"
- operationId: "ConfigList"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- type: "array"
- items:
- $ref: "#/definitions/Config"
- example:
- - ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "server.conf"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "filters"
- in: "query"
- type: "string"
- description: |
- A JSON encoded value of the filters (a `map[string][]string`) to
- process on the configs list.
-
- Available filters:
-
- - `id=<config id>`
- - `label=<key> or label=<key>=value`
- - `name=<config name>`
- - `names=<config name>`
- tags: ["Config"]
- /configs/create:
- post:
- summary: "Create a config"
- operationId: "ConfigCreate"
- consumes:
- - "application/json"
- produces:
- - "application/json"
- responses:
- 201:
- description: "no error"
- schema:
- $ref: "#/definitions/IdResponse"
- 409:
- description: "name conflicts with an existing object"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "body"
- in: "body"
- schema:
- allOf:
- - $ref: "#/definitions/ConfigSpec"
- - type: "object"
- example:
- Name: "server.conf"
- Labels:
- foo: "bar"
- Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
- tags: ["Config"]
- /configs/{id}:
- get:
- summary: "Inspect a config"
- operationId: "ConfigInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "no error"
- schema:
- $ref: "#/definitions/Config"
- examples:
- application/json:
- ID: "ktnbjxoalbkvbvedmg1urrz8h"
- Version:
- Index: 11
- CreatedAt: "2016-11-05T01:20:17.327670065Z"
- UpdatedAt: "2016-11-05T01:20:17.327670065Z"
- Spec:
- Name: "app-dev.crt"
- 404:
- description: "config not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the config"
- tags: ["Config"]
- delete:
- summary: "Delete a config"
- operationId: "ConfigDelete"
- produces:
- - "application/json"
- responses:
- 204:
- description: "no error"
- 404:
- description: "config not found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- required: true
- type: "string"
- description: "ID of the config"
- tags: ["Config"]
- /configs/{id}/update:
- post:
- summary: "Update a Config"
- operationId: "ConfigUpdate"
- responses:
- 200:
- description: "no error"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 404:
- description: "no such config"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 503:
- description: "node is not part of a swarm"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "id"
- in: "path"
- description: "The ID or name of the config"
- type: "string"
- required: true
- - name: "body"
- in: "body"
- schema:
- $ref: "#/definitions/ConfigSpec"
- description: |
- The spec of the config to update. Currently, only the Labels field
- can be updated. All other fields must remain unchanged from the
- [ConfigInspect endpoint](#operation/ConfigInspect) response values.
- - name: "version"
- in: "query"
- description: |
- The version number of the config object being updated. This is
- required to avoid conflicting writes.
- type: "integer"
- format: "int64"
- required: true
- tags: ["Config"]
- /distribution/{name}/json:
- get:
- summary: "Get image information from the registry"
- description: |
- Return image digest and platform information by contacting the registry.
- operationId: "DistributionInspect"
- produces:
- - "application/json"
- responses:
- 200:
- description: "descriptor and platform information"
- schema:
- type: "object"
- x-go-name: DistributionInspect
- title: "DistributionInspectResponse"
- required: [Descriptor, Platforms]
- properties:
- Descriptor:
- type: "object"
- description: |
- A descriptor struct containing digest, media type, and size.
- properties:
- MediaType:
- type: "string"
- Size:
- type: "integer"
- format: "int64"
- Digest:
- type: "string"
- URLs:
- type: "array"
- items:
- type: "string"
- Platforms:
- type: "array"
- description: |
- An array containing all platforms supported by the image.
- items:
- type: "object"
- properties:
- Architecture:
- type: "string"
- OS:
- type: "string"
- OSVersion:
- type: "string"
- OSFeatures:
- type: "array"
- items:
- type: "string"
- Variant:
- type: "string"
- Features:
- type: "array"
- items:
- type: "string"
- examples:
- application/json:
- Descriptor:
- MediaType: "application/vnd.docker.distribution.manifest.v2+json"
- Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
- Size: 3987495
- URLs:
- - ""
- Platforms:
- - Architecture: "amd64"
- OS: "linux"
- OSVersion: ""
- OSFeatures:
- - ""
- Variant: ""
- Features:
- - ""
- 401:
- description: "Failed authentication or no image found"
- schema:
- $ref: "#/definitions/ErrorResponse"
- examples:
- application/json:
- message: "No such image: someimage (tag: latest)"
- 500:
- description: "Server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- parameters:
- - name: "name"
- in: "path"
- description: "Image name or id"
- type: "string"
- required: true
- tags: ["Distribution"]
- /session:
- post:
- summary: "Initialize interactive session"
- description: |
- Start a new interactive session with a server. A session allows the
- server to call back to the client for advanced capabilities.
-
- ### Hijacking
-
- This endpoint hijacks the HTTP connection, upgrading it to an HTTP/2
- transport that allows the client to expose gRPC services on that connection.
-
- For example, the client sends this request to upgrade the connection:
-
- ```
- POST /session HTTP/1.1
- Upgrade: h2c
- Connection: Upgrade
- ```
-
- The Docker daemon responds with a `101 UPGRADED` response, followed by
- the raw stream:
-
- ```
- HTTP/1.1 101 UPGRADED
- Connection: Upgrade
- Upgrade: h2c
- ```
- operationId: "Session"
- produces:
- - "application/vnd.docker.raw-stream"
- responses:
- 101:
- description: "no error, hijacking successful"
- 400:
- description: "bad parameter"
- schema:
- $ref: "#/definitions/ErrorResponse"
- 500:
- description: "server error"
- schema:
- $ref: "#/definitions/ErrorResponse"
- tags: ["Session"]
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
deleted file mode 100644
index ddf15bb..0000000
--- a/vendor/github.com/docker/docker/api/types/auth.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-// AuthConfig contains authorization information for connecting to a Registry
-type AuthConfig struct {
- Username string `json:"username,omitempty"`
- Password string `json:"password,omitempty"`
- Auth string `json:"auth,omitempty"`
-
- // Email is an optional value associated with the username.
- // This field is deprecated and will be removed in a later
- // version of docker.
- Email string `json:"email,omitempty"`
-
- ServerAddress string `json:"serveraddress,omitempty"`
-
- // IdentityToken is used to authenticate the user and get
- // an access token for the registry.
- IdentityToken string `json:"identitytoken,omitempty"`
-
- // RegistryToken is a bearer token to be sent to a registry
- RegistryToken string `json:"registrytoken,omitempty"`
-}
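
The spec above repeatedly refers to a "base64url-encoded auth configuration" in the `X-Registry-Auth` header; it is this `AuthConfig` struct serialized to JSON and then base64url-encoded. A standalone sketch (the credentials and registry address are placeholders):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
)

func main() {
	auth := types.AuthConfig{
		Username:      "someuser",
		Password:      "not-a-real-password",
		ServerAddress: "registry.example.com",
	}
	buf, err := json.Marshal(auth)
	if err != nil {
		log.Fatal(err)
	}
	// This value goes into the X-Registry-Auth request header.
	fmt.Println(base64.URLEncoding.EncodeToString(buf))
}
```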
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
deleted file mode 100644
index bf3463b..0000000
--- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
-
-import "fmt"
-
-// WeightDevice is a structure that holds device:weight pair
-type WeightDevice struct {
- Path string
- Weight uint16
-}
-
-func (w *WeightDevice) String() string {
- return fmt.Sprintf("%s:%d", w.Path, w.Weight)
-}
-
-// ThrottleDevice is a structure that holds device:rate_per_second pair
-type ThrottleDevice struct {
- Path string
- Rate uint64
-}
-
-func (t *ThrottleDevice) String() string {
- return fmt.Sprintf("%s:%d", t.Path, t.Rate)
-}
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
deleted file mode 100644
index 9c464b7..0000000
--- a/vendor/github.com/docker/docker/api/types/client.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
- "bufio"
- "io"
- "net"
-
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/filters"
- units "github.com/docker/go-units"
-)
-
-// CheckpointCreateOptions holds parameters to create a checkpoint from a container
-type CheckpointCreateOptions struct {
- CheckpointID string
- CheckpointDir string
- Exit bool
-}
-
-// CheckpointListOptions holds parameters to list checkpoints for a container
-type CheckpointListOptions struct {
- CheckpointDir string
-}
-
-// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
-type CheckpointDeleteOptions struct {
- CheckpointID string
- CheckpointDir string
-}
-
-// ContainerAttachOptions holds parameters to attach to a container.
-type ContainerAttachOptions struct {
- Stream bool
- Stdin bool
- Stdout bool
- Stderr bool
- DetachKeys string
- Logs bool
-}
-
-// ContainerCommitOptions holds parameters to commit changes into a container.
-type ContainerCommitOptions struct {
- Reference string
- Comment string
- Author string
- Changes []string
- Pause bool
- Config *container.Config
-}
-
-// ContainerExecInspect holds information returned by exec inspect.
-type ContainerExecInspect struct {
- ExecID string `json:"ID"`
- ContainerID string
- Running bool
- ExitCode int
- Pid int
-}
-
-// ContainerListOptions holds parameters to list containers with.
-type ContainerListOptions struct {
- Quiet bool
- Size bool
- All bool
- Latest bool
- Since string
- Before string
- Limit int
- Filters filters.Args
-}
-
-// ContainerLogsOptions holds parameters to filter logs with.
-type ContainerLogsOptions struct {
- ShowStdout bool
- ShowStderr bool
- Since string
- Until string
- Timestamps bool
- Follow bool
- Tail string
- Details bool
-}
-
-// ContainerRemoveOptions holds parameters to remove containers.
-type ContainerRemoveOptions struct {
- RemoveVolumes bool
- RemoveLinks bool
- Force bool
-}
-
-// ContainerStartOptions holds parameters to start containers.
-type ContainerStartOptions struct {
- CheckpointID string
- CheckpointDir string
-}
-
-// CopyToContainerOptions holds information
-// about files to copy into a container
-type CopyToContainerOptions struct {
- AllowOverwriteDirWithFile bool
- CopyUIDGID bool
-}
-
-// EventsOptions holds parameters to filter events with.
-type EventsOptions struct {
- Since string
- Until string
- Filters filters.Args
-}
-
-// NetworkListOptions holds parameters to filter the list of networks with.
-type NetworkListOptions struct {
- Filters filters.Args
-}
-
-// HijackedResponse holds connection information for a hijacked request.
-type HijackedResponse struct {
- Conn net.Conn
- Reader *bufio.Reader
-}
-
-// Close closes the hijacked connection and reader.
-func (h *HijackedResponse) Close() {
- h.Conn.Close()
-}
-
-// CloseWriter is an interface for structs that can close their
-// input stream to prevent further writes.
-type CloseWriter interface {
- CloseWrite() error
-}
-
-// CloseWrite closes a readWriter for writing.
-func (h *HijackedResponse) CloseWrite() error {
- if conn, ok := h.Conn.(CloseWriter); ok {
- return conn.CloseWrite()
- }
- return nil
-}
-
-// ImageBuildOptions holds the information
-// necessary to build images.
-type ImageBuildOptions struct {
- Tags []string
- SuppressOutput bool
- RemoteContext string
- NoCache bool
- Remove bool
- ForceRemove bool
- PullParent bool
- Isolation container.Isolation
- CPUSetCPUs string
- CPUSetMems string
- CPUShares int64
- CPUQuota int64
- CPUPeriod int64
- Memory int64
- MemorySwap int64
- CgroupParent string
- NetworkMode string
- ShmSize int64
- Dockerfile string
- Ulimits []*units.Ulimit
- // BuildArgs needs to be a *string instead of just a string so that
- // we can tell the difference between "" (empty string) and no value
- // at all (nil). See the parsing of buildArgs in
- // api/server/router/build/build_routes.go for even more info.
- BuildArgs map[string]*string
- AuthConfigs map[string]AuthConfig
- Context io.Reader
- Labels map[string]string
- // squash the resulting image's layers to the parent
- // preserves the original image and creates a new one from the parent with all
- // the changes applied to a single layer
- Squash bool
- // CacheFrom specifies images that are used for matching cache. Images
- // specified here do not need to have a valid parent chain to match cache.
- CacheFrom []string
- SecurityOpt []string
- ExtraHosts []string // List of extra hosts
- Target string
- SessionID string
- Platform string
- // Version specifies the version of the underlying builder to use
- Version BuilderVersion
- // BuildID is an optional identifier that can be passed together with the
- // build request. The same identifier can be used to gracefully cancel the
- // build with the cancel request.
- BuildID string
- // Outputs defines configurations for exporting build results. Only supported
- // in BuildKit mode
- Outputs []ImageBuildOutput
-}
-
-// ImageBuildOutput defines configuration for exporting a build result
-type ImageBuildOutput struct {
- Type string
- Attrs map[string]string
-}
-
-// BuilderVersion sets the version of underlying builder to use
-type BuilderVersion string
-
-const (
- // BuilderV1 is the first generation builder in docker daemon
- BuilderV1 BuilderVersion = "1"
- // BuilderBuildKit is builder based on moby/buildkit project
- BuilderBuildKit BuilderVersion = "2"
-)
-
-// ImageBuildResponse holds information
-// returned by a server after building
-// an image.
-type ImageBuildResponse struct {
- Body io.ReadCloser
- OSType string
-}
-
-// ImageCreateOptions holds information to create images.
-type ImageCreateOptions struct {
- RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
- Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
-}
-
-// ImageImportSource holds source information for ImageImport
-type ImageImportSource struct {
- Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
- SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
-}
-
-// ImageImportOptions holds information to import images from the client host.
-type ImageImportOptions struct {
- Tag string // Tag is the name to tag this image with. This attribute is deprecated.
- Message string // Message is the message to tag the image with
- Changes []string // Changes are the raw changes to apply to this image
- Platform string // Platform is the target platform of the image
-}
-
-// ImageListOptions holds parameters to filter the list of images with.
-type ImageListOptions struct {
- All bool
- Filters filters.Args
-}
-
-// ImageLoadResponse returns information to the client about a load process.
-type ImageLoadResponse struct {
- // Body must be closed to avoid a resource leak
- Body io.ReadCloser
- JSON bool
-}
-
-// ImagePullOptions holds information to pull images.
-type ImagePullOptions struct {
- All bool
- RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
- PrivilegeFunc RequestPrivilegeFunc
- Platform string
-}
-
-// RequestPrivilegeFunc is a function interface that
-// clients can supply to retry operations after
-// getting an authorization error.
-// This function returns the registry authentication
-// header value in base 64 format, or an error
-// if the privilege request fails.
-type RequestPrivilegeFunc func() (string, error)
-
-// ImagePushOptions holds information to push images.
-type ImagePushOptions ImagePullOptions
-
-// ImageRemoveOptions holds parameters to remove images.
-type ImageRemoveOptions struct {
- Force bool
- PruneChildren bool
-}
-
-// ImageSearchOptions holds parameters to search images with.
-type ImageSearchOptions struct {
- RegistryAuth string
- PrivilegeFunc RequestPrivilegeFunc
- Filters filters.Args
- Limit int
-}
-
-// ResizeOptions holds parameters to resize a tty.
-// It can be used to resize container ttys and
-// exec process ttys too.
-type ResizeOptions struct {
- Height uint
- Width uint
-}
-
-// NodeListOptions holds parameters to list nodes with.
-type NodeListOptions struct {
- Filters filters.Args
-}
-
-// NodeRemoveOptions holds parameters to remove nodes with.
-type NodeRemoveOptions struct {
- Force bool
-}
-
-// ServiceCreateOptions contains the options to use when creating a service.
-type ServiceCreateOptions struct {
- // EncodedRegistryAuth is the encoded registry authorization credentials to
- // use when updating the service.
- //
- // This field follows the format of the X-Registry-Auth header.
- EncodedRegistryAuth string
-
- // QueryRegistry indicates whether the service update requires
- // contacting a registry. A registry may be contacted to retrieve
- // the image digest and manifest, which in turn can be used to update
- // platform or other information about the service.
- QueryRegistry bool
-}
-
-// ServiceCreateResponse contains the information returned to a client
-// on the creation of a new service.
-type ServiceCreateResponse struct {
- // ID is the ID of the created service.
- ID string
- // Warnings is a set of non-fatal warning messages to pass on to the user.
- Warnings []string `json:",omitempty"`
-}
-
-// Values for RegistryAuthFrom in ServiceUpdateOptions
-const (
- RegistryAuthFromSpec = "spec"
- RegistryAuthFromPreviousSpec = "previous-spec"
-)
-
-// ServiceUpdateOptions contains the options to be used for updating services.
-type ServiceUpdateOptions struct {
- // EncodedRegistryAuth is the encoded registry authorization credentials to
- // use when updating the service.
- //
- // This field follows the format of the X-Registry-Auth header.
- EncodedRegistryAuth string
-
- // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
- // into this field. While it does open API users up to racy writes, most
- // users may not need that level of consistency in practice.
-
- // RegistryAuthFrom specifies where to find the registry authorization
- // credentials if they are not given in EncodedRegistryAuth. Valid
- // values are "spec" and "previous-spec".
- RegistryAuthFrom string
-
- // Rollback indicates whether a server-side rollback should be
- // performed. When this is set, the provided spec will be ignored.
- // The valid values are "previous" and "none". An empty value is the
- // same as "none".
- Rollback string
-
- // QueryRegistry indicates whether the service update requires
- // contacting a registry. A registry may be contacted to retrieve
- // the image digest and manifest, which in turn can be used to update
- // platform or other information about the service.
- QueryRegistry bool
-}
-
-// ServiceListOptions holds parameters to list services with.
-type ServiceListOptions struct {
- Filters filters.Args
-
- // Status indicates whether the server should include the service task
- // count of running and desired tasks.
- Status bool
-}
-
-// ServiceInspectOptions holds parameters related to the "service inspect"
-// operation.
-type ServiceInspectOptions struct {
- InsertDefaults bool
-}
-
-// TaskListOptions holds parameters to list tasks with.
-type TaskListOptions struct {
- Filters filters.Args
-}
-
-// PluginRemoveOptions holds parameters to remove plugins.
-type PluginRemoveOptions struct {
- Force bool
-}
-
-// PluginEnableOptions holds parameters to enable plugins.
-type PluginEnableOptions struct {
- Timeout int
-}
-
-// PluginDisableOptions holds parameters to disable plugins.
-type PluginDisableOptions struct {
- Force bool
-}
-
-// PluginInstallOptions holds parameters to install a plugin.
-type PluginInstallOptions struct {
- Disabled bool
- AcceptAllPermissions bool
- RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
- RemoteRef string // RemoteRef is the plugin name on the registry
- PrivilegeFunc RequestPrivilegeFunc
- AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
- Args []string
-}
-
-// SwarmUnlockKeyResponse contains the response for Engine API:
-// GET /swarm/unlockkey
-type SwarmUnlockKeyResponse struct {
- // UnlockKey is the unlock key in ASCII-armored format.
- UnlockKey string
-}
-
-// PluginCreateOptions holds all options for plugin create.
-type PluginCreateOptions struct {
- RepoName string
-}
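
The comment on `ImageBuildOptions.BuildArgs` above is easy to miss: the map values are `*string` precisely so that an explicitly set value and a declared-but-unset value can be distinguished. A small sketch of constructing the options (the image tag, proxy URL, and arg names are placeholders):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	proxy := "http://proxy.example.com:3128"
	opts := types.ImageBuildOptions{
		Tags:       []string{"example/app:dev"},
		Dockerfile: "Dockerfile",
		BuildArgs: map[string]*string{
			"HTTP_PROXY": &proxy, // explicitly set to a value
			"GIT_SHA":    nil,    // declared but intentionally left without a value
		},
	}
	fmt.Printf("%d build args configured\n", len(opts.BuildArgs))
}
```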
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
deleted file mode 100644
index 3dd133a..0000000
--- a/vendor/github.com/docker/docker/api/types/configs.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/network"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// configs holds structs used for internal communication between the
-// frontend (such as an http server) and the backend (such as the
-// docker daemon).
-
-// ContainerCreateConfig is the parameter set to ContainerCreate()
-type ContainerCreateConfig struct {
- Name string
- Config *container.Config
- HostConfig *container.HostConfig
- NetworkingConfig *network.NetworkingConfig
- Platform *specs.Platform
- AdjustCPUShares bool
-}
-
-// ContainerRmConfig holds arguments for the container remove
-// operation. This struct is used to tell the backend what operations
-// to perform.
-type ContainerRmConfig struct {
- ForceRemove, RemoveVolume, RemoveLink bool
-}
-
-// ExecConfig is a small subset of the Config struct that holds the configuration
-// for the exec feature of docker.
-type ExecConfig struct {
- User string // User that will run the command
- Privileged bool // Is the container in privileged mode
- Tty bool // Attach standard streams to a tty.
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStderr bool // Attach the standard error
- AttachStdout bool // Attach the standard output
- Detach bool // Execute in detach mode
- DetachKeys string // Escape keys for detach
- Env []string // Environment variables
- WorkingDir string // Working directory
- Cmd []string // Execution commands and args
-}
-
-// PluginRmConfig holds arguments for plugin remove.
-type PluginRmConfig struct {
- ForceRemove bool
-}
-
-// PluginEnableConfig holds arguments for plugin enable
-type PluginEnableConfig struct {
- Timeout int
-}
-
-// PluginDisableConfig holds arguments for plugin disable.
-type PluginDisableConfig struct {
- ForceDisable bool
-}
-
-// NetworkListConfig stores the options available for listing networks
-type NetworkListConfig struct {
- // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
- Detailed bool
- Verbose bool
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
deleted file mode 100644
index f767195..0000000
--- a/vendor/github.com/docker/docker/api/types/container/config.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-import (
- "time"
-
- "github.com/docker/docker/api/types/strslice"
- "github.com/docker/go-connections/nat"
-)
-
-// MinimumDuration puts a minimum on user configured duration.
-// This prevents API errors caused by time-unit confusion: for example,
-// the API may set 3 as a healthcheck interval intending 3 seconds, but
-// Docker would interpret it as 3 nanoseconds.
-const MinimumDuration = 1 * time.Millisecond
-
-// HealthConfig holds configuration settings for the HEALTHCHECK feature.
-type HealthConfig struct {
- // Test is the test to perform to check that the container is healthy.
- // An empty slice means to inherit the default.
- // The options are:
- // {} : inherit healthcheck
- // {"NONE"} : disable healthcheck
- // {"CMD", args...} : exec arguments directly
- // {"CMD-SHELL", command} : run command with system's default shell
- Test []string `json:",omitempty"`
-
- // Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
- StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
-
- // Retries is the number of consecutive failures needed to consider a container as unhealthy.
- // Zero means inherit.
- Retries int `json:",omitempty"`
-}
-
-// Config contains the configuration data about a container.
-// It should hold only portable information about the container.
-// Here, "portable" means "independent from the host we are running on".
-// Non-portable information *should* appear in HostConfig.
-// All fields added to this struct must be marked `omitempty` to keep getting
-// predictable hashes from the old `v1Compatibility` configuration.
-type Config struct {
- Hostname string // Hostname
- Domainname string // Domainname
- User string // User that will run the command(s) inside the container, also support user:group
- AttachStdin bool // Attach the standard input, makes possible user interaction
- AttachStdout bool // Attach the standard output
- AttachStderr bool // Attach the standard error
- ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
- Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
- OpenStdin bool // Open stdin
- StdinOnce bool // If true, close stdin after the first attached client disconnects.
- Env []string // List of environment variable to set in the container
- Cmd strslice.StrSlice // Command to run when starting the container
- Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
- Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
- Volumes map[string]struct{} // List of volumes (mounts) used for the container
- WorkingDir string // Current directory (PWD) in which the command will be launched
- Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
- NetworkDisabled bool `json:",omitempty"` // Is network disabled
- MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
- Labels map[string]string // List of labels set to this container
- StopSignal string `json:",omitempty"` // Signal to stop a container
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
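
As the `HealthConfig` comments above note, durations are plain `time.Duration` values (nanoseconds under the hood), so a bare `3` would mean three nanoseconds rather than three seconds. A sketch of a container config with a shell-form healthcheck (the image, command, and intervals are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	cfg := container.Config{
		Image: "nginx:alpine",
		Healthcheck: &container.HealthConfig{
			// CMD-SHELL runs the command with the system's default shell.
			Test:        []string{"CMD-SHELL", "wget -q --spider http://localhost/ || exit 1"},
			Interval:    5 * time.Second, // not 5, which would be 5ns
			Timeout:     3 * time.Second,
			StartPeriod: 10 * time.Second,
			Retries:     3,
		},
	}
	fmt.Println("healthcheck test:", cfg.Healthcheck.Test)
}
```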
diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go
deleted file mode 100644
index 16dd501..0000000
--- a/vendor/github.com/docker/docker/api/types/container/container_changes.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerChangeResponseItem change item in response to ContainerChanges operation
-// swagger:model ContainerChangeResponseItem
-type ContainerChangeResponseItem struct {
-
- // Kind of change
- // Required: true
- Kind uint8 `json:"Kind"`
-
- // Path to file that has changed
- // Required: true
- Path string `json:"Path"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go
deleted file mode 100644
index d0c852f..0000000
--- a/vendor/github.com/docker/docker/api/types/container/container_create.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerCreateCreatedBody OK response to ContainerCreate operation
-// swagger:model ContainerCreateCreatedBody
-type ContainerCreateCreatedBody struct {
-
- // The ID of the created container
- // Required: true
- ID string `json:"Id"`
-
- // Warnings encountered when creating the container
- // Required: true
- Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
deleted file mode 100644
index 63381da..0000000
--- a/vendor/github.com/docker/docker/api/types/container/container_top.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerTopOKBody OK response to ContainerTop operation
-// swagger:model ContainerTopOKBody
-type ContainerTopOKBody struct {
-
- // Each process running in the container, where each process
- // is an array of values corresponding to the titles.
- //
- // Required: true
- Processes [][]string `json:"Processes"`
-
- // The ps column titles
- // Required: true
- Titles []string `json:"Titles"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
deleted file mode 100644
index c10f175..0000000
--- a/vendor/github.com/docker/docker/api/types/container/container_update.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerUpdateOKBody OK response to ContainerUpdate operation
-// swagger:model ContainerUpdateOKBody
-type ContainerUpdateOKBody struct {
-
- // warnings
- // Required: true
- Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
deleted file mode 100644
index 49e05ae..0000000
--- a/vendor/github.com/docker/docker/api/types/container/container_wait.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerWaitOKBodyError container waiting error, if any
-// swagger:model ContainerWaitOKBodyError
-type ContainerWaitOKBodyError struct {
-
- // Details of an error
- Message string `json:"Message,omitempty"`
-}
-
-// ContainerWaitOKBody OK response to ContainerWait operation
-// swagger:model ContainerWaitOKBody
-type ContainerWaitOKBody struct {
-
- // error
- // Required: true
- Error *ContainerWaitOKBodyError `json:"Error"`
-
- // Exit code of the container
- // Required: true
- StatusCode int64 `json:"StatusCode"`
-}
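The removed wait types are plain JSON mirrors of the /containers/{id}/wait response. A hedged sketch of how they were typically decoded; the payload below is made up:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Hypothetical response body from a /containers/{id}/wait call.
	payload := []byte(`{"StatusCode": 137, "Error": {"Message": "context deadline exceeded"}}`)

	var body container.ContainerWaitOKBody
	if err := json.Unmarshal(payload, &body); err != nil {
		log.Fatal(err)
	}
	if body.Error != nil && body.Error.Message != "" {
		fmt.Println("wait failed:", body.Error.Message)
	}
	fmt.Println("exit code:", body.StatusCode)
}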
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
deleted file mode 100644
index 2d1cbaa..0000000
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ /dev/null
@@ -1,447 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-import (
- "strings"
-
- "github.com/docker/docker/api/types/blkiodev"
- "github.com/docker/docker/api/types/mount"
- "github.com/docker/docker/api/types/strslice"
- "github.com/docker/go-connections/nat"
- units "github.com/docker/go-units"
-)
-
-// CgroupnsMode represents the cgroup namespace mode of the container
-type CgroupnsMode string
-
-// IsPrivate indicates whether the container uses its own private cgroup namespace
-func (c CgroupnsMode) IsPrivate() bool {
- return c == "private"
-}
-
-// IsHost indicates whether the container shares the host's cgroup namespace
-func (c CgroupnsMode) IsHost() bool {
- return c == "host"
-}
-
-// IsEmpty indicates whether the container cgroup namespace mode is unset
-func (c CgroupnsMode) IsEmpty() bool {
- return c == ""
-}
-
-// Valid indicates whether the cgroup namespace mode is valid
-func (c CgroupnsMode) Valid() bool {
- return c.IsEmpty() || c.IsPrivate() || c.IsHost()
-}
-
-// Isolation represents the isolation technology of a container. The supported
-// values are platform specific
-type Isolation string
-
-// IsDefault indicates the default isolation technology of a container. On Linux this
-// is the native driver. On Windows, this is a Windows Server Container.
-func (i Isolation) IsDefault() bool {
- return strings.ToLower(string(i)) == "default" || string(i) == ""
-}
-
-// IsHyperV indicates the use of a Hyper-V partition for isolation
-func (i Isolation) IsHyperV() bool {
- return strings.ToLower(string(i)) == "hyperv"
-}
-
-// IsProcess indicates the use of process isolation
-func (i Isolation) IsProcess() bool {
- return strings.ToLower(string(i)) == "process"
-}
-
-const (
- // IsolationEmpty is unspecified (same behavior as default)
- IsolationEmpty = Isolation("")
- // IsolationDefault is the default isolation mode on current daemon
- IsolationDefault = Isolation("default")
- // IsolationProcess is process isolation mode
- IsolationProcess = Isolation("process")
- // IsolationHyperV is HyperV isolation mode
- IsolationHyperV = Isolation("hyperv")
-)
-
-// IpcMode represents the container ipc stack.
-type IpcMode string
-
-// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
-func (n IpcMode) IsPrivate() bool {
- return n == "private"
-}
-
-// IsHost indicates whether the container shares the host's ipc namespace.
-func (n IpcMode) IsHost() bool {
- return n == "host"
-}
-
-// IsShareable indicates whether the container's ipc namespace can be shared with another container.
-func (n IpcMode) IsShareable() bool {
- return n == "shareable"
-}
-
-// IsContainer indicates whether the container uses another container's ipc namespace.
-func (n IpcMode) IsContainer() bool {
- parts := strings.SplitN(string(n), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// IsNone indicates whether container IpcMode is set to "none".
-func (n IpcMode) IsNone() bool {
- return n == "none"
-}
-
-// IsEmpty indicates whether container IpcMode is empty
-func (n IpcMode) IsEmpty() bool {
- return n == ""
-}
-
-// Valid indicates whether the ipc mode is valid.
-func (n IpcMode) Valid() bool {
- return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
-}
-
-// Container returns the name of the container whose ipc stack is going to be used.
-func (n IpcMode) Container() string {
- parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 && parts[0] == "container" {
- return parts[1]
- }
- return ""
-}
-
-// NetworkMode represents the container network stack.
-type NetworkMode string
-
-// IsNone indicates whether container isn't using a network stack.
-func (n NetworkMode) IsNone() bool {
- return n == "none"
-}
-
-// IsDefault indicates whether container uses the default network stack.
-func (n NetworkMode) IsDefault() bool {
- return n == "default"
-}
-
-// IsPrivate indicates whether container uses its private network stack.
-func (n NetworkMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
-}
-
-// IsContainer indicates whether container uses a container network stack.
-func (n NetworkMode) IsContainer() bool {
- parts := strings.SplitN(string(n), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// ConnectedContainer is the id of the container whose network this container is connected to.
-func (n NetworkMode) ConnectedContainer() string {
- parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// UserDefined indicates user-created network
-func (n NetworkMode) UserDefined() string {
- if n.IsUserDefined() {
- return string(n)
- }
- return ""
-}
-
-// UsernsMode represents userns mode in the container.
-type UsernsMode string
-
-// IsHost indicates whether the container uses the host's userns.
-func (n UsernsMode) IsHost() bool {
- return n == "host"
-}
-
-// IsPrivate indicates whether the container uses a private userns.
-func (n UsernsMode) IsPrivate() bool {
- return !(n.IsHost())
-}
-
-// Valid indicates whether the userns is valid.
-func (n UsernsMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- default:
- return false
- }
- return true
-}
-
-// CgroupSpec represents the cgroup to use for the container.
-type CgroupSpec string
-
-// IsContainer indicates whether the container is using another container cgroup
-func (c CgroupSpec) IsContainer() bool {
- parts := strings.SplitN(string(c), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the cgroup spec is valid.
-func (c CgroupSpec) Valid() bool {
- return c.IsContainer() || c == ""
-}
-
-// Container returns the name of the container whose cgroup will be used.
-func (c CgroupSpec) Container() string {
- parts := strings.SplitN(string(c), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// UTSMode represents the UTS namespace of the container.
-type UTSMode string
-
-// IsPrivate indicates whether the container uses its private UTS namespace.
-func (n UTSMode) IsPrivate() bool {
- return !(n.IsHost())
-}
-
-// IsHost indicates whether the container uses the host's UTS namespace.
-func (n UTSMode) IsHost() bool {
- return n == "host"
-}
-
-// Valid indicates whether the UTS namespace is valid.
-func (n UTSMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- default:
- return false
- }
- return true
-}
-
-// PidMode represents the pid namespace of the container.
-type PidMode string
-
-// IsPrivate indicates whether the container uses its own new pid namespace.
-func (n PidMode) IsPrivate() bool {
- return !(n.IsHost() || n.IsContainer())
-}
-
-// IsHost indicates whether the container uses the host's pid namespace.
-func (n PidMode) IsHost() bool {
- return n == "host"
-}
-
-// IsContainer indicates whether the container uses a container's pid namespace.
-func (n PidMode) IsContainer() bool {
- parts := strings.SplitN(string(n), ":", 2)
- return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the pid namespace is valid.
-func (n PidMode) Valid() bool {
- parts := strings.Split(string(n), ":")
- switch mode := parts[0]; mode {
- case "", "host":
- case "container":
- if len(parts) != 2 || parts[1] == "" {
- return false
- }
- default:
- return false
- }
- return true
-}
-
-// Container returns the name of the container whose pid namespace is going to be used.
-func (n PidMode) Container() string {
- parts := strings.SplitN(string(n), ":", 2)
- if len(parts) > 1 {
- return parts[1]
- }
- return ""
-}
-
-// DeviceRequest represents a request for devices from a device driver.
-// Used by GPU device drivers.
-type DeviceRequest struct {
- Driver string // Name of device driver
- Count int // Number of devices to request (-1 = All)
- DeviceIDs []string // List of device IDs as recognizable by the device driver
- Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu")
- Options map[string]string // Options to pass onto the device driver
-}
-
-// DeviceMapping represents the device mapping between the host and the container.
-type DeviceMapping struct {
- PathOnHost string
- PathInContainer string
- CgroupPermissions string
-}
-
-// RestartPolicy represents the restart policies of the container.
-type RestartPolicy struct {
- Name string
- MaximumRetryCount int
-}
-
-// IsNone indicates whether the container has the "no" restart policy.
-// This means the container will not automatically restart when exiting.
-func (rp *RestartPolicy) IsNone() bool {
- return rp.Name == "no" || rp.Name == ""
-}
-
-// IsAlways indicates whether the container has the "always" restart policy.
-// This means the container will automatically restart regardless of the exit status.
-func (rp *RestartPolicy) IsAlways() bool {
- return rp.Name == "always"
-}
-
-// IsOnFailure indicates whether the container has the "on-failure" restart policy.
-// This means the container will automatically restart if it exits with a non-zero exit status.
-func (rp *RestartPolicy) IsOnFailure() bool {
- return rp.Name == "on-failure"
-}
-
-// IsUnlessStopped indicates whether the container has the
-// "unless-stopped" restart policy. This means the container will
-// automatically restart unless the user has put it into a stopped state.
-func (rp *RestartPolicy) IsUnlessStopped() bool {
- return rp.Name == "unless-stopped"
-}
-
-// IsSame compares two RestartPolicy to see if they are the same
-func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
- return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
-}
-
-// LogMode is a type to define the available modes for logging
-// These modes affect how logs are handled when log messages start piling up.
-type LogMode string
-
-// Available logging modes
-const (
- LogModeUnset = ""
- LogModeBlocking LogMode = "blocking"
- LogModeNonBlock LogMode = "non-blocking"
-)
-
-// LogConfig represents the logging configuration of the container.
-type LogConfig struct {
- Type string
- Config map[string]string
-}
-
-// Resources contains container's resources (cgroups config, ulimits...)
-type Resources struct {
- // Applicable to all platforms
- CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
- Memory int64 // Memory limit (in bytes)
- NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
-
- // Applicable to UNIX platforms
- CgroupParent string // Parent cgroup.
- BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
- BlkioWeightDevice []*blkiodev.WeightDevice
- BlkioDeviceReadBps []*blkiodev.ThrottleDevice
- BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
- BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
- BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
- CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
- CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
- CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
- CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
- CpusetCpus string // CpusetCpus 0-2, 0,1
- CpusetMems string // CpusetMems 0-2, 0,1
- Devices []DeviceMapping // List of devices to map inside the container
- DeviceCgroupRules []string // List of rules to be added to the device cgroup
- DeviceRequests []DeviceRequest // List of device requests for device drivers
- KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
- KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
- MemoryReservation int64 // Memory soft limit (in bytes)
- MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
- MemorySwappiness *int64 // Tuning container memory swappiness behaviour
- OomKillDisable *bool // Whether to disable OOM Killer or not
- PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
- Ulimits []*units.Ulimit // List of ulimits to be set in the container
-
- // Applicable to Windows
- CPUCount int64 `json:"CpuCount"` // CPU count
- CPUPercent int64 `json:"CpuPercent"` // CPU percent
- IOMaximumIOps uint64 // Maximum IOps for the container system drive
- IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
-}
-
-// UpdateConfig holds the mutable attributes of a Container.
-// Those attributes can be updated at runtime.
-type UpdateConfig struct {
- // Contains container's resources (cgroups, ulimits)
- Resources
- RestartPolicy RestartPolicy
-}
-
-// HostConfig the non-portable Config structure of a container.
-// Here, "non-portable" means "dependent of the host we are running on".
-// Portable information *should* appear in Config.
-type HostConfig struct {
- // Applicable to all platforms
- Binds []string // List of volume bindings for this container
- ContainerIDFile string // File (path) where the containerId is written
- LogConfig LogConfig // Configuration of the logs for this container
- NetworkMode NetworkMode // Network mode to use for the container
- PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
- RestartPolicy RestartPolicy // Restart policy to be used for the container
- AutoRemove bool // Automatically remove container when it exits
- VolumeDriver string // Name of the volume driver used to mount volumes
- VolumesFrom []string // List of volumes to take from other container
-
- // Applicable to UNIX platforms
- CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
- CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
- CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
- DNS []string `json:"Dns"` // List of DNS servers to look up
- DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
- DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
- ExtraHosts []string // List of extra hosts
- GroupAdd []string // List of additional groups that the container process will run as
- IpcMode IpcMode // IPC namespace to use for the container
- Cgroup CgroupSpec // Cgroup to use for the container
- Links []string // List of links (in the name:alias form)
- OomScoreAdj int // Container preference for OOM-killing
- PidMode PidMode // PID namespace to use for the container
- Privileged bool // Is the container in privileged mode
- PublishAllPorts bool // Should docker publish all exposed ports for the container
- ReadonlyRootfs bool // Is the container root filesystem in read-only
- SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
- StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
- Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
- UTSMode UTSMode // UTS namespace to use for the container
- UsernsMode UsernsMode // The user namespace to use for the container
- ShmSize int64 // Total shm memory usage
- Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
- Runtime string `json:",omitempty"` // Runtime to use with this container
-
- // Applicable to Windows
- ConsoleSize [2]uint // Initial console size (height,width)
- Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
-
- // Contains container's resources (cgroups, ulimits)
- Resources
-
- // Mounts specs used by the container
- Mounts []mount.Mount `json:",omitempty"`
-
- // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
- MaskedPaths []string
-
- // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
- ReadonlyPaths []string
-
- // Run a custom init inside the container, if null, use the daemon's configured settings
- Init *bool `json:",omitempty"`
-}
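Most of the value in the removed host_config.go is the small string types and their parsers rather than HostConfig itself. A sketch of typical use, with a hypothetical container id:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// "container:<id>" delegates the network stack to another container;
	// the id here is hypothetical.
	mode := container.NetworkMode("container:builder-cache")
	if mode.IsContainer() {
		fmt.Println("shares network with:", mode.ConnectedContainer())
	}

	// RestartPolicy is a plain name/count pair with helper predicates.
	rp := container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}
	fmt.Println("restart on failure:", rp.IsOnFailure())
}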
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
deleted file mode 100644
index cf6fdf4..0000000
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// +build !windows
-
-package container // import "github.com/docker/docker/api/types/container"
-
-// IsValid indicates if an isolation technology is valid
-func (i Isolation) IsValid() bool {
- return i.IsDefault()
-}
-
-// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string {
- if n.IsBridge() {
- return "bridge"
- } else if n.IsHost() {
- return "host"
- } else if n.IsContainer() {
- return "container"
- } else if n.IsNone() {
- return "none"
- } else if n.IsDefault() {
- return "default"
- } else if n.IsUserDefined() {
- return n.UserDefined()
- }
- return ""
-}
-
-// IsBridge indicates whether container uses the bridge network stack
-func (n NetworkMode) IsBridge() bool {
- return n == "bridge"
-}
-
-// IsHost indicates whether container uses the host network stack.
-func (n NetworkMode) IsHost() bool {
- return n == "host"
-}
-
-// IsUserDefined indicates user-created network
-func (n NetworkMode) IsUserDefined() bool {
- return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
deleted file mode 100644
index 99f803a..0000000
--- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// IsBridge indicates whether container uses the bridge network stack
-// in windows it is given the name NAT
-func (n NetworkMode) IsBridge() bool {
- return n == "nat"
-}
-
-// IsHost indicates whether container uses the host network stack.
-// returns false as this is not supported by windows
-func (n NetworkMode) IsHost() bool {
- return false
-}
-
-// IsUserDefined indicates user-created network
-func (n NetworkMode) IsUserDefined() bool {
- return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
-}
-
-// IsValid indicates if an isolation technology is valid
-func (i Isolation) IsValid() bool {
- return i.IsDefault() || i.IsHyperV() || i.IsProcess()
-}
-
-// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string {
- if n.IsDefault() {
- return "default"
- } else if n.IsBridge() {
- return "nat"
- } else if n.IsNone() {
- return "none"
- } else if n.IsContainer() {
- return "container"
- } else if n.IsUserDefined() {
- return n.UserDefined()
- }
-
- return ""
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
deleted file mode 100644
index cd8311f..0000000
--- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// WaitCondition is a type used to specify a container state for which
-// to wait.
-type WaitCondition string
-
-// Possible WaitCondition Values.
-//
-// WaitConditionNotRunning (default) is used to wait for any of the non-running
-// states: "created", "exited", "dead", "removing", or "removed".
-//
-// WaitConditionNextExit is used to wait for the next time the state changes
-// to a non-running state. If the state is currently "created" or "exited",
-// this would cause Wait() to block until either the container runs and exits
-// or is removed.
-//
-// WaitConditionRemoved is used to wait for the container to be removed.
-const (
- WaitConditionNotRunning WaitCondition = "not-running"
- WaitConditionNextExit WaitCondition = "next-exit"
- WaitConditionRemoved WaitCondition = "removed"
-)
diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go
deleted file mode 100644
index dc942d9..0000000
--- a/vendor/github.com/docker/docker/api/types/error_response.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// ErrorResponse Represents an error.
-// swagger:model ErrorResponse
-type ErrorResponse struct {
-
- // The error message.
- // Required: true
- Message string `json:"message"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go
deleted file mode 100644
index f84f034..0000000
--- a/vendor/github.com/docker/docker/api/types/error_response_ext.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package types
-
-// Error returns the error message
-func (e ErrorResponse) Error() string {
- return e.Message
-}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
deleted file mode 100644
index aa8fba8..0000000
--- a/vendor/github.com/docker/docker/api/types/events/events.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package events // import "github.com/docker/docker/api/types/events"
-
-const (
- // BuilderEventType is the event type that the builder generates
- BuilderEventType = "builder"
- // ContainerEventType is the event type that containers generate
- ContainerEventType = "container"
- // DaemonEventType is the event type that the daemon generates
- DaemonEventType = "daemon"
- // ImageEventType is the event type that images generate
- ImageEventType = "image"
- // NetworkEventType is the event type that networks generate
- NetworkEventType = "network"
- // PluginEventType is the event type that plugins generate
- PluginEventType = "plugin"
- // VolumeEventType is the event type that volumes generate
- VolumeEventType = "volume"
- // ServiceEventType is the event type that services generate
- ServiceEventType = "service"
- // NodeEventType is the event type that nodes generate
- NodeEventType = "node"
- // SecretEventType is the event type that secrets generate
- SecretEventType = "secret"
- // ConfigEventType is the event type that configs generate
- ConfigEventType = "config"
-)
-
-// Actor describes something that generates events,
-// like a container, or a network, or a volume.
-// It has a defined name and a set of attributes.
-// The container attributes are its labels, other actors
-// can generate these attributes from other properties.
-type Actor struct {
- ID string
- Attributes map[string]string
-}
-
-// Message represents the information an event contains
-type Message struct {
- // Deprecated information from JSONMessage.
- // With data only in container events.
- Status string `json:"status,omitempty"`
- ID string `json:"id,omitempty"`
- From string `json:"from,omitempty"`
-
- Type string
- Action string
- Actor Actor
- // Engine events are local scope. Cluster events are swarm scope.
- Scope string `json:"scope,omitempty"`
-
- Time int64 `json:"time,omitempty"`
- TimeNano int64 `json:"timeNano,omitempty"`
-}
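The removed events package is only the type constants plus the Message/Actor structs. A sketch of decoding one message from the /events stream; the event body is invented:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/events"
)

func main() {
	// Hypothetical event as emitted on the /events stream.
	raw := []byte(`{"Type":"container","Action":"die","Actor":{"ID":"abc123","Attributes":{"name":"builder"}},"scope":"local","time":1650000000}`)

	var msg events.Message
	if err := json.Unmarshal(raw, &msg); err != nil {
		log.Fatal(err)
	}
	if msg.Type == events.ContainerEventType {
		fmt.Printf("container %s: %s\n", msg.Actor.Attributes["name"], msg.Action)
	}
}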
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
deleted file mode 100644
index 4bc91cf..0000000
--- a/vendor/github.com/docker/docker/api/types/filters/parse.go
+++ /dev/null
@@ -1,324 +0,0 @@
-/*Package filters provides tools for encoding a mapping of keys to a set of
-multiple values.
-*/
-package filters // import "github.com/docker/docker/api/types/filters"
-
-import (
- "encoding/json"
- "regexp"
- "strings"
-
- "github.com/docker/docker/api/types/versions"
-)
-
-// Args stores a mapping of keys to a set of multiple values.
-type Args struct {
- fields map[string]map[string]bool
-}
-
-// KeyValuePair are used to initialize a new Args
-type KeyValuePair struct {
- Key string
- Value string
-}
-
-// Arg creates a new KeyValuePair for initializing Args
-func Arg(key, value string) KeyValuePair {
- return KeyValuePair{Key: key, Value: value}
-}
-
-// NewArgs returns a new Args populated with the initial args
-func NewArgs(initialArgs ...KeyValuePair) Args {
- args := Args{fields: map[string]map[string]bool{}}
- for _, arg := range initialArgs {
- args.Add(arg.Key, arg.Value)
- }
- return args
-}
-
-// Keys returns all the keys in list of Args
-func (args Args) Keys() []string {
- keys := make([]string, 0, len(args.fields))
- for k := range args.fields {
- keys = append(keys, k)
- }
- return keys
-}
-
-// MarshalJSON returns a JSON byte representation of the Args
-func (args Args) MarshalJSON() ([]byte, error) {
- if len(args.fields) == 0 {
- return []byte{}, nil
- }
- return json.Marshal(args.fields)
-}
-
-// ToJSON returns the Args as a JSON encoded string
-func ToJSON(a Args) (string, error) {
- if a.Len() == 0 {
- return "", nil
- }
- buf, err := json.Marshal(a)
- return string(buf), err
-}
-
-// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
-// then the encoded format will use an older legacy format where the values are a
-// list of strings, instead of a set.
-//
-// Deprecated: do not use in any new code; use ToJSON instead
-func ToParamWithVersion(version string, a Args) (string, error) {
- if a.Len() == 0 {
- return "", nil
- }
-
- if version != "" && versions.LessThan(version, "1.22") {
- buf, err := json.Marshal(convertArgsToSlice(a.fields))
- return string(buf), err
- }
-
- return ToJSON(a)
-}
-
-// FromJSON decodes a JSON encoded string into Args
-func FromJSON(p string) (Args, error) {
- args := NewArgs()
-
- if p == "" {
- return args, nil
- }
-
- raw := []byte(p)
- err := json.Unmarshal(raw, &args)
- if err == nil {
- return args, nil
- }
-
- // Fallback to parsing arguments in the legacy slice format
- deprecated := map[string][]string{}
- if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
- return args, err
- }
-
- args.fields = deprecatedArgs(deprecated)
- return args, nil
-}
-
-// UnmarshalJSON populates the Args from JSON-encoded bytes
-func (args Args) UnmarshalJSON(raw []byte) error {
- if len(raw) == 0 {
- return nil
- }
- return json.Unmarshal(raw, &args.fields)
-}
-
-// Get returns the list of values associated with the key
-func (args Args) Get(key string) []string {
- values := args.fields[key]
- if values == nil {
- return make([]string, 0)
- }
- slice := make([]string, 0, len(values))
- for key := range values {
- slice = append(slice, key)
- }
- return slice
-}
-
-// Add a new value to the set of values
-func (args Args) Add(key, value string) {
- if _, ok := args.fields[key]; ok {
- args.fields[key][value] = true
- } else {
- args.fields[key] = map[string]bool{value: true}
- }
-}
-
-// Del removes a value from the set
-func (args Args) Del(key, value string) {
- if _, ok := args.fields[key]; ok {
- delete(args.fields[key], value)
- if len(args.fields[key]) == 0 {
- delete(args.fields, key)
- }
- }
-}
-
-// Len returns the number of keys in the mapping
-func (args Args) Len() int {
- return len(args.fields)
-}
-
-// MatchKVList returns true if all the pairs in sources exist as key=value
-// pairs in the mapping at key, or if there are no values at key.
-func (args Args) MatchKVList(key string, sources map[string]string) bool {
- fieldValues := args.fields[key]
-
- // do not filter if there is no filter set or cannot determine filter
- if len(fieldValues) == 0 {
- return true
- }
-
- if len(sources) == 0 {
- return false
- }
-
- for value := range fieldValues {
- testKV := strings.SplitN(value, "=", 2)
-
- v, ok := sources[testKV[0]]
- if !ok {
- return false
- }
- if len(testKV) == 2 && testKV[1] != v {
- return false
- }
- }
-
- return true
-}
-
-// Match returns true if any of the values at key match the source string
-func (args Args) Match(field, source string) bool {
- if args.ExactMatch(field, source) {
- return true
- }
-
- fieldValues := args.fields[field]
- for name2match := range fieldValues {
- match, err := regexp.MatchString(name2match, source)
- if err != nil {
- continue
- }
- if match {
- return true
- }
- }
- return false
-}
-
-// ExactMatch returns true if the source matches exactly one of the values.
-func (args Args) ExactMatch(key, source string) bool {
- fieldValues, ok := args.fields[key]
- // do not filter if there is no filter set or cannot determine filter
- if !ok || len(fieldValues) == 0 {
- return true
- }
-
- // try to match full name value to avoid O(N) regular expression matching
- return fieldValues[source]
-}
-
-// UniqueExactMatch returns true if there is only one value and the source
-// matches exactly the value.
-func (args Args) UniqueExactMatch(key, source string) bool {
- fieldValues := args.fields[key]
- // do not filter if there is no filter set or cannot determine filter
- if len(fieldValues) == 0 {
- return true
- }
- if len(args.fields[key]) != 1 {
- return false
- }
-
- // try to match full name value to avoid O(N) regular expression matching
- return fieldValues[source]
-}
-
-// FuzzyMatch returns true if the source matches exactly one value, or the
-// source has one of the values as a prefix.
-func (args Args) FuzzyMatch(key, source string) bool {
- if args.ExactMatch(key, source) {
- return true
- }
-
- fieldValues := args.fields[key]
- for prefix := range fieldValues {
- if strings.HasPrefix(source, prefix) {
- return true
- }
- }
- return false
-}
-
-// Contains returns true if the key exists in the mapping
-func (args Args) Contains(field string) bool {
- _, ok := args.fields[field]
- return ok
-}
-
-type invalidFilter string
-
-func (e invalidFilter) Error() string {
- return "Invalid filter '" + string(e) + "'"
-}
-
-func (invalidFilter) InvalidParameter() {}
-
-// Validate compares the set of accepted keys against the keys in the mapping.
-// An error is returned if any mapping keys are not in the accepted set.
-func (args Args) Validate(accepted map[string]bool) error {
- for name := range args.fields {
- if !accepted[name] {
- return invalidFilter(name)
- }
- }
- return nil
-}
-
-// WalkValues iterates over the list of values for a key in the mapping and calls
-// op() for each value. If op returns an error the iteration stops and the
-// error is returned.
-func (args Args) WalkValues(field string, op func(value string) error) error {
- if _, ok := args.fields[field]; !ok {
- return nil
- }
- for v := range args.fields[field] {
- if err := op(v); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Clone returns a copy of args.
-func (args Args) Clone() (newArgs Args) {
- newArgs.fields = make(map[string]map[string]bool, len(args.fields))
- for k, m := range args.fields {
- var mm map[string]bool
- if m != nil {
- mm = make(map[string]bool, len(m))
- for kk, v := range m {
- mm[kk] = v
- }
- }
- newArgs.fields[k] = mm
- }
- return newArgs
-}
-
-func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
- m := map[string]map[string]bool{}
- for k, v := range d {
- values := map[string]bool{}
- for _, vv := range v {
- values[vv] = true
- }
- m[k] = values
- }
- return m
-}
-
-func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
- m := map[string][]string{}
- for k, v := range f {
- values := []string{}
- for kk := range v {
- if v[kk] {
- values = append(values, kk)
- }
- }
- m[k] = values
- }
- return m
-}
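filters.Args was the most widely used of the removed types; it backs the ?filters= query parameter. A short, hedged usage sketch (label and name values are hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "team=infra"),
		filters.Arg("name", "builder"),
	)
	args.Add("name", "uploader")

	fmt.Println(args.Get("name"))                   // e.g. [builder uploader]
	fmt.Println(args.ExactMatch("name", "builder")) // true

	// ToJSON produces the string passed in the ?filters= query parameter.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(encoded)
}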
diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
deleted file mode 100644
index 4d9bf1c..0000000
--- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// GraphDriverData Information about a container's graph driver.
-// swagger:model GraphDriverData
-type GraphDriverData struct {
-
- // data
- // Required: true
- Data map[string]string `json:"Data"`
-
- // name
- // Required: true
- Name string `json:"Name"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go
deleted file mode 100644
index 7592d2f..0000000
--- a/vendor/github.com/docker/docker/api/types/id_response.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// IDResponse Response to an API call that returns just an Id
-// swagger:model IdResponse
-type IDResponse struct {
-
- // The id of the newly created object.
- // Required: true
- ID string `json:"Id"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go
deleted file mode 100644
index e302bb0..0000000
--- a/vendor/github.com/docker/docker/api/types/image/image_history.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package image // import "github.com/docker/docker/api/types/image"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// HistoryResponseItem individual image layer information in response to ImageHistory operation
-// swagger:model HistoryResponseItem
-type HistoryResponseItem struct {
-
- // comment
- // Required: true
- Comment string `json:"Comment"`
-
- // created
- // Required: true
- Created int64 `json:"Created"`
-
- // created by
- // Required: true
- CreatedBy string `json:"CreatedBy"`
-
- // Id
- // Required: true
- ID string `json:"Id"`
-
- // size
- // Required: true
- Size int64 `json:"Size"`
-
- // tags
- // Required: true
- Tags []string `json:"Tags"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
deleted file mode 100644
index b9a65a0..0000000
--- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// ImageDeleteResponseItem image delete response item
-// swagger:model ImageDeleteResponseItem
-type ImageDeleteResponseItem struct {
-
- // The image ID of an image that was deleted
- Deleted string `json:"Deleted,omitempty"`
-
- // The image ID of an image that was untagged
- Untagged string `json:"Untagged,omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go
deleted file mode 100644
index e145b3d..0000000
--- a/vendor/github.com/docker/docker/api/types/image_summary.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// ImageSummary image summary
-// swagger:model ImageSummary
-type ImageSummary struct {
-
- // containers
- // Required: true
- Containers int64 `json:"Containers"`
-
- // created
- // Required: true
- Created int64 `json:"Created"`
-
- // Id
- // Required: true
- ID string `json:"Id"`
-
- // labels
- // Required: true
- Labels map[string]string `json:"Labels"`
-
- // parent Id
- // Required: true
- ParentID string `json:"ParentId"`
-
- // repo digests
- // Required: true
- RepoDigests []string `json:"RepoDigests"`
-
- // repo tags
- // Required: true
- RepoTags []string `json:"RepoTags"`
-
- // shared size
- // Required: true
- SharedSize int64 `json:"SharedSize"`
-
- // size
- // Required: true
- Size int64 `json:"Size"`
-
- // virtual size
- // Required: true
- VirtualSize int64 `json:"VirtualSize"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go
deleted file mode 100644
index 443b8d0..0000000
--- a/vendor/github.com/docker/docker/api/types/mount/mount.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package mount // import "github.com/docker/docker/api/types/mount"
-
-import (
- "os"
-)
-
-// Type represents the type of a mount.
-type Type string
-
-// Type constants
-const (
- // TypeBind is the type for mounting host dir
- TypeBind Type = "bind"
- // TypeVolume is the type for remote storage volumes
- TypeVolume Type = "volume"
- // TypeTmpfs is the type for mounting tmpfs
- TypeTmpfs Type = "tmpfs"
- // TypeNamedPipe is the type for mounting Windows named pipes
- TypeNamedPipe Type = "npipe"
-)
-
-// Mount represents a mount (volume).
-type Mount struct {
- Type Type `json:",omitempty"`
- // Source specifies the name of the mount. Depending on mount type, this
- // may be a volume name or a host path, or even ignored.
- // Source is not supported for tmpfs (must be an empty value)
- Source string `json:",omitempty"`
- Target string `json:",omitempty"`
- ReadOnly bool `json:",omitempty"`
- Consistency Consistency `json:",omitempty"`
-
- BindOptions *BindOptions `json:",omitempty"`
- VolumeOptions *VolumeOptions `json:",omitempty"`
- TmpfsOptions *TmpfsOptions `json:",omitempty"`
-}
-
-// Propagation represents the propagation of a mount.
-type Propagation string
-
-const (
- // PropagationRPrivate RPRIVATE
- PropagationRPrivate Propagation = "rprivate"
- // PropagationPrivate PRIVATE
- PropagationPrivate Propagation = "private"
- // PropagationRShared RSHARED
- PropagationRShared Propagation = "rshared"
- // PropagationShared SHARED
- PropagationShared Propagation = "shared"
- // PropagationRSlave RSLAVE
- PropagationRSlave Propagation = "rslave"
- // PropagationSlave SLAVE
- PropagationSlave Propagation = "slave"
-)
-
-// Propagations is the list of all valid mount propagations
-var Propagations = []Propagation{
- PropagationRPrivate,
- PropagationPrivate,
- PropagationRShared,
- PropagationShared,
- PropagationRSlave,
- PropagationSlave,
-}
-
-// Consistency represents the consistency requirements of a mount.
-type Consistency string
-
-const (
- // ConsistencyFull guarantees bind mount-like consistency
- ConsistencyFull Consistency = "consistent"
- // ConsistencyCached mounts can cache read data and FS structure
- ConsistencyCached Consistency = "cached"
- // ConsistencyDelegated mounts can cache read and written data and structure
- ConsistencyDelegated Consistency = "delegated"
- // ConsistencyDefault provides "consistent" behavior unless overridden
- ConsistencyDefault Consistency = "default"
-)
-
-// BindOptions defines options specific to mounts of type "bind".
-type BindOptions struct {
- Propagation Propagation `json:",omitempty"`
- NonRecursive bool `json:",omitempty"`
-}
-
-// VolumeOptions represents the options for a mount of type volume.
-type VolumeOptions struct {
- NoCopy bool `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
- DriverConfig *Driver `json:",omitempty"`
-}
-
-// Driver represents a volume driver.
-type Driver struct {
- Name string `json:",omitempty"`
- Options map[string]string `json:",omitempty"`
-}
-
-// TmpfsOptions defines options specific to mounts of type "tmpfs".
-type TmpfsOptions struct {
- // Size sets the size of the tmpfs, in bytes.
- //
- // This will be converted to an operating system specific value
- // depending on the host. For example, on linux, it will be converted to
- // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
- // docker, uses a straight byte value.
- //
- // Percentages are not supported.
- SizeBytes int64 `json:",omitempty"`
- // Mode of the tmpfs upon creation
- Mode os.FileMode `json:",omitempty"`
-
- // TODO(stevvooe): There are several more tmpfs flags, specified in the
- // daemon, that are accepted. Only the most basic are added for now.
- //
- // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56
- //
- // var validFlags = map[string]bool{
- // "": true,
- // "size": true, X
- // "mode": true, X
- // "uid": true,
- // "gid": true,
- // "nr_inodes": true,
- // "nr_blocks": true,
- // "mpol": true,
- // }
- //
- // Some of these may be straightforward to add, but others, such as
- // uid/gid have implications in a clustered system.
-}
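The mount package is pure data. A sketch of a read-only bind mount built from the removed types; the host and container paths are hypothetical:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	m := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/tmp/build-cache", // hypothetical host path
		Target:   "/cache",           // hypothetical container path
		ReadOnly: true,
		BindOptions: &mount.BindOptions{
			Propagation: mount.PropagationRPrivate,
		},
	}
	fmt.Printf("%s mount %s -> %s (ro=%v)\n", m.Type, m.Source, m.Target, m.ReadOnly)
}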
diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go
deleted file mode 100644
index 437b184..0000000
--- a/vendor/github.com/docker/docker/api/types/network/network.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package network // import "github.com/docker/docker/api/types/network"
-import (
- "github.com/docker/docker/api/types/filters"
-)
-
-// Address represents an IP address
-type Address struct {
- Addr string
- PrefixLen int
-}
-
-// IPAM represents IP Address Management
-type IPAM struct {
- Driver string
- Options map[string]string // Per network IPAM driver options
- Config []IPAMConfig
-}
-
-// IPAMConfig represents IPAM configurations
-type IPAMConfig struct {
- Subnet string `json:",omitempty"`
- IPRange string `json:",omitempty"`
- Gateway string `json:",omitempty"`
- AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
-}
-
-// EndpointIPAMConfig represents IPAM configurations for the endpoint
-type EndpointIPAMConfig struct {
- IPv4Address string `json:",omitempty"`
- IPv6Address string `json:",omitempty"`
- LinkLocalIPs []string `json:",omitempty"`
-}
-
-// Copy makes a copy of the endpoint ipam config
-func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
- cfgCopy := *cfg
- cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
- cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
- return &cfgCopy
-}
-
-// PeerInfo represents one peer of an overlay network
-type PeerInfo struct {
- Name string
- IP string
-}
-
-// EndpointSettings stores the network endpoint details
-type EndpointSettings struct {
- // Configurations
- IPAMConfig *EndpointIPAMConfig
- Links []string
- Aliases []string
- // Operational data
- NetworkID string
- EndpointID string
- Gateway string
- IPAddress string
- IPPrefixLen int
- IPv6Gateway string
- GlobalIPv6Address string
- GlobalIPv6PrefixLen int
- MacAddress string
- DriverOpts map[string]string
-}
-
-// Task carries the information about one backend task
-type Task struct {
- Name string
- EndpointID string
- EndpointIP string
- Info map[string]string
-}
-
-// ServiceInfo represents service parameters with the list of service's tasks
-type ServiceInfo struct {
- VIP string
- Ports []string
- LocalLBIndex int
- Tasks []Task
-}
-
-// Copy makes a deep copy of `EndpointSettings`
-func (es *EndpointSettings) Copy() *EndpointSettings {
- epCopy := *es
- if es.IPAMConfig != nil {
- epCopy.IPAMConfig = es.IPAMConfig.Copy()
- }
-
- if es.Links != nil {
- links := make([]string, 0, len(es.Links))
- epCopy.Links = append(links, es.Links...)
- }
-
- if es.Aliases != nil {
- aliases := make([]string, 0, len(es.Aliases))
- epCopy.Aliases = append(aliases, es.Aliases...)
- }
- return &epCopy
-}
-
-// NetworkingConfig represents the container's networking configuration for each of its interfaces
-// Carries the networking configs specified in the `docker run` and `docker network connect` commands
-type NetworkingConfig struct {
- EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
-}
-
-// ConfigReference specifies the source which provides a network's configuration
-type ConfigReference struct {
- Network string
-}
-
-var acceptedFilters = map[string]bool{
- "dangling": true,
- "driver": true,
- "id": true,
- "label": true,
- "name": true,
- "scope": true,
- "type": true,
-}
-
-// ValidateFilters validates the list of filter args with the available filters.
-func ValidateFilters(filter filters.Args) error {
- return filter.Validate(acceptedFilters)
-}
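The only behaviour in the removed network package is filter validation plus the Copy helpers. A sketch of ValidateFilters against the accepted-filter set defined above ("driver" is accepted, "foo" is not):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	ok := filters.NewArgs(filters.Arg("driver", "bridge"))
	bad := filters.NewArgs(filters.Arg("foo", "bar"))

	fmt.Println(network.ValidateFilters(ok))  // <nil>
	fmt.Println(network.ValidateFilters(bad)) // Invalid filter 'foo'
}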
diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go
deleted file mode 100644
index abae48b..0000000
--- a/vendor/github.com/docker/docker/api/types/plugin.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// Plugin A plugin for the Engine API
-// swagger:model Plugin
-type Plugin struct {
-
- // config
- // Required: true
- Config PluginConfig `json:"Config"`
-
- // True if the plugin is running. False if the plugin is not running, only installed.
- // Required: true
- Enabled bool `json:"Enabled"`
-
- // Id
- ID string `json:"Id,omitempty"`
-
- // name
- // Required: true
- Name string `json:"Name"`
-
- // plugin remote reference used to push/pull the plugin
- PluginReference string `json:"PluginReference,omitempty"`
-
- // settings
- // Required: true
- Settings PluginSettings `json:"Settings"`
-}
-
-// PluginConfig The config of a plugin.
-// swagger:model PluginConfig
-type PluginConfig struct {
-
- // args
- // Required: true
- Args PluginConfigArgs `json:"Args"`
-
- // description
- // Required: true
- Description string `json:"Description"`
-
- // Docker Version used to create the plugin
- DockerVersion string `json:"DockerVersion,omitempty"`
-
- // documentation
- // Required: true
- Documentation string `json:"Documentation"`
-
- // entrypoint
- // Required: true
- Entrypoint []string `json:"Entrypoint"`
-
- // env
- // Required: true
- Env []PluginEnv `json:"Env"`
-
- // interface
- // Required: true
- Interface PluginConfigInterface `json:"Interface"`
-
- // ipc host
- // Required: true
- IpcHost bool `json:"IpcHost"`
-
- // linux
- // Required: true
- Linux PluginConfigLinux `json:"Linux"`
-
- // mounts
- // Required: true
- Mounts []PluginMount `json:"Mounts"`
-
- // network
- // Required: true
- Network PluginConfigNetwork `json:"Network"`
-
- // pid host
- // Required: true
- PidHost bool `json:"PidHost"`
-
- // propagated mount
- // Required: true
- PropagatedMount string `json:"PropagatedMount"`
-
- // user
- User PluginConfigUser `json:"User,omitempty"`
-
- // work dir
- // Required: true
- WorkDir string `json:"WorkDir"`
-
- // rootfs
- Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
-}
-
-// PluginConfigArgs plugin config args
-// swagger:model PluginConfigArgs
-type PluginConfigArgs struct {
-
- // description
- // Required: true
- Description string `json:"Description"`
-
- // name
- // Required: true
- Name string `json:"Name"`
-
- // settable
- // Required: true
- Settable []string `json:"Settable"`
-
- // value
- // Required: true
- Value []string `json:"Value"`
-}
-
-// PluginConfigInterface The interface between Docker and the plugin
-// swagger:model PluginConfigInterface
-type PluginConfigInterface struct {
-
- // Protocol to use for clients connecting to the plugin.
- ProtocolScheme string `json:"ProtocolScheme,omitempty"`
-
- // socket
- // Required: true
- Socket string `json:"Socket"`
-
- // types
- // Required: true
- Types []PluginInterfaceType `json:"Types"`
-}
-
-// PluginConfigLinux plugin config linux
-// swagger:model PluginConfigLinux
-type PluginConfigLinux struct {
-
- // allow all devices
- // Required: true
- AllowAllDevices bool `json:"AllowAllDevices"`
-
- // capabilities
- // Required: true
- Capabilities []string `json:"Capabilities"`
-
- // devices
- // Required: true
- Devices []PluginDevice `json:"Devices"`
-}
-
-// PluginConfigNetwork plugin config network
-// swagger:model PluginConfigNetwork
-type PluginConfigNetwork struct {
-
- // type
- // Required: true
- Type string `json:"Type"`
-}
-
-// PluginConfigRootfs plugin config rootfs
-// swagger:model PluginConfigRootfs
-type PluginConfigRootfs struct {
-
- // diff ids
- DiffIds []string `json:"diff_ids"`
-
- // type
- Type string `json:"type,omitempty"`
-}
-
-// PluginConfigUser plugin config user
-// swagger:model PluginConfigUser
-type PluginConfigUser struct {
-
- // g ID
- GID uint32 `json:"GID,omitempty"`
-
- // UID
- UID uint32 `json:"UID,omitempty"`
-}
-
-// PluginSettings Settings that can be modified by users.
-// swagger:model PluginSettings
-type PluginSettings struct {
-
- // args
- // Required: true
- Args []string `json:"Args"`
-
- // devices
- // Required: true
- Devices []PluginDevice `json:"Devices"`
-
- // env
- // Required: true
- Env []string `json:"Env"`
-
- // mounts
- // Required: true
- Mounts []PluginMount `json:"Mounts"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go
deleted file mode 100644
index 5699010..0000000
--- a/vendor/github.com/docker/docker/api/types/plugin_device.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// PluginDevice plugin device
-// swagger:model PluginDevice
-type PluginDevice struct {
-
- // description
- // Required: true
- Description string `json:"Description"`
-
- // name
- // Required: true
- Name string `json:"Name"`
-
- // path
- // Required: true
- Path *string `json:"Path"`
-
- // settable
- // Required: true
- Settable []string `json:"Settable"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go
deleted file mode 100644
index 32962dc..0000000
--- a/vendor/github.com/docker/docker/api/types/plugin_env.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// PluginEnv plugin env
-// swagger:model PluginEnv
-type PluginEnv struct {
-
- // description
- // Required: true
- Description string `json:"Description"`
-
- // name
- // Required: true
- Name string `json:"Name"`
-
- // settable
- // Required: true
- Settable []string `json:"Settable"`
-
- // value
- // Required: true
- Value *string `json:"Value"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
deleted file mode 100644
index c82f204..0000000
--- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// PluginInterfaceType plugin interface type
-// swagger:model PluginInterfaceType
-type PluginInterfaceType struct {
-
- // capability
- // Required: true
- Capability string `json:"Capability"`
-
- // prefix
- // Required: true
- Prefix string `json:"Prefix"`
-
- // version
- // Required: true
- Version string `json:"Version"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go
deleted file mode 100644
index 5c031cf..0000000
--- a/vendor/github.com/docker/docker/api/types/plugin_mount.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// PluginMount plugin mount
-// swagger:model PluginMount
-type PluginMount struct {
-
- // description
- // Required: true
- Description string `json:"Description"`
-
- // destination
- // Required: true
- Destination string `json:"Destination"`
-
- // name
- // Required: true
- Name string `json:"Name"`
-
- // options
- // Required: true
- Options []string `json:"Options"`
-
- // settable
- // Required: true
- Settable []string `json:"Settable"`
-
- // source
- // Required: true
- Source *string `json:"Source"`
-
- // type
- // Required: true
- Type string `json:"Type"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go
deleted file mode 100644
index 60d1fb5..0000000
--- a/vendor/github.com/docker/docker/api/types/plugin_responses.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
- "encoding/json"
- "fmt"
- "sort"
-)
-
-// PluginsListResponse contains the response for the Engine API
-type PluginsListResponse []*Plugin
-
-// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
-func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
- versionIndex := len(p)
- prefixIndex := 0
- if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
- return fmt.Errorf("%q is not a plugin interface type", p)
- }
- p = p[1 : len(p)-1]
-loop:
- for i, b := range p {
- switch b {
- case '.':
- prefixIndex = i
- case '/':
- versionIndex = i
- break loop
- }
- }
- t.Prefix = string(p[:prefixIndex])
- t.Capability = string(p[prefixIndex+1 : versionIndex])
- if versionIndex < len(p) {
- t.Version = string(p[versionIndex+1:])
- }
- return nil
-}
-
-// MarshalJSON implements json.Marshaler for PluginInterfaceType
-func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
-}
-
-// String implements fmt.Stringer for PluginInterfaceType
-func (t PluginInterfaceType) String() string {
- return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
-}
-
-// PluginPrivilege describes a permission the user has to accept
-// upon installing a plugin.
-type PluginPrivilege struct {
- Name string
- Description string
- Value []string
-}
-
-// PluginPrivileges is a list of PluginPrivilege
-type PluginPrivileges []PluginPrivilege
-
-func (s PluginPrivileges) Len() int {
- return len(s)
-}
-
-func (s PluginPrivileges) Less(i, j int) bool {
- return s[i].Name < s[j].Name
-}
-
-func (s PluginPrivileges) Swap(i, j int) {
- sort.Strings(s[i].Value)
- sort.Strings(s[j].Value)
- s[i], s[j] = s[j], s[i]
-}
diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go
deleted file mode 100644
index d912347..0000000
--- a/vendor/github.com/docker/docker/api/types/port.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// Port An open port on a container
-// swagger:model Port
-type Port struct {
-
- // Host IP address that the container's port is mapped to
- IP string `json:"IP,omitempty"`
-
- // Port on the container
- // Required: true
- PrivatePort uint16 `json:"PrivatePort"`
-
- // Port exposed on the host
- PublicPort uint16 `json:"PublicPort,omitempty"`
-
- // type
- // Required: true
- Type string `json:"Type"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go
deleted file mode 100644
index f0a2113..0000000
--- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package registry // import "github.com/docker/docker/api/types/registry"
-
-// ----------------------------------------------------------------------------
-// DO NOT EDIT THIS FILE
-// This file was generated by `swagger generate operation`
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// AuthenticateOKBody authenticate o k body
-// swagger:model AuthenticateOKBody
-type AuthenticateOKBody struct {
-
- // An opaque token used to authenticate a user after a successful login
- // Required: true
- IdentityToken string `json:"IdentityToken"`
-
- // The status of the authentication
- // Required: true
- Status string `json:"Status"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go
deleted file mode 100644
index 53e4708..0000000
--- a/vendor/github.com/docker/docker/api/types/registry/registry.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package registry // import "github.com/docker/docker/api/types/registry"
-
-import (
- "encoding/json"
- "net"
-
- v1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// ServiceConfig stores daemon registry services configuration.
-type ServiceConfig struct {
- AllowNondistributableArtifactsCIDRs []*NetIPNet
- AllowNondistributableArtifactsHostnames []string
- InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
- IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
- Mirrors []string
-}
-
-// NetIPNet is the net.IPNet type, which can be marshalled and
-// unmarshalled to JSON
-type NetIPNet net.IPNet
-
-// String returns the CIDR notation of ipnet
-func (ipnet *NetIPNet) String() string {
- return (*net.IPNet)(ipnet).String()
-}
-
-// MarshalJSON returns the JSON representation of the IPNet
-func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
- return json.Marshal((*net.IPNet)(ipnet).String())
-}
-
-// UnmarshalJSON sets the IPNet from a byte array of JSON
-func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
- var ipnetStr string
- if err = json.Unmarshal(b, &ipnetStr); err == nil {
- var cidr *net.IPNet
- if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
- *ipnet = NetIPNet(*cidr)
- }
- }
- return
-}
-
-// IndexInfo contains information about a registry
-//
-// RepositoryInfo Examples:
-// {
-// "Index" : {
-// "Name" : "docker.io",
-// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
-// "Secure" : true,
-// "Official" : true,
-// },
-// "RemoteName" : "library/debian",
-// "LocalName" : "debian",
-// "CanonicalName" : "docker.io/debian"
-// "Official" : true,
-// }
-//
-// {
-// "Index" : {
-// "Name" : "127.0.0.1:5000",
-// "Mirrors" : [],
-// "Secure" : false,
-// "Official" : false,
-// },
-// "RemoteName" : "user/repo",
-// "LocalName" : "127.0.0.1:5000/user/repo",
-// "CanonicalName" : "127.0.0.1:5000/user/repo",
-// "Official" : false,
-// }
-type IndexInfo struct {
- // Name is the name of the registry, such as "docker.io"
- Name string
- // Mirrors is a list of mirrors, expressed as URIs
- Mirrors []string
- // Secure is set to false if the registry is part of the list of
- // insecure registries. Insecure registries accept HTTP and/or accept
- // HTTPS with certificates from unknown CAs.
- Secure bool
- // Official indicates whether this is an official registry
- Official bool
-}
-
-// SearchResult describes a search result returned from a registry
-type SearchResult struct {
- // StarCount indicates the number of stars this repository has
- StarCount int `json:"star_count"`
- // IsOfficial is true if the result is from an official repository.
- IsOfficial bool `json:"is_official"`
- // Name is the name of the repository
- Name string `json:"name"`
- // IsAutomated indicates whether the result is automated
- IsAutomated bool `json:"is_automated"`
- // Description is a textual description of the repository
- Description string `json:"description"`
-}
-
-// SearchResults lists a collection search results returned from a registry
-type SearchResults struct {
- // Query contains the query string that generated the search results
- Query string `json:"query"`
- // NumResults indicates the number of results the query returned
- NumResults int `json:"num_results"`
- // Results is a slice containing the actual results for the search
- Results []SearchResult `json:"results"`
-}
-
-// DistributionInspect describes the result obtained from contacting the
-// registry to retrieve image metadata
-type DistributionInspect struct {
- // Descriptor contains information about the manifest, including
- // the content addressable digest
- Descriptor v1.Descriptor
- // Platforms contains the list of platforms supported by the image,
- // obtained by parsing the manifest
- Platforms []v1.Platform
-}
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
deleted file mode 100644
index 74ea64b..0000000
--- a/vendor/github.com/docker/docker/api/types/service_update_response.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// ServiceUpdateResponse service update response
-// swagger:model ServiceUpdateResponse
-type ServiceUpdateResponse struct {
-
- // Optional warning messages
- Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
deleted file mode 100644
index 20daebe..0000000
--- a/vendor/github.com/docker/docker/api/types/stats.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Package types is used for API stability in the types and response to the
-// consumers of the API stats endpoint.
-package types // import "github.com/docker/docker/api/types"
-
-import "time"
-
-// ThrottlingData stores CPU throttling stats of one running container.
-// Not used on Windows.
-type ThrottlingData struct {
- // Number of periods with throttling active
- Periods uint64 `json:"periods"`
- // Number of periods when the container hits its throttling limit.
- ThrottledPeriods uint64 `json:"throttled_periods"`
- // Aggregate time the container was throttled for in nanoseconds.
- ThrottledTime uint64 `json:"throttled_time"`
-}
-
-// CPUUsage stores All CPU stats aggregated since container inception.
-type CPUUsage struct {
- // Total CPU time consumed.
- // Units: nanoseconds (Linux)
- // Units: 100's of nanoseconds (Windows)
- TotalUsage uint64 `json:"total_usage"`
-
- // Total CPU time consumed per core (Linux). Not used on Windows.
- // Units: nanoseconds.
- PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
-
- // Time spent by tasks of the cgroup in kernel mode (Linux).
- // Time spent by all container processes in kernel mode (Windows).
- // Units: nanoseconds (Linux).
- // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
- UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
-
- // Time spent by tasks of the cgroup in user mode (Linux).
- // Time spent by all container processes in user mode (Windows).
- // Units: nanoseconds (Linux).
- // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
- UsageInUsermode uint64 `json:"usage_in_usermode"`
-}
-
-// CPUStats aggregates and wraps all CPU related info of container
-type CPUStats struct {
- // CPU Usage. Linux and Windows.
- CPUUsage CPUUsage `json:"cpu_usage"`
-
- // System Usage. Linux only.
- SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
-
- // Online CPUs. Linux only.
- OnlineCPUs uint32 `json:"online_cpus,omitempty"`
-
- // Throttling Data. Linux only.
- ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
-}
-
-// MemoryStats aggregates all memory stats since container inception on Linux.
-// Windows returns stats for commit and private working set only.
-type MemoryStats struct {
- // Linux Memory Stats
-
- // current res_counter usage for memory
- Usage uint64 `json:"usage,omitempty"`
- // maximum usage ever recorded.
- MaxUsage uint64 `json:"max_usage,omitempty"`
- // TODO(vishh): Export these as stronger types.
- // all the stats exported via memory.stat.
- Stats map[string]uint64 `json:"stats,omitempty"`
- // number of times memory usage hits limits.
- Failcnt uint64 `json:"failcnt,omitempty"`
- Limit uint64 `json:"limit,omitempty"`
-
- // Windows Memory Stats
- // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
-
- // committed bytes
- Commit uint64 `json:"commitbytes,omitempty"`
- // peak committed bytes
- CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
- // private working set
- PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
-}
-
-// BlkioStatEntry is one small entity to store a piece of Blkio stats
-// Not used on Windows.
-type BlkioStatEntry struct {
- Major uint64 `json:"major"`
- Minor uint64 `json:"minor"`
- Op string `json:"op"`
- Value uint64 `json:"value"`
-}
-
-// BlkioStats stores All IO service stats for data read and write.
-// This is a Linux specific structure as the differences between expressing
-// block I/O on Windows and Linux are sufficiently significant to make
-// little sense attempting to morph into a combined structure.
-type BlkioStats struct {
- // number of bytes transferred to and from the block device
- IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
- IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
- IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
- IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
- IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
- IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
- IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
- SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
-}
-
-// StorageStats is the disk I/O stats for read/write on Windows.
-type StorageStats struct {
- ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
- ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
- WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
- WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
-}
-
-// NetworkStats aggregates the network stats of one container
-type NetworkStats struct {
- // Bytes received. Windows and Linux.
- RxBytes uint64 `json:"rx_bytes"`
- // Packets received. Windows and Linux.
- RxPackets uint64 `json:"rx_packets"`
- // Received errors. Not used on Windows. Note that we don't `omitempty` this
- // field as it is expected in the >=v1.21 API stats structure.
- RxErrors uint64 `json:"rx_errors"`
- // Incoming packets dropped. Windows and Linux.
- RxDropped uint64 `json:"rx_dropped"`
- // Bytes sent. Windows and Linux.
- TxBytes uint64 `json:"tx_bytes"`
- // Packets sent. Windows and Linux.
- TxPackets uint64 `json:"tx_packets"`
- // Sent errors. Not used on Windows. Note that we don't `omitempty` this
- // field as it is expected in the >=v1.21 API stats structure.
- TxErrors uint64 `json:"tx_errors"`
- // Outgoing packets dropped. Windows and Linux.
- TxDropped uint64 `json:"tx_dropped"`
- // Endpoint ID. Not used on Linux.
- EndpointID string `json:"endpoint_id,omitempty"`
- // Instance ID. Not used on Linux.
- InstanceID string `json:"instance_id,omitempty"`
-}
-
-// PidsStats contains the stats of a container's pids
-type PidsStats struct {
- // Current is the number of pids in the cgroup
- Current uint64 `json:"current,omitempty"`
- // Limit is the hard limit on the number of pids in the cgroup.
- // A "Limit" of 0 means that there is no limit.
- Limit uint64 `json:"limit,omitempty"`
-}
-
-// Stats is Ultimate struct aggregating all types of stats of one container
-type Stats struct {
- // Common stats
- Read time.Time `json:"read"`
- PreRead time.Time `json:"preread"`
-
- // Linux specific stats, not populated on Windows.
- PidsStats PidsStats `json:"pids_stats,omitempty"`
- BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
-
- // Windows specific stats, not populated on Linux.
- NumProcs uint32 `json:"num_procs"`
- StorageStats StorageStats `json:"storage_stats,omitempty"`
-
- // Shared stats
- CPUStats CPUStats `json:"cpu_stats,omitempty"`
- PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
- MemoryStats MemoryStats `json:"memory_stats,omitempty"`
-}
-
-// StatsJSON is newly used Networks
-type StatsJSON struct {
- Stats
-
- Name string `json:"name,omitempty"`
- ID string `json:"id,omitempty"`
-
- // Networks request version >=1.21
- Networks map[string]NetworkStats `json:"networks,omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
deleted file mode 100644
index 82921ce..0000000
--- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package strslice // import "github.com/docker/docker/api/types/strslice"
-
-import "encoding/json"
-
-// StrSlice represents a string or an array of strings.
-// We need to override the json decoder to accept both options.
-type StrSlice []string
-
-// UnmarshalJSON decodes the byte slice whether it's a string or an array of
-// strings. This method is needed to implement json.Unmarshaler.
-func (e *StrSlice) UnmarshalJSON(b []byte) error {
- if len(b) == 0 {
- // With no input, we preserve the existing value by returning nil and
- // leaving the target alone. This allows defining default values for
- // the type.
- return nil
- }
-
- p := make([]string, 0, 1)
- if err := json.Unmarshal(b, &p); err != nil {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
- return err
- }
- p = append(p, s)
- }
-
- *e = p
- return nil
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
deleted file mode 100644
index ef020f4..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/common.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import "time"
-
-// Version represents the internal object version.
-type Version struct {
- Index uint64 `json:",omitempty"`
-}
-
-// Meta is a base object inherited by most of the other once.
-type Meta struct {
- Version Version `json:",omitempty"`
- CreatedAt time.Time `json:",omitempty"`
- UpdatedAt time.Time `json:",omitempty"`
-}
-
-// Annotations represents how to describe an object.
-type Annotations struct {
- Name string `json:",omitempty"`
- Labels map[string]string `json:"Labels"`
-}
-
-// Driver represents a driver (network, logging, secrets backend).
-type Driver struct {
- Name string `json:",omitempty"`
- Options map[string]string `json:",omitempty"`
-}
-
-// TLSInfo represents the TLS information about what CA certificate is trusted,
-// and who the issuer for a TLS certificate is
-type TLSInfo struct {
- // TrustRoot is the trusted CA root certificate in PEM format
- TrustRoot string `json:",omitempty"`
-
- // CertIssuer is the raw subject bytes of the issuer
- CertIssuerSubject []byte `json:",omitempty"`
-
- // CertIssuerPublicKey is the raw public key bytes of the issuer
- CertIssuerPublicKey []byte `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
deleted file mode 100644
index 16202cc..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/config.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import "os"
-
-// Config represents a config.
-type Config struct {
- ID string
- Meta
- Spec ConfigSpec
-}
-
-// ConfigSpec represents a config specification from a config in swarm
-type ConfigSpec struct {
- Annotations
- Data []byte `json:",omitempty"`
-
- // Templating controls whether and how to evaluate the config payload as
- // a template. If it is not set, no templating is used.
- Templating *Driver `json:",omitempty"`
-}
-
-// ConfigReferenceFileTarget is a file target in a config reference
-type ConfigReferenceFileTarget struct {
- Name string
- UID string
- GID string
- Mode os.FileMode
-}
-
-// ConfigReferenceRuntimeTarget is a target for a config specifying that it
-// isn't mounted into the container but instead has some other purpose.
-type ConfigReferenceRuntimeTarget struct{}
-
-// ConfigReference is a reference to a config in swarm
-type ConfigReference struct {
- File *ConfigReferenceFileTarget `json:",omitempty"`
- Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"`
- ConfigID string
- ConfigName string
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
deleted file mode 100644
index af5e1c0..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/container.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import (
- "time"
-
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/mount"
- "github.com/docker/go-units"
-)
-
-// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
-// Detailed documentation is available in:
-// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
-// `nameserver`, `search`, `options` have been supported.
-// TODO: `domain` is not supported yet.
-type DNSConfig struct {
- // Nameservers specifies the IP addresses of the name servers
- Nameservers []string `json:",omitempty"`
- // Search specifies the search list for host-name lookup
- Search []string `json:",omitempty"`
- // Options allows certain internal resolver variables to be modified
- Options []string `json:",omitempty"`
-}
-
-// SELinuxContext contains the SELinux labels of the container.
-type SELinuxContext struct {
- Disable bool
-
- User string
- Role string
- Type string
- Level string
-}
-
-// CredentialSpec for managed service account (Windows only)
-type CredentialSpec struct {
- Config string
- File string
- Registry string
-}
-
-// Privileges defines the security options for the container.
-type Privileges struct {
- CredentialSpec *CredentialSpec
- SELinuxContext *SELinuxContext
-}
-
-// ContainerSpec represents the spec of a container.
-type ContainerSpec struct {
- Image string `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
- Command []string `json:",omitempty"`
- Args []string `json:",omitempty"`
- Hostname string `json:",omitempty"`
- Env []string `json:",omitempty"`
- Dir string `json:",omitempty"`
- User string `json:",omitempty"`
- Groups []string `json:",omitempty"`
- Privileges *Privileges `json:",omitempty"`
- Init *bool `json:",omitempty"`
- StopSignal string `json:",omitempty"`
- TTY bool `json:",omitempty"`
- OpenStdin bool `json:",omitempty"`
- ReadOnly bool `json:",omitempty"`
- Mounts []mount.Mount `json:",omitempty"`
- StopGracePeriod *time.Duration `json:",omitempty"`
- Healthcheck *container.HealthConfig `json:",omitempty"`
- // The format of extra hosts on swarmkit is specified in:
- // http://man7.org/linux/man-pages/man5/hosts.5.html
- // IP_address canonical_hostname [aliases...]
- Hosts []string `json:",omitempty"`
- DNSConfig *DNSConfig `json:",omitempty"`
- Secrets []*SecretReference `json:",omitempty"`
- Configs []*ConfigReference `json:",omitempty"`
- Isolation container.Isolation `json:",omitempty"`
- Sysctls map[string]string `json:",omitempty"`
- CapabilityAdd []string `json:",omitempty"`
- CapabilityDrop []string `json:",omitempty"`
- Ulimits []*units.Ulimit `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go
deleted file mode 100644
index 98ef328..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/network.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import (
- "github.com/docker/docker/api/types/network"
-)
-
-// Endpoint represents an endpoint.
-type Endpoint struct {
- Spec EndpointSpec `json:",omitempty"`
- Ports []PortConfig `json:",omitempty"`
- VirtualIPs []EndpointVirtualIP `json:",omitempty"`
-}
-
-// EndpointSpec represents the spec of an endpoint.
-type EndpointSpec struct {
- Mode ResolutionMode `json:",omitempty"`
- Ports []PortConfig `json:",omitempty"`
-}
-
-// ResolutionMode represents a resolution mode.
-type ResolutionMode string
-
-const (
- // ResolutionModeVIP VIP
- ResolutionModeVIP ResolutionMode = "vip"
- // ResolutionModeDNSRR DNSRR
- ResolutionModeDNSRR ResolutionMode = "dnsrr"
-)
-
-// PortConfig represents the config of a port.
-type PortConfig struct {
- Name string `json:",omitempty"`
- Protocol PortConfigProtocol `json:",omitempty"`
- // TargetPort is the port inside the container
- TargetPort uint32 `json:",omitempty"`
- // PublishedPort is the port on the swarm hosts
- PublishedPort uint32 `json:",omitempty"`
- // PublishMode is the mode in which port is published
- PublishMode PortConfigPublishMode `json:",omitempty"`
-}
-
-// PortConfigPublishMode represents the mode in which the port is to
-// be published.
-type PortConfigPublishMode string
-
-const (
- // PortConfigPublishModeIngress is used for ports published
- // for ingress load balancing using routing mesh.
- PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
- // PortConfigPublishModeHost is used for ports published
- // for direct host level access on the host where the task is running.
- PortConfigPublishModeHost PortConfigPublishMode = "host"
-)
-
-// PortConfigProtocol represents the protocol of a port.
-type PortConfigProtocol string
-
-const (
- // TODO(stevvooe): These should be used generally, not just for PortConfig.
-
- // PortConfigProtocolTCP TCP
- PortConfigProtocolTCP PortConfigProtocol = "tcp"
- // PortConfigProtocolUDP UDP
- PortConfigProtocolUDP PortConfigProtocol = "udp"
- // PortConfigProtocolSCTP SCTP
- PortConfigProtocolSCTP PortConfigProtocol = "sctp"
-)
-
-// EndpointVirtualIP represents the virtual ip of a port.
-type EndpointVirtualIP struct {
- NetworkID string `json:",omitempty"`
- Addr string `json:",omitempty"`
-}
-
-// Network represents a network.
-type Network struct {
- ID string
- Meta
- Spec NetworkSpec `json:",omitempty"`
- DriverState Driver `json:",omitempty"`
- IPAMOptions *IPAMOptions `json:",omitempty"`
-}
-
-// NetworkSpec represents the spec of a network.
-type NetworkSpec struct {
- Annotations
- DriverConfiguration *Driver `json:",omitempty"`
- IPv6Enabled bool `json:",omitempty"`
- Internal bool `json:",omitempty"`
- Attachable bool `json:",omitempty"`
- Ingress bool `json:",omitempty"`
- IPAMOptions *IPAMOptions `json:",omitempty"`
- ConfigFrom *network.ConfigReference `json:",omitempty"`
- Scope string `json:",omitempty"`
-}
-
-// NetworkAttachmentConfig represents the configuration of a network attachment.
-type NetworkAttachmentConfig struct {
- Target string `json:",omitempty"`
- Aliases []string `json:",omitempty"`
- DriverOpts map[string]string `json:",omitempty"`
-}
-
-// NetworkAttachment represents a network attachment.
-type NetworkAttachment struct {
- Network Network `json:",omitempty"`
- Addresses []string `json:",omitempty"`
-}
-
-// IPAMOptions represents ipam options.
-type IPAMOptions struct {
- Driver Driver `json:",omitempty"`
- Configs []IPAMConfig `json:",omitempty"`
-}
-
-// IPAMConfig represents ipam configuration.
-type IPAMConfig struct {
- Subnet string `json:",omitempty"`
- Range string `json:",omitempty"`
- Gateway string `json:",omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go
deleted file mode 100644
index 1e30f5f..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/node.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-// Node represents a node.
-type Node struct {
- ID string
- Meta
- // Spec defines the desired state of the node as specified by the user.
- // The system will honor this and will *never* modify it.
- Spec NodeSpec `json:",omitempty"`
- // Description encapsulates the properties of the Node as reported by the
- // agent.
- Description NodeDescription `json:",omitempty"`
- // Status provides the current status of the node, as seen by the manager.
- Status NodeStatus `json:",omitempty"`
- // ManagerStatus provides the current status of the node's manager
- // component, if the node is a manager.
- ManagerStatus *ManagerStatus `json:",omitempty"`
-}
-
-// NodeSpec represents the spec of a node.
-type NodeSpec struct {
- Annotations
- Role NodeRole `json:",omitempty"`
- Availability NodeAvailability `json:",omitempty"`
-}
-
-// NodeRole represents the role of a node.
-type NodeRole string
-
-const (
- // NodeRoleWorker WORKER
- NodeRoleWorker NodeRole = "worker"
- // NodeRoleManager MANAGER
- NodeRoleManager NodeRole = "manager"
-)
-
-// NodeAvailability represents the availability of a node.
-type NodeAvailability string
-
-const (
- // NodeAvailabilityActive ACTIVE
- NodeAvailabilityActive NodeAvailability = "active"
- // NodeAvailabilityPause PAUSE
- NodeAvailabilityPause NodeAvailability = "pause"
- // NodeAvailabilityDrain DRAIN
- NodeAvailabilityDrain NodeAvailability = "drain"
-)
-
-// NodeDescription represents the description of a node.
-type NodeDescription struct {
- Hostname string `json:",omitempty"`
- Platform Platform `json:",omitempty"`
- Resources Resources `json:",omitempty"`
- Engine EngineDescription `json:",omitempty"`
- TLSInfo TLSInfo `json:",omitempty"`
-}
-
-// Platform represents the platform (Arch/OS).
-type Platform struct {
- Architecture string `json:",omitempty"`
- OS string `json:",omitempty"`
-}
-
-// EngineDescription represents the description of an engine.
-type EngineDescription struct {
- EngineVersion string `json:",omitempty"`
- Labels map[string]string `json:",omitempty"`
- Plugins []PluginDescription `json:",omitempty"`
-}
-
-// PluginDescription represents the description of an engine plugin.
-type PluginDescription struct {
- Type string `json:",omitempty"`
- Name string `json:",omitempty"`
-}
-
-// NodeStatus represents the status of a node.
-type NodeStatus struct {
- State NodeState `json:",omitempty"`
- Message string `json:",omitempty"`
- Addr string `json:",omitempty"`
-}
-
-// Reachability represents the reachability of a node.
-type Reachability string
-
-const (
- // ReachabilityUnknown UNKNOWN
- ReachabilityUnknown Reachability = "unknown"
- // ReachabilityUnreachable UNREACHABLE
- ReachabilityUnreachable Reachability = "unreachable"
- // ReachabilityReachable REACHABLE
- ReachabilityReachable Reachability = "reachable"
-)
-
-// ManagerStatus represents the status of a manager.
-type ManagerStatus struct {
- Leader bool `json:",omitempty"`
- Reachability Reachability `json:",omitempty"`
- Addr string `json:",omitempty"`
-}
-
-// NodeState represents the state of a node.
-type NodeState string
-
-const (
- // NodeStateUnknown UNKNOWN
- NodeStateUnknown NodeState = "unknown"
- // NodeStateDown DOWN
- NodeStateDown NodeState = "down"
- // NodeStateReady READY
- NodeStateReady NodeState = "ready"
- // NodeStateDisconnected DISCONNECTED
- NodeStateDisconnected NodeState = "disconnected"
-)
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go
deleted file mode 100644
index 0c77403..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-// RuntimeType is the type of runtime used for the TaskSpec
-type RuntimeType string
-
-// RuntimeURL is the proto type url
-type RuntimeURL string
-
-const (
- // RuntimeContainer is the container based runtime
- RuntimeContainer RuntimeType = "container"
- // RuntimePlugin is the plugin based runtime
- RuntimePlugin RuntimeType = "plugin"
- // RuntimeNetworkAttachment is the network attachment runtime
- RuntimeNetworkAttachment RuntimeType = "attachment"
-
- // RuntimeURLContainer is the proto url for the container type
- RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer"
- // RuntimeURLPlugin is the proto url for the plugin type
- RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin"
-)
-
-// NetworkAttachmentSpec represents the runtime spec type for network
-// attachment tasks
-type NetworkAttachmentSpec struct {
- ContainerID string
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
deleted file mode 100644
index 98c2806..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
+++ /dev/null
@@ -1,3 +0,0 @@
-//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
-
-package runtime // import "github.com/docker/docker/api/types/swarm/runtime"
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
deleted file mode 100644
index e450458..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
+++ /dev/null
@@ -1,754 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: plugin.proto
-
-/*
- Package runtime is a generated protocol buffer package.
-
- It is generated from these files:
- plugin.proto
-
- It has these top-level messages:
- PluginSpec
- PluginPrivilege
-*/
-package runtime
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-
-// PluginSpec defines the base payload which clients can specify for creating
-// a service with the plugin runtime.
-type PluginSpec struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
- Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
- Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
- Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
-}
-
-func (m *PluginSpec) Reset() { *m = PluginSpec{} }
-func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
-func (*PluginSpec) ProtoMessage() {}
-func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
-
-func (m *PluginSpec) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *PluginSpec) GetRemote() string {
- if m != nil {
- return m.Remote
- }
- return ""
-}
-
-func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
- if m != nil {
- return m.Privileges
- }
- return nil
-}
-
-func (m *PluginSpec) GetDisabled() bool {
- if m != nil {
- return m.Disabled
- }
- return false
-}
-
-func (m *PluginSpec) GetEnv() []string {
- if m != nil {
- return m.Env
- }
- return nil
-}
-
-// PluginPrivilege describes a permission the user has to accept
-// upon installing a plugin.
-type PluginPrivilege struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
- Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
-}
-
-func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
-func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
-func (*PluginPrivilege) ProtoMessage() {}
-func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
-
-func (m *PluginPrivilege) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *PluginPrivilege) GetDescription() string {
- if m != nil {
- return m.Description
- }
- return ""
-}
-
-func (m *PluginPrivilege) GetValue() []string {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
- proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
-}
-func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if len(m.Remote) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
- i += copy(dAtA[i:], m.Remote)
- }
- if len(m.Privileges) > 0 {
- for _, msg := range m.Privileges {
- dAtA[i] = 0x1a
- i++
- i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(dAtA[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.Disabled {
- dAtA[i] = 0x20
- i++
- if m.Disabled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
- }
- if len(m.Env) > 0 {
- for _, s := range m.Env {
- dAtA[i] = 0x2a
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
- }
- return i, nil
-}
-
-func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Name) > 0 {
- dAtA[i] = 0xa
- i++
- i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
- i += copy(dAtA[i:], m.Name)
- }
- if len(m.Description) > 0 {
- dAtA[i] = 0x12
- i++
- i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
- i += copy(dAtA[i:], m.Description)
- }
- if len(m.Value) > 0 {
- for _, s := range m.Value {
- dAtA[i] = 0x1a
- i++
- l = len(s)
- for l >= 1<<7 {
- dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
- l >>= 7
- i++
- }
- dAtA[i] = uint8(l)
- i++
- i += copy(dAtA[i:], s)
- }
- }
- return i, nil
-}
-
-func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return offset + 1
-}
-func (m *PluginSpec) Size() (n int) {
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovPlugin(uint64(l))
- }
- l = len(m.Remote)
- if l > 0 {
- n += 1 + l + sovPlugin(uint64(l))
- }
- if len(m.Privileges) > 0 {
- for _, e := range m.Privileges {
- l = e.Size()
- n += 1 + l + sovPlugin(uint64(l))
- }
- }
- if m.Disabled {
- n += 2
- }
- if len(m.Env) > 0 {
- for _, s := range m.Env {
- l = len(s)
- n += 1 + l + sovPlugin(uint64(l))
- }
- }
- return n
-}
-
-func (m *PluginPrivilege) Size() (n int) {
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovPlugin(uint64(l))
- }
- l = len(m.Description)
- if l > 0 {
- n += 1 + l + sovPlugin(uint64(l))
- }
- if len(m.Value) > 0 {
- for _, s := range m.Value {
- l = len(s)
- n += 1 + l + sovPlugin(uint64(l))
- }
- }
- return n
-}
-
-func sovPlugin(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozPlugin(x uint64) (n int) {
- return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *PluginSpec) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Remote = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Privileges = append(m.Privileges, &PluginPrivilege{})
- if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Disabled = bool(v != 0)
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Env = append(m.Env, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPlugin(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPlugin
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Description = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthPlugin
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipPlugin(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthPlugin
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipPlugin(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthPlugin
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowPlugin
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipPlugin(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
-)
-
-func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
-
-var fileDescriptorPlugin = []byte{
- // 256 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30,
- 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a,
- 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17,
- 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64,
- 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e,
- 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64,
- 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4,
- 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec,
- 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9,
- 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9,
- 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6,
- 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb,
- 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8,
- 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb,
- 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38,
- 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
deleted file mode 100644
index 9ef1690..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime";
-
-// PluginSpec defines the base payload which clients can specify for creating
-// a service with the plugin runtime.
-message PluginSpec {
- string name = 1;
- string remote = 2;
- repeated PluginPrivilege privileges = 3;
- bool disabled = 4;
- repeated string env = 5;
-}
-
-// PluginPrivilege describes a permission the user has to accept
-// upon installing a plugin.
-message PluginPrivilege {
- string name = 1;
- string description = 2;
- repeated string value = 3;
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go
deleted file mode 100644
index d5213ec..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/secret.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import "os"
-
-// Secret represents a secret.
-type Secret struct {
- ID string
- Meta
- Spec SecretSpec
-}
-
-// SecretSpec represents a secret specification from a secret in swarm
-type SecretSpec struct {
- Annotations
- Data []byte `json:",omitempty"`
- Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
-
- // Templating controls whether and how to evaluate the secret payload as
- // a template. If it is not set, no templating is used.
- Templating *Driver `json:",omitempty"`
-}
-
-// SecretReferenceFileTarget is a file target in a secret reference
-type SecretReferenceFileTarget struct {
- Name string
- UID string
- GID string
- Mode os.FileMode
-}
-
-// SecretReference is a reference to a secret in swarm
-type SecretReference struct {
- File *SecretReferenceFileTarget
- SecretID string
- SecretName string
-}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go
deleted file mode 100644
index 6eb452d..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/service.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import "time"
-
-// Service represents a service.
-type Service struct {
- ID string
- Meta
- Spec ServiceSpec `json:",omitempty"`
- PreviousSpec *ServiceSpec `json:",omitempty"`
- Endpoint Endpoint `json:",omitempty"`
- UpdateStatus *UpdateStatus `json:",omitempty"`
-
- // ServiceStatus is an optional, extra field indicating the number of
- // desired and running tasks. It is provided primarily as a shortcut to
- // calculating these values client-side, which otherwise would require
- // listing all tasks for a service, an operation that could be
- // computation and network expensive.
- ServiceStatus *ServiceStatus `json:",omitempty"`
-
- // JobStatus is the status of a Service which is in one of ReplicatedJob or
- // GlobalJob modes. It is absent on Replicated and Global services.
- JobStatus *JobStatus `json:",omitempty"`
-}
-
-// ServiceSpec represents the spec of a service.
-type ServiceSpec struct {
- Annotations
-
- // TaskTemplate defines how the service should construct new tasks when
- // orchestrating this service.
- TaskTemplate TaskSpec `json:",omitempty"`
- Mode ServiceMode `json:",omitempty"`
- UpdateConfig *UpdateConfig `json:",omitempty"`
- RollbackConfig *UpdateConfig `json:",omitempty"`
-
- // Networks field in ServiceSpec is deprecated. The
- // same field in TaskSpec should be used instead.
- // This field will be removed in a future release.
- Networks []NetworkAttachmentConfig `json:",omitempty"`
- EndpointSpec *EndpointSpec `json:",omitempty"`
-}
-
-// ServiceMode represents the mode of a service.
-type ServiceMode struct {
- Replicated *ReplicatedService `json:",omitempty"`
- Global *GlobalService `json:",omitempty"`
- ReplicatedJob *ReplicatedJob `json:",omitempty"`
- GlobalJob *GlobalJob `json:",omitempty"`
-}
-
-// UpdateState is the state of a service update.
-type UpdateState string
-
-const (
- // UpdateStateUpdating is the updating state.
- UpdateStateUpdating UpdateState = "updating"
- // UpdateStatePaused is the paused state.
- UpdateStatePaused UpdateState = "paused"
- // UpdateStateCompleted is the completed state.
- UpdateStateCompleted UpdateState = "completed"
- // UpdateStateRollbackStarted is the state with a rollback in progress.
- UpdateStateRollbackStarted UpdateState = "rollback_started"
- // UpdateStateRollbackPaused is the state with a rollback in progress.
- UpdateStateRollbackPaused UpdateState = "rollback_paused"
- // UpdateStateRollbackCompleted is the state with a rollback in progress.
- UpdateStateRollbackCompleted UpdateState = "rollback_completed"
-)
-
-// UpdateStatus reports the status of a service update.
-type UpdateStatus struct {
- State UpdateState `json:",omitempty"`
- StartedAt *time.Time `json:",omitempty"`
- CompletedAt *time.Time `json:",omitempty"`
- Message string `json:",omitempty"`
-}
-
-// ReplicatedService is a kind of ServiceMode.
-type ReplicatedService struct {
- Replicas *uint64 `json:",omitempty"`
-}
-
-// GlobalService is a kind of ServiceMode.
-type GlobalService struct{}
-
-// ReplicatedJob is the a type of Service which executes a defined Tasks
-// in parallel until the specified number of Tasks have succeeded.
-type ReplicatedJob struct {
- // MaxConcurrent indicates the maximum number of Tasks that should be
- // executing simultaneously for this job at any given time. There may be
- // fewer Tasks that MaxConcurrent executing simultaneously; for example, if
- // there are fewer than MaxConcurrent tasks needed to reach
- // TotalCompletions.
- //
- // If this field is empty, it will default to a max concurrency of 1.
- MaxConcurrent *uint64 `json:",omitempty"`
-
- // TotalCompletions is the total number of Tasks desired to run to
- // completion.
- //
- // If this field is empty, the value of MaxConcurrent will be used.
- TotalCompletions *uint64 `json:",omitempty"`
-}
-
-// GlobalJob is the type of a Service which executes a Task on every Node
-// matching the Service's placement constraints. These tasks run to completion
-// and then exit.
-//
-// This type is deliberately empty.
-type GlobalJob struct{}
-
-const (
- // UpdateFailureActionPause PAUSE
- UpdateFailureActionPause = "pause"
- // UpdateFailureActionContinue CONTINUE
- UpdateFailureActionContinue = "continue"
- // UpdateFailureActionRollback ROLLBACK
- UpdateFailureActionRollback = "rollback"
-
- // UpdateOrderStopFirst STOP_FIRST
- UpdateOrderStopFirst = "stop-first"
- // UpdateOrderStartFirst START_FIRST
- UpdateOrderStartFirst = "start-first"
-)
-
-// UpdateConfig represents the update configuration.
-type UpdateConfig struct {
- // Maximum number of tasks to be updated in one iteration.
- // 0 means unlimited parallelism.
- Parallelism uint64
-
- // Amount of time between updates.
- Delay time.Duration `json:",omitempty"`
-
- // FailureAction is the action to take when an update failures.
- FailureAction string `json:",omitempty"`
-
- // Monitor indicates how long to monitor a task for failure after it is
- // created. If the task fails by ending up in one of the states
- // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
- // this counts as a failure. If it fails after Monitor, it does not
- // count as a failure. If Monitor is unspecified, a default value will
- // be used.
- Monitor time.Duration `json:",omitempty"`
-
- // MaxFailureRatio is the fraction of tasks that may fail during
- // an update before the failure action is invoked. Any task created by
- // the current update which ends up in one of the states REJECTED,
- // COMPLETED or FAILED within Monitor from its creation counts as a
- // failure. The number of failures is divided by the number of tasks
- // being updated, and if this fraction is greater than
- // MaxFailureRatio, the failure action is invoked.
- //
- // If the failure action is CONTINUE, there is no effect.
- // If the failure action is PAUSE, no more tasks will be updated until
- // another update is started.
- MaxFailureRatio float32
-
- // Order indicates the order of operations when rolling out an updated
- // task. Either the old task is shut down before the new task is
- // started, or the new task is started before the old task is shut down.
- Order string
-}
-
-// ServiceStatus represents the number of running tasks in a service and the
-// number of tasks desired to be running.
-type ServiceStatus struct {
- // RunningTasks is the number of tasks for the service actually in the
- // Running state
- RunningTasks uint64
-
- // DesiredTasks is the number of tasks desired to be running by the
- // service. For replicated services, this is the replica count. For global
- // services, this is computed by taking the number of tasks with desired
- // state of not-Shutdown.
- DesiredTasks uint64
-
- // CompletedTasks is the number of tasks in the state Completed, if this
- // service is in ReplicatedJob or GlobalJob mode. This field must be
- // cross-referenced with the service type, because the default value of 0
- // may mean that a service is not in a job mode, or it may mean that the
- // job has yet to complete any tasks.
- CompletedTasks uint64
-}
-
-// JobStatus is the status of a job-type service.
-type JobStatus struct {
- // JobIteration is a value increased each time a Job is executed,
- // successfully or otherwise. "Executed", in this case, means the job as a
- // whole has been started, not that an individual Task has been launched. A
- // job is "Executed" when its ServiceSpec is updated. JobIteration can be
- // used to disambiguate Tasks belonging to different executions of a job.
- //
- // Though JobIteration will increase with each subsequent execution, it may
- // not necessarily increase by 1, and so JobIteration should not be used to
- // keep track of the number of times a job has been executed.
- JobIteration Version
-
- // LastExecution is the time that the job was last executed, as observed by
- // Swarm manager.
- LastExecution time.Time `json:",omitempty"`
-}
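
For reference, a minimal illustrative sketch of how the removed swarm.UpdateConfig type and the failure-action/order constants above fit together (the concrete values are arbitrary examples, assuming the docker module were still importable):

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Update two tasks at a time, wait 5s between batches, watch each new
	// task for 30s, and roll back automatically if more than 10% of the
	// updated tasks end up REJECTED, COMPLETED, or FAILED within that window.
	cfg := swarm.UpdateConfig{
		Parallelism:     2,
		Delay:           5 * time.Second,
		FailureAction:   swarm.UpdateFailureActionRollback,
		Monitor:         30 * time.Second,
		MaxFailureRatio: 0.1,
		Order:           swarm.UpdateOrderStartFirst,
	}
	fmt.Printf("%+v\n", cfg)
}
```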
diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go
deleted file mode 100644
index b25f999..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import (
- "time"
-)
-
-// ClusterInfo represents info about the cluster for outputting in "info".
-// It contains the same information as "Swarm", but without the JoinTokens.
-type ClusterInfo struct {
- ID string
- Meta
- Spec Spec
- TLSInfo TLSInfo
- RootRotationInProgress bool
- DefaultAddrPool []string
- SubnetSize uint32
- DataPathPort uint32
-}
-
-// Swarm represents a swarm.
-type Swarm struct {
- ClusterInfo
- JoinTokens JoinTokens
-}
-
-// JoinTokens contains the tokens workers and managers need to join the swarm.
-type JoinTokens struct {
- // Worker is the join token workers may use to join the swarm.
- Worker string
- // Manager is the join token managers may use to join the swarm.
- Manager string
-}
-
-// Spec represents the spec of a swarm.
-type Spec struct {
- Annotations
-
- Orchestration OrchestrationConfig `json:",omitempty"`
- Raft RaftConfig `json:",omitempty"`
- Dispatcher DispatcherConfig `json:",omitempty"`
- CAConfig CAConfig `json:",omitempty"`
- TaskDefaults TaskDefaults `json:",omitempty"`
- EncryptionConfig EncryptionConfig `json:",omitempty"`
-}
-
-// OrchestrationConfig represents orchestration configuration.
-type OrchestrationConfig struct {
- // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
- // node. If negative, never remove completed or failed tasks.
- TaskHistoryRetentionLimit *int64 `json:",omitempty"`
-}
-
-// TaskDefaults parameterizes cluster-level task creation with default values.
-type TaskDefaults struct {
- // LogDriver selects the log driver to use for tasks created in the
- // orchestrator if unspecified by a service.
- //
- // Updating this value will only have an effect on new tasks. Old tasks
- // will continue to use their previously configured log driver until
- // recreated.
- LogDriver *Driver `json:",omitempty"`
-}
-
-// EncryptionConfig controls at-rest encryption of data and keys.
-type EncryptionConfig struct {
- // AutoLockManagers specifies whether or not managers' TLS keys and raft data
- // should be encrypted at rest in such a way that they must be unlocked
- // before the manager node starts up again.
- AutoLockManagers bool
-}
-
-// RaftConfig represents raft configuration.
-type RaftConfig struct {
- // SnapshotInterval is the number of log entries between snapshots.
- SnapshotInterval uint64 `json:",omitempty"`
-
- // KeepOldSnapshots is the number of snapshots to keep beyond the
- // current snapshot.
- KeepOldSnapshots *uint64 `json:",omitempty"`
-
- // LogEntriesForSlowFollowers is the number of log entries to keep
- // around to sync up slow followers after a snapshot is created.
- LogEntriesForSlowFollowers uint64 `json:",omitempty"`
-
- // ElectionTick is the number of ticks that a follower will wait for a message
- // from the leader before becoming a candidate and starting an election.
- // ElectionTick must be greater than HeartbeatTick.
- //
- // A tick currently defaults to one second, so these translate directly to
- // seconds currently, but this is NOT guaranteed.
- ElectionTick int
-
- // HeartbeatTick is the number of ticks between heartbeats. Every
- // HeartbeatTick ticks, the leader will send a heartbeat to the
- // followers.
- //
- // A tick currently defaults to one second, so these translate directly to
- // seconds currently, but this is NOT guaranteed.
- HeartbeatTick int
-}
-
-// DispatcherConfig represents dispatcher configuration.
-type DispatcherConfig struct {
- // HeartbeatPeriod defines how often agent should send heartbeats to
- // dispatcher.
- HeartbeatPeriod time.Duration `json:",omitempty"`
-}
-
-// CAConfig represents CA configuration.
-type CAConfig struct {
- // NodeCertExpiry is the duration certificates should be issued for.
- NodeCertExpiry time.Duration `json:",omitempty"`
-
- // ExternalCAs is a list of CAs to which a manager node will make
- // certificate signing requests for node certificates.
- ExternalCAs []*ExternalCA `json:",omitempty"`
-
- // SigningCACert and SigningCAKey specify the desired signing root CA and
- // root CA key for the swarm. When inspecting the cluster, the key will
- // be redacted.
- SigningCACert string `json:",omitempty"`
- SigningCAKey string `json:",omitempty"`
-
- // If this value changes, and there is no specified signing cert and key,
- // then the swarm is forced to generate a new root certificate and key.
- ForceRotate uint64 `json:",omitempty"`
-}
-
-// ExternalCAProtocol represents type of external CA.
-type ExternalCAProtocol string
-
-// ExternalCAProtocolCFSSL CFSSL
-const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
-
-// ExternalCA defines external CA to be used by the cluster.
-type ExternalCA struct {
- // Protocol is the protocol used by this external CA.
- Protocol ExternalCAProtocol
-
- // URL is the URL where the external CA can be reached.
- URL string
-
- // Options is a set of additional key/value pairs whose interpretation
- // depends on the specified CA type.
- Options map[string]string `json:",omitempty"`
-
- // CACert specifies which root CA is used by this external CA. This certificate must
- // be in PEM format.
- CACert string
-}
-
-// InitRequest is the request used to init a swarm.
-type InitRequest struct {
- ListenAddr string
- AdvertiseAddr string
- DataPathAddr string
- DataPathPort uint32
- ForceNewCluster bool
- Spec Spec
- AutoLockManagers bool
- Availability NodeAvailability
- DefaultAddrPool []string
- SubnetSize uint32
-}
-
-// JoinRequest is the request used to join a swarm.
-type JoinRequest struct {
- ListenAddr string
- AdvertiseAddr string
- DataPathAddr string
- RemoteAddrs []string
- JoinToken string // accept by secret
- Availability NodeAvailability
-}
-
-// UnlockRequest is the request used to unlock a swarm.
-type UnlockRequest struct {
- // UnlockKey is the unlock key in ASCII-armored format.
- UnlockKey string
-}
-
-// LocalNodeState represents the state of the local node.
-type LocalNodeState string
-
-const (
- // LocalNodeStateInactive INACTIVE
- LocalNodeStateInactive LocalNodeState = "inactive"
- // LocalNodeStatePending PENDING
- LocalNodeStatePending LocalNodeState = "pending"
- // LocalNodeStateActive ACTIVE
- LocalNodeStateActive LocalNodeState = "active"
- // LocalNodeStateError ERROR
- LocalNodeStateError LocalNodeState = "error"
- // LocalNodeStateLocked LOCKED
- LocalNodeStateLocked LocalNodeState = "locked"
-)
-
-// Info represents generic information about swarm.
-type Info struct {
- NodeID string
- NodeAddr string
-
- LocalNodeState LocalNodeState
- ControlAvailable bool
- Error string
-
- RemoteManagers []Peer
- Nodes int `json:",omitempty"`
- Managers int `json:",omitempty"`
-
- Cluster *ClusterInfo `json:",omitempty"`
-
- Warnings []string `json:",omitempty"`
-}
-
-// Peer represents a peer.
-type Peer struct {
- NodeID string
- Addr string
-}
-
-// UpdateFlags contains flags for SwarmUpdate.
-type UpdateFlags struct {
- RotateWorkerToken bool
- RotateManagerToken bool
- RotateManagerUnlockKey bool
-}
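
For reference, an illustrative sketch (with placeholder addresses) of how the removed swarm.InitRequest and swarm.Spec types above were typically populated:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// Describe a swarm init: listen on all interfaces, keep 10000 raft log
	// entries between snapshots, and expect agent heartbeats every 5s.
	// The addresses are placeholders, not real endpoints.
	req := swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",
		AdvertiseAddr: "192.0.2.10:2377",
		Spec: swarm.Spec{
			Raft: swarm.RaftConfig{
				SnapshotInterval:           10000,
				LogEntriesForSlowFollowers: 500,
				ElectionTick:               10,
				HeartbeatTick:              1,
			},
			Dispatcher: swarm.DispatcherConfig{
				HeartbeatPeriod: 5 * time.Second,
			},
		},
	}
	fmt.Printf("%+v\n", req)
}
```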
diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go
deleted file mode 100644
index a6f7ab7..0000000
--- a/vendor/github.com/docker/docker/api/types/swarm/task.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package swarm // import "github.com/docker/docker/api/types/swarm"
-
-import (
- "time"
-
- "github.com/docker/docker/api/types/swarm/runtime"
-)
-
-// TaskState represents the state of a task.
-type TaskState string
-
-const (
- // TaskStateNew NEW
- TaskStateNew TaskState = "new"
- // TaskStateAllocated ALLOCATED
- TaskStateAllocated TaskState = "allocated"
- // TaskStatePending PENDING
- TaskStatePending TaskState = "pending"
- // TaskStateAssigned ASSIGNED
- TaskStateAssigned TaskState = "assigned"
- // TaskStateAccepted ACCEPTED
- TaskStateAccepted TaskState = "accepted"
- // TaskStatePreparing PREPARING
- TaskStatePreparing TaskState = "preparing"
- // TaskStateReady READY
- TaskStateReady TaskState = "ready"
- // TaskStateStarting STARTING
- TaskStateStarting TaskState = "starting"
- // TaskStateRunning RUNNING
- TaskStateRunning TaskState = "running"
- // TaskStateComplete COMPLETE
- TaskStateComplete TaskState = "complete"
- // TaskStateShutdown SHUTDOWN
- TaskStateShutdown TaskState = "shutdown"
- // TaskStateFailed FAILED
- TaskStateFailed TaskState = "failed"
- // TaskStateRejected REJECTED
- TaskStateRejected TaskState = "rejected"
- // TaskStateRemove REMOVE
- TaskStateRemove TaskState = "remove"
- // TaskStateOrphaned ORPHANED
- TaskStateOrphaned TaskState = "orphaned"
-)
-
-// Task represents a task.
-type Task struct {
- ID string
- Meta
- Annotations
-
- Spec TaskSpec `json:",omitempty"`
- ServiceID string `json:",omitempty"`
- Slot int `json:",omitempty"`
- NodeID string `json:",omitempty"`
- Status TaskStatus `json:",omitempty"`
- DesiredState TaskState `json:",omitempty"`
- NetworksAttachments []NetworkAttachment `json:",omitempty"`
- GenericResources []GenericResource `json:",omitempty"`
-
- // JobIteration is the JobIteration of the Service that this Task was
- // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is
- // used to determine which Tasks belong to which run of the job. This field
- // is absent if the Service mode is Replicated or Global.
- JobIteration *Version `json:",omitempty"`
-}
-
-// TaskSpec represents the spec of a task.
-type TaskSpec struct {
- // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
- // PluginSpec is only used when the `Runtime` field is set to `plugin`.
- // NetworkAttachmentSpec is used if the `Runtime` field is set to
- // `attachment`.
- ContainerSpec *ContainerSpec `json:",omitempty"`
- PluginSpec *runtime.PluginSpec `json:",omitempty"`
- NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`
-
- Resources *ResourceRequirements `json:",omitempty"`
- RestartPolicy *RestartPolicy `json:",omitempty"`
- Placement *Placement `json:",omitempty"`
- Networks []NetworkAttachmentConfig `json:",omitempty"`
-
- // LogDriver specifies the LogDriver to use for tasks created from this
- // spec. If not present, the cluster default on swarm.Spec will be
- // used, finally falling back to the engine default if not specified.
- LogDriver *Driver `json:",omitempty"`
-
- // ForceUpdate is a counter that triggers an update even if no relevant
- // parameters have been changed.
- ForceUpdate uint64
-
- Runtime RuntimeType `json:",omitempty"`
-}
-
-// Resources represents resources (CPU/Memory) which can be advertised by a
-// node and requested to be reserved for a task.
-type Resources struct {
- NanoCPUs int64 `json:",omitempty"`
- MemoryBytes int64 `json:",omitempty"`
- GenericResources []GenericResource `json:",omitempty"`
-}
-
-// Limit describes limits on resources which can be requested by a task.
-type Limit struct {
- NanoCPUs int64 `json:",omitempty"`
- MemoryBytes int64 `json:",omitempty"`
- Pids int64 `json:",omitempty"`
-}
-
-// GenericResource represents a "user defined" resource which can
-// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
-type GenericResource struct {
- NamedResourceSpec *NamedGenericResource `json:",omitempty"`
- DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
-}
-
-// NamedGenericResource represents a "user defined" resource which is defined
-// as a string.
-// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
-// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
-type NamedGenericResource struct {
- Kind string `json:",omitempty"`
- Value string `json:",omitempty"`
-}
-
-// DiscreteGenericResource represents a "user defined" resource which is defined
-// as an integer
-// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
-// Value is used to count the resource (SSD=5, HDD=3, ...)
-type DiscreteGenericResource struct {
- Kind string `json:",omitempty"`
- Value int64 `json:",omitempty"`
-}
-
-// ResourceRequirements represents resources requirements.
-type ResourceRequirements struct {
- Limits *Limit `json:",omitempty"`
- Reservations *Resources `json:",omitempty"`
-}
-
-// Placement represents orchestration parameters.
-type Placement struct {
- Constraints []string `json:",omitempty"`
- Preferences []PlacementPreference `json:",omitempty"`
- MaxReplicas uint64 `json:",omitempty"`
-
- // Platforms stores all the platforms that the image can run on.
- // This field is used in the platform filter for scheduling. If empty,
- // then the platform filter is off, meaning there are no scheduling restrictions.
- Platforms []Platform `json:",omitempty"`
-}
-
-// PlacementPreference provides a way to make the scheduler aware of factors
-// such as topology.
-type PlacementPreference struct {
- Spread *SpreadOver
-}
-
-// SpreadOver is a scheduling preference that instructs the scheduler to spread
-// tasks evenly over groups of nodes identified by labels.
-type SpreadOver struct {
- // label descriptor, such as engine.labels.az
- SpreadDescriptor string
-}
-
-// RestartPolicy represents the restart policy.
-type RestartPolicy struct {
- Condition RestartPolicyCondition `json:",omitempty"`
- Delay *time.Duration `json:",omitempty"`
- MaxAttempts *uint64 `json:",omitempty"`
- Window *time.Duration `json:",omitempty"`
-}
-
-// RestartPolicyCondition represents when to restart.
-type RestartPolicyCondition string
-
-const (
- // RestartPolicyConditionNone NONE
- RestartPolicyConditionNone RestartPolicyCondition = "none"
- // RestartPolicyConditionOnFailure ON_FAILURE
- RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
- // RestartPolicyConditionAny ANY
- RestartPolicyConditionAny RestartPolicyCondition = "any"
-)
-
-// TaskStatus represents the status of a task.
-type TaskStatus struct {
- Timestamp time.Time `json:",omitempty"`
- State TaskState `json:",omitempty"`
- Message string `json:",omitempty"`
- Err string `json:",omitempty"`
- ContainerStatus *ContainerStatus `json:",omitempty"`
- PortStatus PortStatus `json:",omitempty"`
-}
-
-// ContainerStatus represents the status of a container.
-type ContainerStatus struct {
- ContainerID string
- PID int
- ExitCode int
-}
-
-// PortStatus represents the status of a task's host ports, for tasks whose
-// service has published host ports.
-type PortStatus struct {
- Ports []PortConfig `json:",omitempty"`
-}
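
For reference, an illustrative sketch of how the removed TaskSpec, resource, restart-policy, and placement types above compose (values are arbitrary examples):

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	delay := 5 * time.Second
	maxAttempts := uint64(3)

	// Reserve half a CPU and 64 MiB, cap at one CPU and 128 MiB, restart
	// failed tasks at most three times with a 5s delay, and only place the
	// task on worker nodes.
	spec := swarm.TaskSpec{
		Resources: &swarm.ResourceRequirements{
			Reservations: &swarm.Resources{NanoCPUs: 500000000, MemoryBytes: 64 << 20},
			Limits:       &swarm.Limit{NanoCPUs: 1000000000, MemoryBytes: 128 << 20},
		},
		RestartPolicy: &swarm.RestartPolicy{
			Condition:   swarm.RestartPolicyConditionOnFailure,
			Delay:       &delay,
			MaxAttempts: &maxAttempts,
		},
		Placement: &swarm.Placement{
			Constraints: []string{"node.role == worker"},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```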
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
deleted file mode 100644
index 84b6f07..0000000
--- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package time // import "github.com/docker/docker/api/types/time"
-
-import (
- "strconv"
- "time"
-)
-
-// DurationToSecondsString converts the specified duration to the number of
-// seconds it represents, formatted as a string.
-func DurationToSecondsString(duration time.Duration) string {
- return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
-}
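
For reference, an illustrative use of the removed helper (the import alias avoids clashing with the standard library time package):

```go
package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	// Prints "90": the duration is rendered as whole seconds.
	fmt.Println(apitime.DurationToSecondsString(90 * time.Second))
}
```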
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
deleted file mode 100644
index ea3495e..0000000
--- a/vendor/github.com/docker/docker/api/types/time/timestamp.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package time // import "github.com/docker/docker/api/types/time"
-
-import (
- "fmt"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-// These are additional predefined layouts for use in Time.Format and Time.Parse
-// with --since and --until parameters for `docker logs` and `docker events`
-const (
- rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
- rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
- dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
- dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
-)
-
-// GetTimestamp tries to parse the given string as a Go duration,
-// then as an RFC3339 time, and finally as a Unix timestamp. If
-// any of these succeeds, it returns a Unix timestamp
-// as a string; otherwise it returns the given value back.
-// In case of duration input, the returned timestamp is computed
-// as the given reference time minus the amount of the duration.
-func GetTimestamp(value string, reference time.Time) (string, error) {
- if d, err := time.ParseDuration(value); value != "0" && err == nil {
- return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
- }
-
- var format string
- // if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation
- parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
-
- if strings.Contains(value, ".") {
- if parseInLocation {
- format = rFC3339NanoLocal
- } else {
- format = time.RFC3339Nano
- }
- } else if strings.Contains(value, "T") {
- // we want the number of colons in the T portion of the timestamp
- tcolons := strings.Count(value, ":")
- // if parseInLocation is off and we have a +/- zone offset (not Z) then
- // there will be an extra colon in the input for the tz offset; subtract that
- // colon from the tcolons count
- if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
- tcolons--
- }
- if parseInLocation {
- switch tcolons {
- case 0:
- format = "2006-01-02T15"
- case 1:
- format = "2006-01-02T15:04"
- default:
- format = rFC3339Local
- }
- } else {
- switch tcolons {
- case 0:
- format = "2006-01-02T15Z07:00"
- case 1:
- format = "2006-01-02T15:04Z07:00"
- default:
- format = time.RFC3339
- }
- }
- } else if parseInLocation {
- format = dateLocal
- } else {
- format = dateWithZone
- }
-
- var t time.Time
- var err error
-
- if parseInLocation {
- t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
- } else {
- t, err = time.Parse(format, value)
- }
-
- if err != nil {
- // if there is a `-` then it's an RFC3339 like timestamp
- if strings.Contains(value, "-") {
- return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
- }
- if _, _, err := parseTimestamp(value); err != nil {
- return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
- }
- return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
- }
-
- return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
-}
-
-// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
-// the format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
-// If the incoming nanosecond portion is longer or shorter than 9 digits, it is
-// converted to nanoseconds. The expectation is that the seconds and
-// nanoseconds will be used to create a time variable. For example:
-//   seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
-//   if err == nil { since := time.Unix(seconds, nanoseconds) }
-// If value is empty, seconds is returned as def (the default seconds value).
-func ParseTimestamps(value string, def int64) (int64, int64, error) {
- if value == "" {
- return def, 0, nil
- }
- return parseTimestamp(value)
-}
-
-func parseTimestamp(value string) (int64, int64, error) {
- sa := strings.SplitN(value, ".", 2)
- s, err := strconv.ParseInt(sa[0], 10, 64)
- if err != nil {
- return s, 0, err
- }
- if len(sa) != 2 {
- return s, 0, nil
- }
- n, err := strconv.ParseInt(sa[1], 10, 64)
- if err != nil {
- return s, n, err
- }
- // should already be in nanoseconds but just in case convert n to nanoseconds
- n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
- return s, n, nil
-}
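
For reference, an illustrative use of the removed GetTimestamp and ParseTimestamps helpers above:

```go
package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	ref := time.Now()

	// A relative duration is turned into an absolute Unix timestamp
	// (reference time minus the duration).
	ts, err := apitime.GetTimestamp("10m", ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(ts)

	// An already-formatted "seconds.nanoseconds" value is split back into
	// its two integer parts.
	secs, nanos, err := apitime.ParseTimestamps("1136073600.000000001", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(secs, nanos).UTC())
}
```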
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
deleted file mode 100644
index e3a1599..0000000
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ /dev/null
@@ -1,635 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "strings"
- "time"
-
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/mount"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/api/types/swarm"
- "github.com/docker/go-connections/nat"
-)
-
-// RootFS returns Image's RootFS description including the layer IDs.
-type RootFS struct {
- Type string
- Layers []string `json:",omitempty"`
- BaseLayer string `json:",omitempty"`
-}
-
-// ImageInspect contains response of Engine API:
-// GET "/images/{name:.*}/json"
-type ImageInspect struct {
- ID string `json:"Id"`
- RepoTags []string
- RepoDigests []string
- Parent string
- Comment string
- Created string
- Container string
- ContainerConfig *container.Config
- DockerVersion string
- Author string
- Config *container.Config
- Architecture string
- Variant string `json:",omitempty"`
- Os string
- OsVersion string `json:",omitempty"`
- Size int64
- VirtualSize int64
- GraphDriver GraphDriverData
- RootFS RootFS
- Metadata ImageMetadata
-}
-
-// ImageMetadata contains engine-local data about the image
-type ImageMetadata struct {
- LastTagTime time.Time `json:",omitempty"`
-}
-
-// Container contains response of Engine API:
-// GET "/containers/json"
-type Container struct {
- ID string `json:"Id"`
- Names []string
- Image string
- ImageID string
- Command string
- Created int64
- Ports []Port
- SizeRw int64 `json:",omitempty"`
- SizeRootFs int64 `json:",omitempty"`
- Labels map[string]string
- State string
- Status string
- HostConfig struct {
- NetworkMode string `json:",omitempty"`
- }
- NetworkSettings *SummaryNetworkSettings
- Mounts []MountPoint
-}
-
-// CopyConfig contains request body of Engine API:
-// POST "/containers/"+containerID+"/copy"
-type CopyConfig struct {
- Resource string
-}
-
-// ContainerPathStat is used to encode the header from
-// GET "/containers/{name:.*}/archive"
-// "Name" is the file or directory name.
-type ContainerPathStat struct {
- Name string `json:"name"`
- Size int64 `json:"size"`
- Mode os.FileMode `json:"mode"`
- Mtime time.Time `json:"mtime"`
- LinkTarget string `json:"linkTarget"`
-}
-
-// ContainerStats contains response of Engine API:
-// GET "/stats"
-type ContainerStats struct {
- Body io.ReadCloser `json:"body"`
- OSType string `json:"ostype"`
-}
-
-// Ping contains response of Engine API:
-// GET "/_ping"
-type Ping struct {
- APIVersion string
- OSType string
- Experimental bool
- BuilderVersion BuilderVersion
-}
-
-// ComponentVersion describes the version information for a specific component.
-type ComponentVersion struct {
- Name string
- Version string
- Details map[string]string `json:",omitempty"`
-}
-
-// Version contains response of Engine API:
-// GET "/version"
-type Version struct {
- Platform struct{ Name string } `json:",omitempty"`
- Components []ComponentVersion `json:",omitempty"`
-
- // The following fields are deprecated; they relate to the Engine component and are kept for backwards compatibility.
-
- Version string
- APIVersion string `json:"ApiVersion"`
- MinAPIVersion string `json:"MinAPIVersion,omitempty"`
- GitCommit string
- GoVersion string
- Os string
- Arch string
- KernelVersion string `json:",omitempty"`
- Experimental bool `json:",omitempty"`
- BuildTime string `json:",omitempty"`
-}
-
-// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
-// in the version-string of external tools, such as containerd, or runC.
-type Commit struct {
- ID string // ID is the actual commit ID of external tool.
- Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
-}
-
-// Info contains response of Engine API:
-// GET "/info"
-type Info struct {
- ID string
- Containers int
- ContainersRunning int
- ContainersPaused int
- ContainersStopped int
- Images int
- Driver string
- DriverStatus [][2]string
- SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API
- Plugins PluginsInfo
- MemoryLimit bool
- SwapLimit bool
- KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
- KernelMemoryTCP bool
- CPUCfsPeriod bool `json:"CpuCfsPeriod"`
- CPUCfsQuota bool `json:"CpuCfsQuota"`
- CPUShares bool
- CPUSet bool
- PidsLimit bool
- IPv4Forwarding bool
- BridgeNfIptables bool
- BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
- Debug bool
- NFd int
- OomKillDisable bool
- NGoroutines int
- SystemTime string
- LoggingDriver string
- CgroupDriver string
- CgroupVersion string `json:",omitempty"`
- NEventsListener int
- KernelVersion string
- OperatingSystem string
- OSVersion string
- OSType string
- Architecture string
- IndexServerAddress string
- RegistryConfig *registry.ServiceConfig
- NCPU int
- MemTotal int64
- GenericResources []swarm.GenericResource
- DockerRootDir string
- HTTPProxy string `json:"HttpProxy"`
- HTTPSProxy string `json:"HttpsProxy"`
- NoProxy string
- Name string
- Labels []string
- ExperimentalBuild bool
- ServerVersion string
- ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
- ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
- Runtimes map[string]Runtime
- DefaultRuntime string
- Swarm swarm.Info
- // LiveRestoreEnabled determines whether containers should be kept
- // running when the daemon is shutdown or upon daemon start if
- // running containers are detected
- LiveRestoreEnabled bool
- Isolation container.Isolation
- InitBinary string
- ContainerdCommit Commit
- RuncCommit Commit
- InitCommit Commit
- SecurityOptions []string
- ProductLicense string `json:",omitempty"`
- DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
- Warnings []string
-}
-
-// KeyValue holds a key/value pair
-type KeyValue struct {
- Key, Value string
-}
-
-// NetworkAddressPool is a temp struct used by Info struct
-type NetworkAddressPool struct {
- Base string
- Size int
-}
-
-// SecurityOpt contains the name and options of a security option
-type SecurityOpt struct {
- Name string
- Options []KeyValue
-}
-
-// DecodeSecurityOptions decodes a security options string slice to a type safe
-// SecurityOpt
-func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
- so := []SecurityOpt{}
- for _, opt := range opts {
- // support output from a < 1.13 docker daemon
- if !strings.Contains(opt, "=") {
- so = append(so, SecurityOpt{Name: opt})
- continue
- }
- secopt := SecurityOpt{}
- split := strings.Split(opt, ",")
- for _, s := range split {
- kv := strings.SplitN(s, "=", 2)
- if len(kv) != 2 {
- return nil, fmt.Errorf("invalid security option %q", s)
- }
- if kv[0] == "" || kv[1] == "" {
- return nil, errors.New("invalid empty security option")
- }
- if kv[0] == "name" {
- secopt.Name = kv[1]
- continue
- }
- secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
- }
- so = append(so, secopt)
- }
- return so, nil
-}
-
-// PluginsInfo is a temp struct holding the names of Plugins
-// registered with the docker daemon. It is used by the Info struct
-type PluginsInfo struct {
- // List of Volume plugins registered
- Volume []string
- // List of Network plugins registered
- Network []string
- // List of Authorization plugins registered
- Authorization []string
- // List of Log plugins registered
- Log []string
-}
-
-// ExecStartCheck is a temp struct used by execStart
-// Config fields are part of ExecConfig in the runconfig package
-type ExecStartCheck struct {
- // ExecStart will first check if it's detached
- Detach bool
- // Check if there's a tty
- Tty bool
-}
-
-// HealthcheckResult stores information about a single run of a healthcheck probe
-type HealthcheckResult struct {
- Start time.Time // Start is the time this check started
- End time.Time // End is the time this check ended
- ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
- Output string // Output from last check
-}
-
-// Health states
-const (
- NoHealthcheck = "none" // Indicates there is no healthcheck
- Starting = "starting" // Starting indicates that the container is not yet ready
- Healthy = "healthy" // Healthy indicates that the container is running correctly
- Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
-)
-
-// Health stores information about the container's healthcheck results
-type Health struct {
- Status string // Status is one of Starting, Healthy or Unhealthy
- FailingStreak int // FailingStreak is the number of consecutive failures
- Log []*HealthcheckResult // Log contains the last few results (oldest first)
-}
-
-// ContainerState stores container's running state
-// it's part of ContainerJSONBase and will be returned by the "inspect" command
-type ContainerState struct {
- Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
- Running bool
- Paused bool
- Restarting bool
- OOMKilled bool
- Dead bool
- Pid int
- ExitCode int
- Error string
- StartedAt string
- FinishedAt string
- Health *Health `json:",omitempty"`
-}
-
-// ContainerNode stores information about the node that a container
-// is running on. It's only used by the Docker Swarm standalone API
-type ContainerNode struct {
- ID string
- IPAddress string `json:"IP"`
- Addr string
- Name string
- Cpus int
- Memory int64
- Labels map[string]string
-}
-
-// ContainerJSONBase contains response of Engine API:
-// GET "/containers/{name:.*}/json"
-type ContainerJSONBase struct {
- ID string `json:"Id"`
- Created string
- Path string
- Args []string
- State *ContainerState
- Image string
- ResolvConfPath string
- HostnamePath string
- HostsPath string
- LogPath string
- Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
- Name string
- RestartCount int
- Driver string
- Platform string
- MountLabel string
- ProcessLabel string
- AppArmorProfile string
- ExecIDs []string
- HostConfig *container.HostConfig
- GraphDriver GraphDriverData
- SizeRw *int64 `json:",omitempty"`
- SizeRootFs *int64 `json:",omitempty"`
-}
-
-// ContainerJSON is a newly used struct along with MountPoint
-type ContainerJSON struct {
- *ContainerJSONBase
- Mounts []MountPoint
- Config *container.Config
- NetworkSettings *NetworkSettings
-}
-
-// NetworkSettings exposes the network settings in the api
-type NetworkSettings struct {
- NetworkSettingsBase
- DefaultNetworkSettings
- Networks map[string]*network.EndpointSettings
-}
-
-// SummaryNetworkSettings provides a summary of container's networks
-// in /containers/json
-type SummaryNetworkSettings struct {
- Networks map[string]*network.EndpointSettings
-}
-
-// NetworkSettingsBase holds basic information about networks
-type NetworkSettingsBase struct {
- Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`)
- SandboxID string // SandboxID uniquely represents a container's network stack
- HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
- LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
- LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
- Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
- SandboxKey string // SandboxKey identifies the sandbox
- SecondaryIPAddresses []network.Address
- SecondaryIPv6Addresses []network.Address
-}
-
-// DefaultNetworkSettings holds network information
-// during the 2 release deprecation period.
-// It will be removed in Docker 1.11.
-type DefaultNetworkSettings struct {
- EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
- Gateway string // Gateway holds the gateway address for the network
- GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
- GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
- IPAddress string // IPAddress holds the IPv4 address for the network
- IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
- IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
- MacAddress string // MacAddress holds the MAC address for the network
-}
-
-// MountPoint represents a mount point configuration inside the container.
-// This is used for reporting the mountpoints in use by a container.
-type MountPoint struct {
- Type mount.Type `json:",omitempty"`
- Name string `json:",omitempty"`
- Source string
- Destination string
- Driver string `json:",omitempty"`
- Mode string
- RW bool
- Propagation mount.Propagation
-}
-
-// NetworkResource is the body of the "get network" http response message
-type NetworkResource struct {
- Name string // Name is the requested name of the network
- ID string `json:"Id"` // ID uniquely identifies a network on a single machine
- Created time.Time // Created is the time the network was created
- Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
- Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
- EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
- IPAM network.IPAM // IPAM is the network's IP Address Management
- Internal bool // Internal represents if the network is used internally only
- Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
- Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
- ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
- ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
- Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
- Options map[string]string // Options holds the network specific options to use for when creating the network
- Labels map[string]string // Labels holds metadata specific to the network being created
- Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
- Services map[string]network.ServiceInfo `json:",omitempty"`
-}
-
-// EndpointResource contains network resources allocated and used for a container in a network
-type EndpointResource struct {
- Name string
- EndpointID string
- MacAddress string
- IPv4Address string
- IPv6Address string
-}
-
-// NetworkCreate is the expected body of the "create network" http request message
-type NetworkCreate struct {
- // Check for networks with duplicate names.
- // Network is primarily keyed based on a random ID and not on the name.
- // Network name is strictly a user-friendly alias to the network
- // which is uniquely identified using ID.
- // And there is no guaranteed way to check for duplicates.
- // Option CheckDuplicate is there to provide best-effort checking of any networks
- // which have the same name, but it is not guaranteed to catch all name collisions.
- CheckDuplicate bool
- Driver string
- Scope string
- EnableIPv6 bool
- IPAM *network.IPAM
- Internal bool
- Attachable bool
- Ingress bool
- ConfigOnly bool
- ConfigFrom *network.ConfigReference
- Options map[string]string
- Labels map[string]string
-}
-
-// NetworkCreateRequest is the request message sent to the server for network create call.
-type NetworkCreateRequest struct {
- NetworkCreate
- Name string
-}
-
-// NetworkCreateResponse is the response message sent by the server for network create call
-type NetworkCreateResponse struct {
- ID string `json:"Id"`
- Warning string
-}
-
-// NetworkConnect represents the data to be used to connect a container to the network
-type NetworkConnect struct {
- Container string
- EndpointConfig *network.EndpointSettings `json:",omitempty"`
-}
-
-// NetworkDisconnect represents the data to be used to disconnect a container from the network
-type NetworkDisconnect struct {
- Container string
- Force bool
-}
-
-// NetworkInspectOptions holds parameters to inspect network
-type NetworkInspectOptions struct {
- Scope string
- Verbose bool
-}
-
-// Checkpoint represents the details of a checkpoint
-type Checkpoint struct {
- Name string // Name is the name of the checkpoint
-}
-
-// Runtime describes an OCI runtime
-type Runtime struct {
- Path string `json:"path"`
- Args []string `json:"runtimeArgs,omitempty"`
-
- // This is exposed here only for internal use
- // It is not currently supported to specify custom shim configs
- Shim *ShimConfig `json:"-"`
-}
-
-// ShimConfig is used by runtime to configure containerd shims
-type ShimConfig struct {
- Binary string
- Opts interface{}
-}
-
-// DiskUsage contains response of Engine API:
-// GET "/system/df"
-type DiskUsage struct {
- LayersSize int64
- Images []*ImageSummary
- Containers []*Container
- Volumes []*Volume
- BuildCache []*BuildCache
- BuilderSize int64 // deprecated
-}
-
-// ContainersPruneReport contains the response for Engine API:
-// POST "/containers/prune"
-type ContainersPruneReport struct {
- ContainersDeleted []string
- SpaceReclaimed uint64
-}
-
-// VolumesPruneReport contains the response for Engine API:
-// POST "/volumes/prune"
-type VolumesPruneReport struct {
- VolumesDeleted []string
- SpaceReclaimed uint64
-}
-
-// ImagesPruneReport contains the response for Engine API:
-// POST "/images/prune"
-type ImagesPruneReport struct {
- ImagesDeleted []ImageDeleteResponseItem
- SpaceReclaimed uint64
-}
-
-// BuildCachePruneReport contains the response for Engine API:
-// POST "/build/prune"
-type BuildCachePruneReport struct {
- CachesDeleted []string
- SpaceReclaimed uint64
-}
-
-// NetworksPruneReport contains the response for Engine API:
-// POST "/networks/prune"
-type NetworksPruneReport struct {
- NetworksDeleted []string
-}
-
-// SecretCreateResponse contains the information returned to a client
-// on the creation of a new secret.
-type SecretCreateResponse struct {
- // ID is the id of the created secret.
- ID string
-}
-
-// SecretListOptions holds parameters to list secrets
-type SecretListOptions struct {
- Filters filters.Args
-}
-
-// ConfigCreateResponse contains the information returned to a client
-// on the creation of a new config.
-type ConfigCreateResponse struct {
- // ID is the id of the created config.
- ID string
-}
-
-// ConfigListOptions holds parameters to list configs
-type ConfigListOptions struct {
- Filters filters.Args
-}
-
-// PushResult contains the tag, manifest digest, and manifest size from the
-// push. It's used to signal this information to the trust code in the client
-// so it can sign the manifest if necessary.
-type PushResult struct {
- Tag string
- Digest string
- Size int
-}
-
-// BuildResult contains the image id of a successful build
-type BuildResult struct {
- ID string
-}
-
-// BuildCache contains information about a build cache record
-type BuildCache struct {
- ID string
- Parent string
- Type string
- Description string
- InUse bool
- Shared bool
- Size int64
- CreatedAt time.Time
- LastUsedAt *time.Time
- UsageCount int
-}
-
-// BuildCachePruneOptions holds parameters to prune the build cache
-type BuildCachePruneOptions struct {
- All bool
- KeepStorage int64
- Filters filters.Args
-}
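
For reference, an illustrative use of the removed DecodeSecurityOptions helper above (the option strings are made-up examples of the daemon's "name=...,key=value" format):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// The daemon reports security options as "name=...,key=value" strings;
	// DecodeSecurityOptions turns them into a structured form.
	opts, err := types.DecodeSecurityOptions([]string{
		"name=seccomp,profile=default",
		"name=apparmor",
	})
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Println(o.Name, o.Options)
	}
}
```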
diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md
deleted file mode 100644
index 1ef911e..0000000
--- a/vendor/github.com/docker/docker/api/types/versions/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Legacy API type versions
-
-This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
-
-Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
-
-## Package name conventions
-
-The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
-
-1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
-2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks more awkward: `v1_20.CallFunction`.
-
-For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, you should create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
deleted file mode 100644
index 8ccb0aa..0000000
--- a/vendor/github.com/docker/docker/api/types/versions/compare.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package versions // import "github.com/docker/docker/api/types/versions"
-
-import (
- "strconv"
- "strings"
-)
-
-// compare compares two version strings
-// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
-func compare(v1, v2 string) int {
- var (
- currTab = strings.Split(v1, ".")
- otherTab = strings.Split(v2, ".")
- )
-
- max := len(currTab)
- if len(otherTab) > max {
- max = len(otherTab)
- }
- for i := 0; i < max; i++ {
- var currInt, otherInt int
-
- if len(currTab) > i {
- currInt, _ = strconv.Atoi(currTab[i])
- }
- if len(otherTab) > i {
- otherInt, _ = strconv.Atoi(otherTab[i])
- }
- if currInt > otherInt {
- return 1
- }
- if otherInt > currInt {
- return -1
- }
- }
- return 0
-}
-
-// LessThan checks if a version is less than another
-func LessThan(v, other string) bool {
- return compare(v, other) == -1
-}
-
-// LessThanOrEqualTo checks if a version is less than or equal to another
-func LessThanOrEqualTo(v, other string) bool {
- return compare(v, other) <= 0
-}
-
-// GreaterThan checks if a version is greater than another
-func GreaterThan(v, other string) bool {
- return compare(v, other) == 1
-}
-
-// GreaterThanOrEqualTo checks if a version is greater than or equal to another
-func GreaterThanOrEqualTo(v, other string) bool {
- return compare(v, other) >= 0
-}
-
-// Equal checks if a version is equal to another
-func Equal(v, other string) bool {
- return compare(v, other) == 0
-}
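
For reference, an illustrative use of the removed version-comparison helpers above; note that components are compared numerically, not lexically:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// API version gates are simple numeric comparisons on "major.minor".
	fmt.Println(versions.LessThan("1.24", "1.31"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.40", "1.4"))  // true: 40 > 4 per component
}
```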
diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go
deleted file mode 100644
index c69b084..0000000
--- a/vendor/github.com/docker/docker/api/types/volume.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// Volume volume
-// swagger:model Volume
-type Volume struct {
-
- // Date/Time the volume was created.
- CreatedAt string `json:"CreatedAt,omitempty"`
-
- // Name of the volume driver used by the volume.
- // Required: true
- Driver string `json:"Driver"`
-
- // User-defined key/value metadata.
- // Required: true
- Labels map[string]string `json:"Labels"`
-
- // Mount path of the volume on the host.
- // Required: true
- Mountpoint string `json:"Mountpoint"`
-
- // Name of the volume.
- // Required: true
- Name string `json:"Name"`
-
- // The driver specific options used when creating the volume.
- //
- // Required: true
- Options map[string]string `json:"Options"`
-
- // The level at which the volume exists. Either `global` for cluster-wide,
- // or `local` for machine level.
- //
- // Required: true
- Scope string `json:"Scope"`
-
- // Low-level details about the volume, provided by the volume driver.
- // Details are returned as a map with key/value pairs:
- // `{"key":"value","key2":"value2"}`.
- //
- // The `Status` field is optional, and is omitted if the volume driver
- // does not support this feature.
- //
- Status map[string]interface{} `json:"Status,omitempty"`
-
- // usage data
- UsageData *VolumeUsageData `json:"UsageData,omitempty"`
-}
-
-// VolumeUsageData Usage details about the volume. This information is used by the
-// `GET /system/df` endpoint, and omitted in other endpoints.
-//
-// swagger:model VolumeUsageData
-type VolumeUsageData struct {
-
- // The number of containers referencing this volume. This field
- // is set to `-1` if the reference-count is not available.
- //
- // Required: true
- RefCount int64 `json:"RefCount"`
-
- // Amount of disk space used by the volume (in bytes). This information
- // is only available for volumes created with the `"local"` volume
- // driver. For volumes created with other volume drivers, this field
- // is set to `-1` ("not available")
- //
- // Required: true
- Size int64 `json:"Size"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go
deleted file mode 100644
index 8538078..0000000
--- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package volume // import "github.com/docker/docker/api/types/volume"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// VolumeCreateBody Volume configuration
-// swagger:model VolumeCreateBody
-type VolumeCreateBody struct {
-
- // Name of the volume driver to use.
- // Required: true
- Driver string `json:"Driver"`
-
- // A mapping of driver options and values. These options are
- // passed directly to the driver and are driver specific.
- //
- // Required: true
- DriverOpts map[string]string `json:"DriverOpts"`
-
- // User-defined key/value metadata.
- // Required: true
- Labels map[string]string `json:"Labels"`
-
- // The new volume's name. If not specified, Docker generates a name.
- //
- // Required: true
- Name string `json:"Name"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go
deleted file mode 100644
index be06179..0000000
--- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package volume // import "github.com/docker/docker/api/types/volume"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-import "github.com/docker/docker/api/types"
-
-// VolumeListOKBody Volume list response
-// swagger:model VolumeListOKBody
-type VolumeListOKBody struct {
-
- // List of volumes
- // Required: true
- Volumes []*types.Volume `json:"Volumes"`
-
- // Warnings that occurred when fetching the list of volumes.
- //
- // Required: true
- Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md
deleted file mode 100644
index 992f181..0000000
--- a/vendor/github.com/docker/docker/client/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Go client for the Docker Engine API
-
-The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc.
-
-For example, to list running containers (the equivalent of `docker ps`):
-
-```go
-package main
-
-import (
- "context"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/client"
-)
-
-func main() {
- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- panic(err)
- }
-
- containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
- if err != nil {
- panic(err)
- }
-
- for _, container := range containers {
- fmt.Printf("%s %s\n", container.ID[:10], container.Image)
- }
-}
-```
-
-[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client)
diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go
deleted file mode 100644
index 3aae43e..0000000
--- a/vendor/github.com/docker/docker/client/build_cancel.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// BuildCancel requests the daemon to cancel the ongoing build request
-func (cli *Client) BuildCancel(ctx context.Context, id string) error {
- query := url.Values{}
- query.Set("id", id)
-
- serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil)
- ensureReaderClosed(serverResp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go
deleted file mode 100644
index 397d67c..0000000
--- a/vendor/github.com/docker/docker/client/build_prune.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/pkg/errors"
-)
-
-// BuildCachePrune requests the daemon to delete unused cache data
-func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
- if err := cli.NewVersionError("1.31", "build prune"); err != nil {
- return nil, err
- }
-
- report := types.BuildCachePruneReport{}
-
- query := url.Values{}
- if opts.All {
- query.Set("all", "1")
- }
- query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage))
- filters, err := filters.ToJSON(opts.Filters)
- if err != nil {
- return nil, errors.Wrap(err, "prune could not marshal filters option")
- }
- query.Set("filters", filters)
-
- serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
-
- if err != nil {
- return nil, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return nil, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return &report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go
deleted file mode 100644
index 921024f..0000000
--- a/vendor/github.com/docker/docker/client/checkpoint_create.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
-)
-
-// CheckpointCreate creates a checkpoint from the given container with the given name
-func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
- resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go
deleted file mode 100644
index 54f55fa..0000000
--- a/vendor/github.com/docker/docker/client/checkpoint_delete.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// CheckpointDelete deletes the checkpoint with the given name from the given container
-func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error {
- query := url.Values{}
- if options.CheckpointDir != "" {
- query.Set("dir", options.CheckpointDir)
- }
-
- resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go
deleted file mode 100644
index 66d46dd..0000000
--- a/vendor/github.com/docker/docker/client/checkpoint_list.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// CheckpointList returns the checkpoints of the given container in the docker host
-func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
- var checkpoints []types.Checkpoint
-
- query := url.Values{}
- if options.CheckpointDir != "" {
- query.Set("dir", options.CheckpointDir)
- }
-
- resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return checkpoints, wrapResponseError(err, resp, "container", container)
- }
-
- err = json.NewDecoder(resp.body).Decode(&checkpoints)
- return checkpoints, err
-}
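
For reference, an illustrative sketch of calling the removed CheckpointList method; "my-container" is a placeholder name, and the zero-value CheckpointListOptions relies on the default checkpoint directory:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// List the checkpoints recorded for a container, using the default
	// checkpoint directory.
	cps, err := cli.CheckpointList(context.Background(), "my-container",
		types.CheckpointListOptions{})
	if err != nil {
		panic(err)
	}
	for _, cp := range cps {
		fmt.Println(cp.Name)
	}
}
```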
diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go
deleted file mode 100644
index 21edf1f..0000000
--- a/vendor/github.com/docker/docker/client/client.go
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
-Package client is a Go client for the Docker Engine API.
-
-For more information about the Engine API, see the documentation:
-https://docs.docker.com/engine/api/
-
-Usage
-
-You use the library by creating a client object and calling methods on it. The
-client can be created either from environment variables with NewClientWithOpts(client.FromEnv),
-or configured manually with NewClient().
-
-For example, to list running containers (the equivalent of "docker ps"):
-
- package main
-
- import (
- "context"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/client"
- )
-
- func main() {
- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- panic(err)
- }
-
- containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
- if err != nil {
- panic(err)
- }
-
- for _, container := range containers {
- fmt.Printf("%s %s\n", container.ID[:10], container.Image)
- }
- }
-
-*/
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "fmt"
- "net"
- "net/http"
- "net/url"
- "path"
- "strings"
-
- "github.com/docker/docker/api"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/go-connections/sockets"
- "github.com/pkg/errors"
-)
-
-// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
-var ErrRedirect = errors.New("unexpected redirect in response")
-
-// Client is the API client that performs all operations
-// against a docker server.
-type Client struct {
- // scheme sets the scheme for the client
- scheme string
- // host holds the server address to connect to
- host string
- // proto holds the client protocol i.e. unix.
- proto string
- // addr holds the client address.
- addr string
- // basePath holds the path to prepend to the requests.
- basePath string
- // client used to send and receive http requests.
- client *http.Client
- // version of the server to talk to.
- version string
- // custom http headers configured by users.
- customHTTPHeaders map[string]string
- // manualOverride is set to true when the version was set by users.
- manualOverride bool
-
- // negotiateVersion indicates if the client should automatically negotiate
- // the API version to use when making requests. API version negotiation is
- // performed on the first request, after which negotiated is set to "true"
- // so that subsequent requests do not re-negotiate.
- negotiateVersion bool
-
- // negotiated indicates that API version negotiation took place
- negotiated bool
-}
-
-// CheckRedirect specifies the policy for dealing with redirect responses:
-// If the request is non-GET return `ErrRedirect`. Otherwise use the last response.
-//
-// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client.
-// The Docker client (and by extension docker API client) can be made to send a request
-// like POST /containers//start where what would normally be in the name section of the URL is empty.
-// This triggers an HTTP 301 from the daemon.
-// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon.
-// This behavior change manifests in the client in that before the 301 was not followed and
-// the client did not generate an error, but now results in a message like Error response from daemon: page not found.
-func CheckRedirect(req *http.Request, via []*http.Request) error {
- if via[0].Method == http.MethodGet {
- return http.ErrUseLastResponse
- }
- return ErrRedirect
-}
-
-// NewClientWithOpts initializes a new API client with default values. It takes functors
-// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))`
-// It also initializes the custom http headers to add to each request.
-//
-// It won't send any version information if the version number is empty. It is
-// highly recommended that you set a version or your client may break if the
-// server is upgraded.
-func NewClientWithOpts(ops ...Opt) (*Client, error) {
- client, err := defaultHTTPClient(DefaultDockerHost)
- if err != nil {
- return nil, err
- }
- c := &Client{
- host: DefaultDockerHost,
- version: api.DefaultVersion,
- client: client,
- proto: defaultProto,
- addr: defaultAddr,
- }
-
- for _, op := range ops {
- if err := op(c); err != nil {
- return nil, err
- }
- }
-
- if _, ok := c.client.Transport.(http.RoundTripper); !ok {
- return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport)
- }
- if c.scheme == "" {
- c.scheme = "http"
-
- tlsConfig := resolveTLSConfig(c.client.Transport)
- if tlsConfig != nil {
- // TODO(stevvooe): This isn't really the right way to write clients in Go.
- // `NewClient` should probably only take an `*http.Client` and work from there.
- // Unfortunately, the model of having a host-ish/url-thingy as the connection
- // string has us confusing protocol and transport layers. We continue doing
- // this to avoid breaking existing clients but this should be addressed.
- c.scheme = "https"
- }
- }
-
- return c, nil
-}
-
-func defaultHTTPClient(host string) (*http.Client, error) {
- url, err := ParseHostURL(host)
- if err != nil {
- return nil, err
- }
- transport := new(http.Transport)
- sockets.ConfigureTransport(transport, url.Scheme, url.Host)
- return &http.Client{
- Transport: transport,
- CheckRedirect: CheckRedirect,
- }, nil
-}
-
-// Close the transport used by the client
-func (cli *Client) Close() error {
- if t, ok := cli.client.Transport.(*http.Transport); ok {
- t.CloseIdleConnections()
- }
- return nil
-}
-
-// getAPIPath returns the versioned request path to call the api.
-// It appends the query parameters to the path if they are not empty.
-func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string {
- var apiPath string
- if cli.negotiateVersion && !cli.negotiated {
- cli.NegotiateAPIVersion(ctx)
- }
- if cli.version != "" {
- v := strings.TrimPrefix(cli.version, "v")
- apiPath = path.Join(cli.basePath, "/v"+v, p)
- } else {
- apiPath = path.Join(cli.basePath, p)
- }
- return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
-}
-
-// ClientVersion returns the API version used by this client.
-func (cli *Client) ClientVersion() string {
- return cli.version
-}
-
-// NegotiateAPIVersion queries the API and updates the version to match the
-// API version. Any errors are silently ignored. If a manual override is in place,
-// either through the `DOCKER_API_VERSION` environment variable, or if the client
-// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation
-// will be performed.
-func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
- if !cli.manualOverride {
- ping, _ := cli.Ping(ctx)
- cli.negotiateAPIVersionPing(ping)
- }
-}
-
-// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
-// if the ping version is less than the default version. If a manual override is
-// in place, either through the `DOCKER_API_VERSION` environment variable, or if
-// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no
-// negotiation is performed.
-func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
- if !cli.manualOverride {
- cli.negotiateAPIVersionPing(p)
- }
-}
-
-// negotiateAPIVersionPing queries the API and updates the version to match the
-// API version. Any errors are silently ignored.
-func (cli *Client) negotiateAPIVersionPing(p types.Ping) {
- // try the latest version before versioning headers existed
- if p.APIVersion == "" {
- p.APIVersion = "1.24"
- }
-
- // if the client is not initialized with a version, start with the latest supported version
- if cli.version == "" {
- cli.version = api.DefaultVersion
- }
-
- // if server version is lower than the client version, downgrade
- if versions.LessThan(p.APIVersion, cli.version) {
- cli.version = p.APIVersion
- }
-
- // Store the results, so that automatic API version negotiation (if enabled)
- // won't be performed on the next request.
- if cli.negotiateVersion {
- cli.negotiated = true
- }
-}
-
-// DaemonHost returns the host address used by the client
-func (cli *Client) DaemonHost() string {
- return cli.host
-}
-
-// HTTPClient returns a copy of the HTTP client bound to the server
-func (cli *Client) HTTPClient() *http.Client {
- c := *cli.client
- return &c
-}
-
-// ParseHostURL parses a url string, validates the string is a host url, and
-// returns the parsed URL
-func ParseHostURL(host string) (*url.URL, error) {
- protoAddrParts := strings.SplitN(host, "://", 2)
- if len(protoAddrParts) == 1 {
- return nil, fmt.Errorf("unable to parse docker host `%s`", host)
- }
-
- var basePath string
- proto, addr := protoAddrParts[0], protoAddrParts[1]
- if proto == "tcp" {
- parsed, err := url.Parse("tcp://" + addr)
- if err != nil {
- return nil, err
- }
- addr = parsed.Host
- basePath = parsed.Path
- }
- return &url.URL{
- Scheme: proto,
- Host: addr,
- Path: basePath,
- }, nil
-}
-
-// CustomHTTPHeaders returns the custom http headers stored by the client.
-func (cli *Client) CustomHTTPHeaders() map[string]string {
- m := make(map[string]string)
- for k, v := range cli.customHTTPHeaders {
- m[k] = v
- }
- return m
-}
-
-// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
-// Deprecated: use WithHTTPHeaders when creating the client.
-func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
- cli.customHTTPHeaders = headers
-}
-
-// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection.
-// Used by `docker dial-stdio` (docker/cli#889).
-func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
- return func(ctx context.Context) (net.Conn, error) {
- if transport, ok := cli.client.Transport.(*http.Transport); ok {
- if transport.DialContext != nil && transport.TLSClientConfig == nil {
- return transport.DialContext(ctx, cli.proto, cli.addr)
- }
- }
- return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
- }
-}
diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go
deleted file mode 100644
index 54cdfc2..0000000
--- a/vendor/github.com/docker/docker/client/client_deprecated.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client
-
-import "net/http"
-
-// NewClient initializes a new API client for the given host and API version.
-// It uses the given http client as transport.
-// It also initializes the custom http headers to add to each request.
-//
-// It won't send any version information if the version number is empty. It is
-// highly recommended that you set a version or your client may break if the
-// server is upgraded.
-// Deprecated: use NewClientWithOpts
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
- return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders))
-}
-
-// NewEnvClient initializes a new API client based on environment variables.
-// See FromEnv for a list of supported environment variables.
-//
-// Deprecated: use NewClientWithOpts(FromEnv)
-func NewEnvClient() (*Client, error) {
- return NewClientWithOpts(FromEnv)
-}
diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go
deleted file mode 100644
index 9d0f0dc..0000000
--- a/vendor/github.com/docker/docker/client/client_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly
-
-package client // import "github.com/docker/docker/client"
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "unix:///var/run/docker.sock"
-
-const defaultProto = "unix"
-const defaultAddr = "/var/run/docker.sock"
diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go
deleted file mode 100644
index c649e54..0000000
--- a/vendor/github.com/docker/docker/client/client_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "npipe:////./pipe/docker_engine"
-
-const defaultProto = "npipe"
-const defaultAddr = "//./pipe/docker_engine"
diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go
deleted file mode 100644
index ee7d411..0000000
--- a/vendor/github.com/docker/docker/client/config_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigCreate creates a new Config.
-func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
- var response types.ConfigCreateResponse
- if err := cli.NewVersionError("1.30", "config create"); err != nil {
- return response, err
- }
- resp, err := cli.post(ctx, "/configs/create", nil, config, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go
deleted file mode 100644
index 7d0ce3e..0000000
--- a/vendor/github.com/docker/docker/client/config_inspect.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigInspectWithRaw returns the config information with raw data
-func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) {
- if id == "" {
- return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id}
- }
- if err := cli.NewVersionError("1.30", "config inspect"); err != nil {
- return swarm.Config{}, nil, err
- }
- resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return swarm.Config{}, nil, err
- }
-
- var config swarm.Config
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&config)
-
- return config, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go
deleted file mode 100644
index 565acc6..0000000
--- a/vendor/github.com/docker/docker/client/config_list.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigList returns the list of configs.
-func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
- if err := cli.NewVersionError("1.30", "config list"); err != nil {
- return nil, err
- }
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/configs", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var configs []swarm.Config
- err = json.NewDecoder(resp.body).Decode(&configs)
- return configs, err
-}
diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go
deleted file mode 100644
index a708fca..0000000
--- a/vendor/github.com/docker/docker/client/config_remove.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ConfigRemove removes a Config.
-func (cli *Client) ConfigRemove(ctx context.Context, id string) error {
- if err := cli.NewVersionError("1.30", "config remove"); err != nil {
- return err
- }
- resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "config", id)
-}
diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go
deleted file mode 100644
index 39e59cf..0000000
--- a/vendor/github.com/docker/docker/client/config_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ConfigUpdate attempts to update a Config
-func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error {
- if err := cli.NewVersionError("1.30", "config update"); err != nil {
- return err
- }
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go
deleted file mode 100644
index 88ba1ef..0000000
--- a/vendor/github.com/docker/docker/client/container_attach.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerAttach attaches a connection to a container in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-//
-// The stream format on the response will be in one of two formats:
-//
-// If the container is using a TTY, there is only a single stream (stdout), and
-// data is copied directly from the container output stream, no extra
-// multiplexing or headers.
-//
-// If the container is *not* using a TTY, streams for stdout and stderr are
-// multiplexed.
-// The format of the multiplexed stream is as follows:
-//
-// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
-//
-// STREAM_TYPE can be 1 for stdout and 2 for stderr
-//
-// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
-// This is the size of OUTPUT.
-//
-// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
-// stream.
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) {
- query := url.Values{}
- if options.Stream {
- query.Set("stream", "1")
- }
- if options.Stdin {
- query.Set("stdin", "1")
- }
- if options.Stdout {
- query.Set("stdout", "1")
- }
- if options.Stderr {
- query.Set("stderr", "1")
- }
- if options.DetachKeys != "" {
- query.Set("detachKeys", options.DetachKeys)
- }
- if options.Logs {
- query.Set("logs", "1")
- }
-
- headers := map[string][]string{"Content-Type": {"text/plain"}}
- return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go
deleted file mode 100644
index 2966e88..0000000
--- a/vendor/github.com/docker/docker/client/container_commit.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "errors"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
-)
-
-// ContainerCommit applies changes into a container and creates a new tagged image.
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) {
- var repository, tag string
- if options.Reference != "" {
- ref, err := reference.ParseNormalizedNamed(options.Reference)
- if err != nil {
- return types.IDResponse{}, err
- }
-
- if _, isCanonical := ref.(reference.Canonical); isCanonical {
- return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference")
- }
- ref = reference.TagNameOnly(ref)
-
- if tagged, ok := ref.(reference.Tagged); ok {
- tag = tagged.Tag()
- }
- repository = reference.FamiliarName(ref)
- }
-
- query := url.Values{}
- query.Set("container", container)
- query.Set("repo", repository)
- query.Set("tag", tag)
- query.Set("comment", options.Comment)
- query.Set("author", options.Author)
- for _, change := range options.Changes {
- query.Add("changes", change)
- }
- if !options.Pause {
- query.Set("pause", "0")
- }
-
- var response types.IDResponse
- resp, err := cli.post(ctx, "/commit", query, options.Config, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go
deleted file mode 100644
index bb278bf..0000000
--- a/vendor/github.com/docker/docker/client/container_copy.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "path/filepath"
- "strings"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerStatPath returns Stat information about a path inside the container filesystem.
-func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
- query := url.Values{}
- query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
-
- urlStr := "/containers/" + containerID + "/archive"
- response, err := cli.head(ctx, urlStr, query, nil)
- defer ensureReaderClosed(response)
- if err != nil {
- return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path)
- }
- return getContainerPathStatFromHeader(response.header)
-}
-
-// CopyToContainer copies content into the container filesystem.
-// Note that `content` must be a Reader for a TAR archive
-func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error {
- query := url.Values{}
- query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API.
- // Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
- if !options.AllowOverwriteDirWithFile {
- query.Set("noOverwriteDirNonDir", "true")
- }
-
- if options.CopyUIDGID {
- query.Set("copyUIDGID", "true")
- }
-
- apiPath := "/containers/" + containerID + "/archive"
-
- response, err := cli.putRaw(ctx, apiPath, query, content, nil)
- defer ensureReaderClosed(response)
- if err != nil {
- return wrapResponseError(err, response, "container:path", containerID+":"+dstPath)
- }
-
- // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior
- if response.statusCode != http.StatusOK {
- return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
- }
-
- return nil
-}
-
-// CopyFromContainer gets the content from the container and returns it as a Reader
-// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader.
-func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) {
- query := make(url.Values, 1)
- query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
-
- apiPath := "/containers/" + containerID + "/archive"
- response, err := cli.get(ctx, apiPath, query, nil)
- if err != nil {
- return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath)
- }
-
- // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior
- if response.statusCode != http.StatusOK {
- return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
- }
-
- // In order to get the copy behavior right, we need to know information
- // about both the source and the destination. The response headers include
- // stat info about the source that we can use in deciding exactly how to
- // copy it locally. Along with the stat info about the local destination,
- // we have everything we need to handle the multiple possibilities there
- // can be when copying a file/dir from one location to another file/dir.
- stat, err := getContainerPathStatFromHeader(response.header)
- if err != nil {
- return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err)
- }
- return response.body, stat, err
-}
-
-func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
- var stat types.ContainerPathStat
-
- encodedStat := header.Get("X-Docker-Container-Path-Stat")
- statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
-
- err := json.NewDecoder(statDecoder).Decode(&stat)
- if err != nil {
- err = fmt.Errorf("unable to decode container path stat header: %s", err)
- }
-
- return stat, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go
deleted file mode 100644
index b1d5fea..0000000
--- a/vendor/github.com/docker/docker/client/container_create.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/containerd/containerd/platforms"
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/versions"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-type configWrapper struct {
- *container.Config
- HostConfig *container.HostConfig
- NetworkingConfig *network.NetworkingConfig
- Platform *specs.Platform
-}
-
-// ContainerCreate creates a new container based on the given configuration.
-// It can be associated with a name, but it's not mandatory.
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) {
- var response container.ContainerCreateCreatedBody
-
- if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
- return response, err
- }
-
- // When using API 1.24 and under, the client is responsible for removing the container
- if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") {
- hostConfig.AutoRemove = false
- }
-
- if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
- return response, err
- }
-
- query := url.Values{}
- if platform != nil {
- query.Set("platform", platforms.Format(*platform))
- }
-
- if containerName != "" {
- query.Set("name", containerName)
- }
-
- body := configWrapper{
- Config: config,
- HostConfig: hostConfig,
- NetworkingConfig: networkingConfig,
- }
-
- serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go
deleted file mode 100644
index 29dac84..0000000
--- a/vendor/github.com/docker/docker/client/container_diff.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/container"
-)
-
-// ContainerDiff shows differences in a container filesystem since it was started.
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) {
- var changes []container.ContainerChangeResponseItem
-
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return changes, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&changes)
- return changes, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
deleted file mode 100644
index e3ee755..0000000
--- a/vendor/github.com/docker/docker/client/container_exec.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerExecCreate creates a new exec configuration to run an exec process.
-func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
- var response types.IDResponse
-
- if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
- return response, err
- }
-
- resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
-
-// ContainerExecStart starts an exec process already created in the docker host.
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
- resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
- ensureReaderClosed(resp)
- return err
-}
-
-// ContainerExecAttach attaches a connection to an exec process in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
- headers := map[string][]string{"Content-Type": {"application/json"}}
- return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
-}
-
-// ContainerExecInspect returns information about a specific exec process on the docker host.
-func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
- var response types.ContainerExecInspect
- resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go
deleted file mode 100644
index d0c0a5c..0000000
--- a/vendor/github.com/docker/docker/client/container_export.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-)
-
-// ContainerExport retrieves the raw contents of a container
-// and returns them as an io.ReadCloser. It's up to the caller
-// to close the stream.
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) {
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil)
- if err != nil {
- return nil, err
- }
-
- return serverResp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go
deleted file mode 100644
index c496bcf..0000000
--- a/vendor/github.com/docker/docker/client/container_inspect.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerInspect returns the container information.
-func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
- if containerID == "" {
- return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID}
- }
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
- }
-
- var response types.ContainerJSON
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
-
-// ContainerInspectWithRaw returns the container information and its raw representation.
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
- if containerID == "" {
- return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID}
- }
- query := url.Values{}
- if getSize {
- query.Set("size", "1")
- }
- serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return types.ContainerJSON{}, nil, err
- }
-
- var response types.ContainerJSON
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go
deleted file mode 100644
index 4d6f1d2..0000000
--- a/vendor/github.com/docker/docker/client/container_kill.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// ContainerKill terminates the container process but does not remove the container from the docker host.
-func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error {
- query := url.Values{}
- query.Set("signal", signal)
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go
deleted file mode 100644
index a973de5..0000000
--- a/vendor/github.com/docker/docker/client/container_list.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// ContainerList returns the list of containers in the docker host.
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
- query := url.Values{}
-
- if options.All {
- query.Set("all", "1")
- }
-
- if options.Limit != -1 {
- query.Set("limit", strconv.Itoa(options.Limit))
- }
-
- if options.Since != "" {
- query.Set("since", options.Since)
- }
-
- if options.Before != "" {
- query.Set("before", options.Before)
- }
-
- if options.Size {
- query.Set("size", "1")
- }
-
- if options.Filters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
-
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/containers/json", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var containers []types.Container
- err = json.NewDecoder(resp.body).Decode(&containers)
- return containers, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go
deleted file mode 100644
index 5b6541f..0000000
--- a/vendor/github.com/docker/docker/client/container_logs.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- timetypes "github.com/docker/docker/api/types/time"
- "github.com/pkg/errors"
-)
-
-// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
-// It's up to the caller to close the stream.
-//
-// The stream format on the response will be in one of two formats:
-//
-// If the container is using a TTY, there is only a single stream (stdout), and
-// data is copied directly from the container output stream, no extra
-// multiplexing or headers.
-//
-// If the container is *not* using a TTY, streams for stdout and stderr are
-// multiplexed.
-// The format of the multiplexed stream is as follows:
-//
-// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
-//
-// STREAM_TYPE can be 1 for stdout and 2 for stderr
-//
-// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian.
-// This is the size of OUTPUT.
-//
-// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this
-// stream.
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, errors.Wrap(err, `invalid value for "since"`)
- }
- query.Set("since", ts)
- }
-
- if options.Until != "" {
- ts, err := timetypes.GetTimestamp(options.Until, time.Now())
- if err != nil {
- return nil, errors.Wrap(err, `invalid value for "until"`)
- }
- query.Set("until", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
- if err != nil {
- return nil, wrapResponseError(err, resp, "container", container)
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go
deleted file mode 100644
index 5e7271a..0000000
--- a/vendor/github.com/docker/docker/client/container_pause.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ContainerPause pauses the main process of a given container without terminating it.
-func (cli *Client) ContainerPause(ctx context.Context, containerID string) error {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go
deleted file mode 100644
index 04383de..0000000
--- a/vendor/github.com/docker/docker/client/container_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// ContainersPrune requests the daemon to delete unused data
-func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) {
- var report types.ContainersPruneReport
-
- if err := cli.NewVersionError("1.25", "container prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go
deleted file mode 100644
index df81461..0000000
--- a/vendor/github.com/docker/docker/client/container_remove.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerRemove kills and removes a container from the docker host.
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
- query := url.Values{}
- if options.RemoveVolumes {
- query.Set("v", "1")
- }
- if options.RemoveLinks {
- query.Set("link", "1")
- }
-
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "container", containerID)
-}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
deleted file mode 100644
index 240fdf5..0000000
--- a/vendor/github.com/docker/docker/client/container_rename.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// ContainerRename changes the name of a given container.
-func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error {
- query := url.Values{}
- query.Set("name", newContainerName)
- resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go
deleted file mode 100644
index a9d4c0c..0000000
--- a/vendor/github.com/docker/docker/client/container_resize.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerResize changes the size of the tty for a container.
-func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error {
- return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width)
-}
-
-// ContainerExecResize changes the size of the tty for an exec process running inside a container.
-func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error {
- return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width)
-}
-
-func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error {
- query := url.Values{}
- query.Set("h", strconv.Itoa(int(height)))
- query.Set("w", strconv.Itoa(int(width)))
-
- resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go
deleted file mode 100644
index 41e4219..0000000
--- a/vendor/github.com/docker/docker/client/container_restart.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "time"
-
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// ContainerRestart stops and starts a container again.
-// It makes the daemon wait for the container to be up again for
-// a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
- query := url.Values{}
- if timeout != nil {
- query.Set("t", timetypes.DurationToSecondsString(*timeout))
- }
- resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go
deleted file mode 100644
index c2e0b15..0000000
--- a/vendor/github.com/docker/docker/client/container_start.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerStart sends a request to the docker daemon to start a container.
-func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error {
- query := url.Values{}
- if len(options.CheckpointID) != 0 {
- query.Set("checkpoint", options.CheckpointID)
- }
- if len(options.CheckpointDir) != 0 {
- query.Set("checkpoint-dir", options.CheckpointDir)
- }
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go
deleted file mode 100644
index 0a6488d..0000000
--- a/vendor/github.com/docker/docker/client/container_stats.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ContainerStats returns near realtime stats for a given container.
-// It's up to the caller to close the io.ReadCloser returned.
-func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) {
- query := url.Values{}
- query.Set("stream", "0")
- if stream {
- query.Set("stream", "1")
- }
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
- if err != nil {
- return types.ContainerStats{}, err
- }
-
- osType := getDockerOS(resp.header.Get("Server"))
- return types.ContainerStats{Body: resp.body, OSType: osType}, err
-}
-
-// ContainerStatsOneShot gets a single stat entry from a container.
-// It differs from `ContainerStats` in that the API should not wait to prime the stats
-func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) {
- query := url.Values{}
- query.Set("stream", "0")
- query.Set("one-shot", "1")
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil)
- if err != nil {
- return types.ContainerStats{}, err
- }
-
- osType := getDockerOS(resp.header.Get("Server"))
- return types.ContainerStats{Body: resp.body, OSType: osType}, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go
deleted file mode 100644
index 629d7ab..0000000
--- a/vendor/github.com/docker/docker/client/container_stop.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "time"
-
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// ContainerStop stops a container. In case the container fails to stop
-// gracefully within a time frame specified by the timeout argument,
-// it is forcefully terminated (killed).
-//
-// If the timeout is nil, the container's StopTimeout value is used, if set,
-// otherwise the engine default. A negative timeout value can be specified,
-// meaning no timeout, i.e. no forceful termination is performed.
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error {
- query := url.Values{}
- if timeout != nil {
- query.Set("t", timetypes.DurationToSecondsString(*timeout))
- }
- resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go
deleted file mode 100644
index a5b7899..0000000
--- a/vendor/github.com/docker/docker/client/container_top.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strings"
-
- "github.com/docker/docker/api/types/container"
-)
-
-// ContainerTop shows process information from within a container.
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) {
- var response container.ContainerTopOKBody
- query := url.Values{}
- if len(arguments) > 0 {
- query.Set("ps_args", strings.Join(arguments, " "))
- }
-
- resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go
deleted file mode 100644
index 1d8f873..0000000
--- a/vendor/github.com/docker/docker/client/container_unpause.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ContainerUnpause resumes the process execution within a container
-func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go
deleted file mode 100644
index 6917cf9..0000000
--- a/vendor/github.com/docker/docker/client/container_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types/container"
-)
-
-// ContainerUpdate updates resources of a container
-func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) {
- var response container.ContainerUpdateOKBody
- serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go
deleted file mode 100644
index 6ab8c1d..0000000
--- a/vendor/github.com/docker/docker/client/container_wait.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/versions"
-)
-
-// ContainerWait waits until the specified container is in a certain state
-// indicated by the given condition, either "not-running" (default),
-// "next-exit", or "removed".
-//
-// If this client's API version is before 1.30, condition is ignored and
-// ContainerWait will return immediately with the two channels, as the server
-// will wait as if the condition were "not-running".
-//
-// If this client's API version is at least 1.30, ContainerWait blocks until
-// the request has been acknowledged by the server (with a response header),
-// then returns two channels on which the caller can wait for the exit status
-// of the container or an error if there was a problem either beginning the
-// wait request or in getting the response. This allows the caller to
-// synchronize ContainerWait with other calls, such as specifying a
-// "next-exit" condition before issuing a ContainerStart request.
-func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) {
- if versions.LessThan(cli.ClientVersion(), "1.30") {
- return cli.legacyContainerWait(ctx, containerID)
- }
-
- resultC := make(chan container.ContainerWaitOKBody)
- errC := make(chan error, 1)
-
- query := url.Values{}
- query.Set("condition", string(condition))
-
- resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
- if err != nil {
- defer ensureReaderClosed(resp)
- errC <- err
- return resultC, errC
- }
-
- go func() {
- defer ensureReaderClosed(resp)
- var res container.ContainerWaitOKBody
- if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
- errC <- err
- return
- }
-
- resultC <- res
- }()
-
- return resultC, errC
-}
-
-// legacyContainerWait returns immediately and doesn't have an option to wait
-// until the container is removed.
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) {
- resultC := make(chan container.ContainerWaitOKBody)
- errC := make(chan error)
-
- go func() {
- resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil)
- if err != nil {
- errC <- err
- return
- }
- defer ensureReaderClosed(resp)
-
- var res container.ContainerWaitOKBody
- if err := json.NewDecoder(resp.body).Decode(&res); err != nil {
- errC <- err
- return
- }
-
- resultC <- res
- }()
-
- return resultC, errC
-}
diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go
deleted file mode 100644
index 354cd36..0000000
--- a/vendor/github.com/docker/docker/client/disk_usage.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
-)
-
-// DiskUsage requests the current data usage from the daemon
-func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) {
- var du types.DiskUsage
-
- serverResp, err := cli.get(ctx, "/system/df", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return du, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil {
- return du, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return du, nil
-}
diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go
deleted file mode 100644
index f4e3794..0000000
--- a/vendor/github.com/docker/docker/client/distribution_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- registrytypes "github.com/docker/docker/api/types/registry"
-)
-
-// DistributionInspect returns the image digest with full Manifest
-func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) {
- // Contact the registry to retrieve digest and platform information
- var distributionInspect registrytypes.DistributionInspect
- if image == "" {
- return distributionInspect, objectNotFoundError{object: "distribution", id: image}
- }
-
- if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil {
- return distributionInspect, err
- }
- var headers map[string][]string
-
- if encodedRegistryAuth != "" {
- headers = map[string][]string{
- "X-Registry-Auth": {encodedRegistryAuth},
- }
- }
-
- resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
- defer ensureReaderClosed(resp)
- if err != nil {
- return distributionInspect, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&distributionInspect)
- return distributionInspect, err
-}
diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go
deleted file mode 100644
index 041bc8d..0000000
--- a/vendor/github.com/docker/docker/client/errors.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "fmt"
- "net/http"
-
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
- "github.com/pkg/errors"
-)
-
-// errConnectionFailed implements an error returned when connection failed.
-type errConnectionFailed struct {
- host string
-}
-
-// Error returns a string representation of an errConnectionFailed
-func (err errConnectionFailed) Error() string {
- if err.host == "" {
- return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?"
- }
- return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host)
-}
-
-// IsErrConnectionFailed returns true if the error is caused by connection failed.
-func IsErrConnectionFailed(err error) bool {
- return errors.As(err, &errConnectionFailed{})
-}
-
-// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
-func ErrorConnectionFailed(host string) error {
- return errConnectionFailed{host: host}
-}
-
-// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility
-type notFound interface {
- error
- NotFound() bool
-}
-
-// IsErrNotFound returns true if the error is a NotFound error, which is returned
-// by the API when some object is not found.
-func IsErrNotFound(err error) bool {
- var e notFound
- if errors.As(err, &e) {
- return true
- }
- return errdefs.IsNotFound(err)
-}
-
-type objectNotFoundError struct {
- object string
- id string
-}
-
-func (e objectNotFoundError) NotFound() {}
-
-func (e objectNotFoundError) Error() string {
- return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
-}
-
-func wrapResponseError(err error, resp serverResponse, object, id string) error {
- switch {
- case err == nil:
- return nil
- case resp.statusCode == http.StatusNotFound:
- return objectNotFoundError{object: object, id: id}
- case resp.statusCode == http.StatusNotImplemented:
- return errdefs.NotImplemented(err)
- default:
- return err
- }
-}
-
-// unauthorizedError represents an authorization error in a remote registry.
-type unauthorizedError struct {
- cause error
-}
-
-// Error returns a string representation of an unauthorizedError
-func (u unauthorizedError) Error() string {
- return u.cause.Error()
-}
-
-// IsErrUnauthorized returns true if the error is caused
-// when a remote registry authentication fails
-func IsErrUnauthorized(err error) bool {
- if _, ok := err.(unauthorizedError); ok {
- return ok
- }
- return errdefs.IsUnauthorized(err)
-}
-
-type pluginPermissionDenied struct {
- name string
-}
-
-func (e pluginPermissionDenied) Error() string {
- return "Permission denied while installing plugin " + e.name
-}
-
-// IsErrPluginPermissionDenied returns true if the error is caused
-// when a user denies a plugin's permissions
-func IsErrPluginPermissionDenied(err error) bool {
- _, ok := err.(pluginPermissionDenied)
- return ok
-}
-
-type notImplementedError struct {
- message string
-}
-
-func (e notImplementedError) Error() string {
- return e.message
-}
-
-func (e notImplementedError) NotImplemented() bool {
- return true
-}
-
-// IsErrNotImplemented returns true if the error is a NotImplemented error.
-// This is returned by the API when a requested feature has not been
-// implemented.
-func IsErrNotImplemented(err error) bool {
- if _, ok := err.(notImplementedError); ok {
- return ok
- }
- return errdefs.IsNotImplemented(err)
-}
-
-// NewVersionError returns an error if the APIVersion required
-// is less than the currently supported version
-func (cli *Client) NewVersionError(APIrequired, feature string) error {
- if cli.version != "" && versions.LessThan(cli.version, APIrequired) {
- return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version)
- }
- return nil
-}
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
deleted file mode 100644
index f0dc9d9..0000000
--- a/vendor/github.com/docker/docker/client/events.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/events"
- "github.com/docker/docker/api/types/filters"
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// Events returns a stream of events in the daemon. It's up to the caller to close the stream
-// by cancelling the context. Once the stream has been completely read an io.EOF error will
-// be sent over the error channel. If an error is sent all processing will be stopped. It's up
-// to the caller to reopen the stream in the event of an error by reinvoking this method.
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
-
- messages := make(chan events.Message)
- errs := make(chan error, 1)
-
- started := make(chan struct{})
- go func() {
- defer close(errs)
-
- query, err := buildEventsQueryParams(cli.version, options)
- if err != nil {
- close(started)
- errs <- err
- return
- }
-
- resp, err := cli.get(ctx, "/events", query, nil)
- if err != nil {
- close(started)
- errs <- err
- return
- }
- defer resp.body.Close()
-
- decoder := json.NewDecoder(resp.body)
-
- close(started)
- for {
- select {
- case <-ctx.Done():
- errs <- ctx.Err()
- return
- default:
- var event events.Message
- if err := decoder.Decode(&event); err != nil {
- errs <- err
- return
- }
-
- select {
- case messages <- event:
- case <-ctx.Done():
- errs <- ctx.Err()
- return
- }
- }
- }
- }()
- <-started
-
- return messages, errs
-}
-
-func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) {
- query := url.Values{}
- ref := time.Now()
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, ref)
- if err != nil {
- return nil, err
- }
- query.Set("since", ts)
- }
-
- if options.Until != "" {
- ts, err := timetypes.GetTimestamp(options.Until, ref)
- if err != nil {
- return nil, err
- }
- query.Set("until", ts)
- }
-
- if options.Filters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters)
- if err != nil {
- return nil, err
- }
- query.Set("filters", filterJSON)
- }
-
- return query, nil
-}
diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go
deleted file mode 100644
index e1dc49e..0000000
--- a/vendor/github.com/docker/docker/client/hijack.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bufio"
- "context"
- "crypto/tls"
- "fmt"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/go-connections/sockets"
- "github.com/pkg/errors"
-)
-
-// postHijacked sends a POST request and hijacks the connection.
-func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) {
- bodyEncoded, err := encodeData(body)
- if err != nil {
- return types.HijackedResponse{}, err
- }
-
- apiPath := cli.getAPIPath(ctx, path, query)
- req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded)
- if err != nil {
- return types.HijackedResponse{}, err
- }
- req = cli.addHeaders(req, headers)
-
- conn, err := cli.setupHijackConn(ctx, req, "tcp")
- if err != nil {
- return types.HijackedResponse{}, err
- }
-
- return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
-}
-
-// DialHijack returns a hijacked connection with negotiated protocol proto.
-func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) {
- req, err := http.NewRequest(http.MethodPost, url, nil)
- if err != nil {
- return nil, err
- }
- req = cli.addHeaders(req, meta)
-
- return cli.setupHijackConn(ctx, req, proto)
-}
-
-// fallbackDial is used when WithDialer() was not called.
-// See cli.Dialer().
-func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
- if tlsConfig != nil && proto != "unix" && proto != "npipe" {
- return tls.Dial(proto, addr, tlsConfig)
- }
- if proto == "npipe" {
- return sockets.DialPipe(addr, 32*time.Second)
- }
- return net.Dial(proto, addr)
-}
-
-func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) {
- req.Host = cli.addr
- req.Header.Set("Connection", "Upgrade")
- req.Header.Set("Upgrade", proto)
-
- dialer := cli.Dialer()
- conn, err := dialer(ctx)
- if err != nil {
- return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
- }
-
- // When we set up a TCP connection for hijack, there could be long periods
- // of inactivity (a long running command with no output) that in certain
- // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
- // state. Setting TCP KeepAlive on the socket connection will prohibit
- // ECONNTIMEOUT unless the socket connection truly is broken
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- tcpConn.SetKeepAlive(true)
- tcpConn.SetKeepAlivePeriod(30 * time.Second)
- }
-
- clientconn := httputil.NewClientConn(conn, nil)
- defer clientconn.Close()
-
- // Server hijacks the connection, error 'connection closed' expected
- resp, err := clientconn.Do(req)
-
- //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons
- if err != httputil.ErrPersistEOF {
- if err != nil {
- return nil, err
- }
- if resp.StatusCode != http.StatusSwitchingProtocols {
- resp.Body.Close()
- return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
- }
- }
-
- c, br := clientconn.Hijack()
- if br.Buffered() > 0 {
- // If there is buffered content, wrap the connection. We return an
- // object that implements CloseWrite iff the underlying connection
- // implements it.
- if _, ok := c.(types.CloseWriter); ok {
- c = &hijackedConnCloseWriter{&hijackedConn{c, br}}
- } else {
- c = &hijackedConn{c, br}
- }
- } else {
- br.Reset(nil)
- }
-
- return c, nil
-}
-
-// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
-// that a) there was already buffered data in the http layer when Hijack() was
-// called, and b) the underlying net.Conn does *not* implement CloseWrite().
-// hijackedConn does not implement CloseWrite() either.
-type hijackedConn struct {
- net.Conn
- r *bufio.Reader
-}
-
-func (c *hijackedConn) Read(b []byte) (int, error) {
- return c.r.Read(b)
-}
-
-// hijackedConnCloseWriter is a hijackedConn which additionally implements
-// CloseWrite(). It is returned by setupHijackConn in the case that a) there
-// was already buffered data in the http layer when Hijack() was called, and b)
-// the underlying net.Conn *does* implement CloseWrite().
-type hijackedConnCloseWriter struct {
- *hijackedConn
-}
-
-var _ types.CloseWriter = &hijackedConnCloseWriter{}
-
-func (c *hijackedConnCloseWriter) CloseWrite() error {
- conn := c.Conn.(types.CloseWriter)
- return conn.CloseWrite()
-}
diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go
deleted file mode 100644
index 8fcf995..0000000
--- a/vendor/github.com/docker/docker/client/image_build.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/container"
-)
-
-// ImageBuild sends a request to the daemon to build images.
-// The Body in the response implements an io.ReadCloser and it's up to the caller to
-// close it.
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
- query, err := cli.imageBuildOptionsToQuery(options)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
-
- headers := http.Header(make(map[string][]string))
- buf, err := json.Marshal(options.AuthConfigs)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
- headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
-
- headers.Set("Content-Type", "application/x-tar")
-
- serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
- if err != nil {
- return types.ImageBuildResponse{}, err
- }
-
- osType := getDockerOS(serverResp.header.Get("Server"))
-
- return types.ImageBuildResponse{
- Body: serverResp.body,
- OSType: osType,
- }, nil
-}
-
-func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) {
- query := url.Values{
- "t": options.Tags,
- "securityopt": options.SecurityOpt,
- "extrahosts": options.ExtraHosts,
- }
- if options.SuppressOutput {
- query.Set("q", "1")
- }
- if options.RemoteContext != "" {
- query.Set("remote", options.RemoteContext)
- }
- if options.NoCache {
- query.Set("nocache", "1")
- }
- if options.Remove {
- query.Set("rm", "1")
- } else {
- query.Set("rm", "0")
- }
-
- if options.ForceRemove {
- query.Set("forcerm", "1")
- }
-
- if options.PullParent {
- query.Set("pull", "1")
- }
-
- if options.Squash {
- if err := cli.NewVersionError("1.25", "squash"); err != nil {
- return query, err
- }
- query.Set("squash", "1")
- }
-
- if !container.Isolation.IsDefault(options.Isolation) {
- query.Set("isolation", string(options.Isolation))
- }
-
- query.Set("cpusetcpus", options.CPUSetCPUs)
- query.Set("networkmode", options.NetworkMode)
- query.Set("cpusetmems", options.CPUSetMems)
- query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10))
- query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10))
- query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10))
- query.Set("memory", strconv.FormatInt(options.Memory, 10))
- query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10))
- query.Set("cgroupparent", options.CgroupParent)
- query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10))
- query.Set("dockerfile", options.Dockerfile)
- query.Set("target", options.Target)
-
- ulimitsJSON, err := json.Marshal(options.Ulimits)
- if err != nil {
- return query, err
- }
- query.Set("ulimits", string(ulimitsJSON))
-
- buildArgsJSON, err := json.Marshal(options.BuildArgs)
- if err != nil {
- return query, err
- }
- query.Set("buildargs", string(buildArgsJSON))
-
- labelsJSON, err := json.Marshal(options.Labels)
- if err != nil {
- return query, err
- }
- query.Set("labels", string(labelsJSON))
-
- cacheFromJSON, err := json.Marshal(options.CacheFrom)
- if err != nil {
- return query, err
- }
- query.Set("cachefrom", string(cacheFromJSON))
- if options.SessionID != "" {
- query.Set("session", options.SessionID)
- }
- if options.Platform != "" {
- if err := cli.NewVersionError("1.32", "platform"); err != nil {
- return query, err
- }
- query.Set("platform", strings.ToLower(options.Platform))
- }
- if options.BuildID != "" {
- query.Set("buildid", options.BuildID)
- }
- query.Set("version", string(options.Version))
-
- if options.Outputs != nil {
- outputsJSON, err := json.Marshal(options.Outputs)
- if err != nil {
- return query, err
- }
- query.Set("outputs", string(outputsJSON))
- }
- return query, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go
deleted file mode 100644
index 2393804..0000000
--- a/vendor/github.com/docker/docker/client/image_create.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
-)
-
-// ImageCreate creates a new image based on the parent options.
-// It returns the JSON content in the response body.
-func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
- ref, err := reference.ParseNormalizedNamed(parentReference)
- if err != nil {
- return nil, err
- }
-
- query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
- query.Set("tag", getAPITagFromNamedRef(ref))
- if options.Platform != "" {
- query.Set("platform", strings.ToLower(options.Platform))
- }
- resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/images/create", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go
deleted file mode 100644
index b5bea10..0000000
--- a/vendor/github.com/docker/docker/client/image_history.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/image"
-)
-
-// ImageHistory returns the changes in an image in history format.
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) {
- var history []image.HistoryResponseItem
- serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return history, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&history)
- return history, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go
deleted file mode 100644
index d3336d4..0000000
--- a/vendor/github.com/docker/docker/client/image_import.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
-)
-
-// ImageImport creates a new image based on the source options.
-// It returns the JSON content in the response body.
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
- if ref != "" {
- // Check if the given image name can be resolved
- if _, err := reference.ParseNormalizedNamed(ref); err != nil {
- return nil, err
- }
- }
-
- query := url.Values{}
- query.Set("fromSrc", source.SourceName)
- query.Set("repo", ref)
- query.Set("tag", options.Tag)
- query.Set("message", options.Message)
- if options.Platform != "" {
- query.Set("platform", strings.ToLower(options.Platform))
- }
- for _, change := range options.Changes {
- query.Add("changes", change)
- }
-
- resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go
deleted file mode 100644
index 1eb8dce..0000000
--- a/vendor/github.com/docker/docker/client/image_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types"
-)
-
-// ImageInspectWithRaw returns the image information and its raw representation.
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
- if imageID == "" {
- return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID}
- }
- serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return types.ImageInspect{}, nil, err
- }
-
- var response types.ImageInspect
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go
deleted file mode 100644
index a4d7505..0000000
--- a/vendor/github.com/docker/docker/client/image_list.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/versions"
-)
-
-// ImageList returns a list of images in the docker host.
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) {
- var images []types.ImageSummary
- query := url.Values{}
-
- optionFilters := options.Filters
- referenceFilters := optionFilters.Get("reference")
- if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 {
- query.Set("filter", referenceFilters[0])
- for _, filterValue := range referenceFilters {
- optionFilters.Del("reference", filterValue)
- }
- }
- if optionFilters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters)
- if err != nil {
- return images, err
- }
- query.Set("filters", filterJSON)
- }
- if options.All {
- query.Set("all", "1")
- }
-
- serverResp, err := cli.get(ctx, "/images/json", query, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return images, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&images)
- return images, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go
deleted file mode 100644
index 91016e4..0000000
--- a/vendor/github.com/docker/docker/client/image_load.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ImageLoad loads an image in the docker host from the client host.
-// It's up to the caller to close the io.ReadCloser in the
-// ImageLoadResponse returned by this function.
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) {
- v := url.Values{}
- v.Set("quiet", "0")
- if quiet {
- v.Set("quiet", "1")
- }
- headers := map[string][]string{"Content-Type": {"application/x-tar"}}
- resp, err := cli.postRaw(ctx, "/images/load", v, input, headers)
- if err != nil {
- return types.ImageLoadResponse{}, err
- }
- return types.ImageLoadResponse{
- Body: resp.body,
- JSON: resp.header.Get("Content-Type") == "application/json",
- }, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go
deleted file mode 100644
index 56af6d7..0000000
--- a/vendor/github.com/docker/docker/client/image_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// ImagesPrune requests the daemon to delete unused data
-func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) {
- var report types.ImagesPruneReport
-
- if err := cli.NewVersionError("1.25", "image prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving disk usage: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go
deleted file mode 100644
index a239755..0000000
--- a/vendor/github.com/docker/docker/client/image_pull.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
-)
-
-// ImagePull requests the docker host to pull an image from a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-//
-// FIXME(vdemeester): this is currently used in a few ways in docker/docker
-// - if not in trusted content, ref is used to pass the whole reference, and tag is empty
-// - if in trusted content, ref is used to pass the reference name, and tag for the digest
-func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
- ref, err := reference.ParseNormalizedNamed(refStr)
- if err != nil {
- return nil, err
- }
-
- query := url.Values{}
- query.Set("fromImage", reference.FamiliarName(ref))
- if !options.All {
- query.Set("tag", getAPITagFromNamedRef(ref))
- }
- if options.Platform != "" {
- query.Set("platform", strings.ToLower(options.Platform))
- }
-
- resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return nil, privilegeErr
- }
- resp, err = cli.tryImageCreate(ctx, query, newAuthHeader)
- }
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-// getAPITagFromNamedRef returns a tag from the specified reference.
-// This function is necessary as long as the docker "server" api expects
-// digests to be sent as tags and makes a distinction between the name
-// and tag/digest part of a reference.
-func getAPITagFromNamedRef(ref reference.Named) string {
- if digested, ok := ref.(reference.Digested); ok {
- return digested.Digest().String()
- }
- ref = reference.TagNameOnly(ref)
- if tagged, ok := ref.(reference.Tagged); ok {
- return tagged.Tag()
- }
- return ""
-}
diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go
deleted file mode 100644
index 845580d..0000000
--- a/vendor/github.com/docker/docker/client/image_push.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "errors"
- "io"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
-)
-
-// ImagePush requests the docker host to push an image to a remote registry.
-// It executes the privileged function if the operation is unauthorized
-// and it tries one more time.
-// It's up to the caller to handle the io.ReadCloser and close it properly.
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
- ref, err := reference.ParseNormalizedNamed(image)
- if err != nil {
- return nil, err
- }
-
- if _, isCanonical := ref.(reference.Canonical); isCanonical {
- return nil, errors.New("cannot push a digest reference")
- }
-
- name := reference.FamiliarName(ref)
- query := url.Values{}
- if !options.All {
- ref = reference.TagNameOnly(ref)
- if tagged, ok := ref.(reference.Tagged); ok {
- query.Set("tag", tagged.Tag())
- }
- }
-
- resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return nil, privilegeErr
- }
- resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader)
- }
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go
deleted file mode 100644
index 84a41af..0000000
--- a/vendor/github.com/docker/docker/client/image_remove.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// ImageRemove removes an image from the docker host.
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) {
- query := url.Values{}
-
- if options.Force {
- query.Set("force", "1")
- }
- if !options.PruneChildren {
- query.Set("noprune", "1")
- }
-
- var dels []types.ImageDeleteResponseItem
- resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return dels, wrapResponseError(err, resp, "image", imageID)
- }
-
- err = json.NewDecoder(resp.body).Decode(&dels)
- return dels, err
-}
diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go
deleted file mode 100644
index d1314e4..0000000
--- a/vendor/github.com/docker/docker/client/image_save.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-)
-
-// ImageSave retrieves one or more images from the docker host as an io.ReadCloser.
-// It's up to the caller to store the images and close the stream.
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) {
- query := url.Values{
- "names": imageIDs,
- }
-
- resp, err := cli.get(ctx, "/images/get", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go
deleted file mode 100644
index 82955a7..0000000
--- a/vendor/github.com/docker/docker/client/image_search.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/errdefs"
-)
-
-// ImageSearch makes the docker host search by a term in a remote registry.
-// The list of results is not sorted in any fashion.
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) {
- var results []registry.SearchResult
- query := url.Values{}
- query.Set("term", term)
- query.Set("limit", fmt.Sprintf("%d", options.Limit))
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return results, err
- }
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth)
- defer ensureReaderClosed(resp)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- return results, privilegeErr
- }
- resp, err = cli.tryImageSearch(ctx, query, newAuthHeader)
- }
- if err != nil {
- return results, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&results)
- return results, err
-}
-
-func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.get(ctx, "/images/search", query, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go
deleted file mode 100644
index 5652bfc..0000000
--- a/vendor/github.com/docker/docker/client/image_tag.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/pkg/errors"
-)
-
-// ImageTag tags an image in the docker host
-func (cli *Client) ImageTag(ctx context.Context, source, target string) error {
- if _, err := reference.ParseAnyReference(source); err != nil {
- return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source)
- }
-
- ref, err := reference.ParseNormalizedNamed(target)
- if err != nil {
- return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target)
- }
-
- if _, isCanonical := ref.(reference.Canonical); isCanonical {
- return errors.New("refusing to create a tag with a digest reference")
- }
-
- ref = reference.TagNameOnly(ref)
-
- query := url.Values{}
- query.Set("repo", reference.FamiliarName(ref))
- if tagged, ok := ref.(reference.Tagged); ok {
- query.Set("tag", tagged.Tag())
- }
-
- resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go
deleted file mode 100644
index c856704..0000000
--- a/vendor/github.com/docker/docker/client/info.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// Info returns information about the docker server.
-func (cli *Client) Info(ctx context.Context) (types.Info, error) {
- var info types.Info
- serverResp, err := cli.get(ctx, "/info", url.Values{}, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return info, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil {
- return info, fmt.Errorf("Error reading remote info: %v", err)
- }
-
- return info, nil
-}
diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go
deleted file mode 100644
index aabad4a..0000000
--- a/vendor/github.com/docker/docker/client/interface.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net"
- "net/http"
- "time"
-
- "github.com/docker/docker/api/types"
- containertypes "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/events"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/image"
- networktypes "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/registry"
- "github.com/docker/docker/api/types/swarm"
- volumetypes "github.com/docker/docker/api/types/volume"
- specs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// CommonAPIClient is the common methods between stable and experimental versions of APIClient.
-type CommonAPIClient interface {
- ConfigAPIClient
- ContainerAPIClient
- DistributionAPIClient
- ImageAPIClient
- NodeAPIClient
- NetworkAPIClient
- PluginAPIClient
- ServiceAPIClient
- SwarmAPIClient
- SecretAPIClient
- SystemAPIClient
- VolumeAPIClient
- ClientVersion() string
- DaemonHost() string
- HTTPClient() *http.Client
- ServerVersion(ctx context.Context) (types.Version, error)
- NegotiateAPIVersion(ctx context.Context)
- NegotiateAPIVersionPing(types.Ping)
- DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error)
- Dialer() func(context.Context) (net.Conn, error)
- Close() error
-}
-
-// ContainerAPIClient defines API client methods for the containers
-type ContainerAPIClient interface {
- ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error)
- ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
- ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error)
- ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error)
- ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
- ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
- ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
- ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
- ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error
- ContainerExport(ctx context.Context, container string) (io.ReadCloser, error)
- ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error)
- ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error)
- ContainerKill(ctx context.Context, container, signal string) error
- ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
- ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- ContainerPause(ctx context.Context, container string) error
- ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error
- ContainerRename(ctx context.Context, container, newContainerName string) error
- ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error
- ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error
- ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error)
- ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error)
- ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error)
- ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error
- ContainerStop(ctx context.Context, container string, timeout *time.Duration) error
- ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error)
- ContainerUnpause(ctx context.Context, container string) error
- ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error)
- ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error)
- CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error)
- CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error
- ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error)
-}
-
-// DistributionAPIClient defines API client methods for the registry
-type DistributionAPIClient interface {
- DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error)
-}
-
-// ImageAPIClient defines API client methods for the images
-type ImageAPIClient interface {
- ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
- BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
- BuildCancel(ctx context.Context, id string) error
- ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
- ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
- ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
- ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
- ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error)
- ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
- ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
- ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
- ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error)
- ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
- ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
- ImageTag(ctx context.Context, image, ref string) error
- ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error)
-}
-
-// NetworkAPIClient defines API client methods for the networks
-type NetworkAPIClient interface {
- NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error
- NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error)
- NetworkDisconnect(ctx context.Context, network, container string, force bool) error
- NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error)
- NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error)
- NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error)
- NetworkRemove(ctx context.Context, network string) error
- NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error)
-}
-
-// NodeAPIClient defines API client methods for the nodes
-type NodeAPIClient interface {
- NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
- NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
- NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
- NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
-}
-
-// PluginAPIClient defines API client methods for the plugins
-type PluginAPIClient interface {
- PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error)
- PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error
- PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error
- PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error
- PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
- PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error)
- PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error)
- PluginSet(ctx context.Context, name string, args []string) error
- PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error)
- PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error
-}
-
-// ServiceAPIClient defines API client methods for the services
-type ServiceAPIClient interface {
- ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error)
- ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
- ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
- ServiceRemove(ctx context.Context, serviceID string) error
- ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error)
- ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error)
- TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
- TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
-}
-
-// SwarmAPIClient defines API client methods for the swarm
-type SwarmAPIClient interface {
- SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
- SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
- SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
- SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
- SwarmLeave(ctx context.Context, force bool) error
- SwarmInspect(ctx context.Context) (swarm.Swarm, error)
- SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error
-}
-
-// SystemAPIClient defines API client methods for the system
-type SystemAPIClient interface {
- Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
- Info(ctx context.Context) (types.Info, error)
- RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error)
- DiskUsage(ctx context.Context) (types.DiskUsage, error)
- Ping(ctx context.Context) (types.Ping, error)
-}
-
-// VolumeAPIClient defines API client methods for the volumes
-type VolumeAPIClient interface {
- VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error)
- VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error)
- VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error)
- VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error)
- VolumeRemove(ctx context.Context, volumeID string, force bool) error
- VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error)
-}
-
-// SecretAPIClient defines API client methods for secrets
-type SecretAPIClient interface {
- SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
- SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
- SecretRemove(ctx context.Context, id string) error
- SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
- SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
-}
-
-// ConfigAPIClient defines API client methods for configs
-type ConfigAPIClient interface {
- ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
- ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
- ConfigRemove(ctx context.Context, id string) error
- ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
- ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
-}
diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go
deleted file mode 100644
index 402ffb5..0000000
--- a/vendor/github.com/docker/docker/client/interface_experimental.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
-)
-
-type apiClientExperimental interface {
- CheckpointAPIClient
-}
-
-// CheckpointAPIClient defines API client methods for the checkpoints
-type CheckpointAPIClient interface {
- CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
- CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error
- CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error)
-}
diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go
deleted file mode 100644
index 5502cd7..0000000
--- a/vendor/github.com/docker/docker/client/interface_stable.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-// APIClient is an interface that clients that talk with a docker server must implement.
-type APIClient interface {
- CommonAPIClient
- apiClientExperimental
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go
deleted file mode 100644
index f058520..0000000
--- a/vendor/github.com/docker/docker/client/login.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/registry"
-)
-
-// RegistryLogin authenticates the docker server with a given docker registry.
-// It returns unauthorizedError when the authentication fails.
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) {
- resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
- defer ensureReaderClosed(resp)
-
- if err != nil {
- return registry.AuthenticateOKBody{}, err
- }
-
- var response registry.AuthenticateOKBody
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go
deleted file mode 100644
index 5718946..0000000
--- a/vendor/github.com/docker/docker/client/network_connect.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/network"
-)
-
-// NetworkConnect connects a container to an existent network in the docker host.
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error {
- nc := types.NetworkConnect{
- Container: containerID,
- EndpointConfig: config,
- }
- resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go
deleted file mode 100644
index 278d938..0000000
--- a/vendor/github.com/docker/docker/client/network_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// NetworkCreate creates a new network in the docker host.
-func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) {
- networkCreateRequest := types.NetworkCreateRequest{
- NetworkCreate: options,
- Name: name,
- }
- var response types.NetworkCreateResponse
- serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go
deleted file mode 100644
index dd15676..0000000
--- a/vendor/github.com/docker/docker/client/network_disconnect.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
-)
-
-// NetworkDisconnect disconnects a container from an existent network in the docker host.
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {
- nd := types.NetworkDisconnect{Container: containerID, Force: force}
- resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go
deleted file mode 100644
index 89a05b3..0000000
--- a/vendor/github.com/docker/docker/client/network_inspect.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// NetworkInspect returns the information for a specific network configured in the docker host.
-func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
- networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
- return networkResource, err
-}
-
-// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
- if networkID == "" {
- return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID}
- }
- var (
- networkResource types.NetworkResource
- resp serverResponse
- err error
- )
- query := url.Values{}
- if options.Verbose {
- query.Set("verbose", "true")
- }
- if options.Scope != "" {
- query.Set("scope", options.Scope)
- }
- resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return networkResource, nil, err
- }
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&networkResource)
- return networkResource, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go
deleted file mode 100644
index ed2acb5..0000000
--- a/vendor/github.com/docker/docker/client/network_list.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// NetworkList returns the list of networks configured in the docker host.
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) {
- query := url.Values{}
- if options.Filters.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
- var networkResources []types.NetworkResource
- resp, err := cli.get(ctx, "/networks", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return networkResources, err
- }
- err = json.NewDecoder(resp.body).Decode(&networkResources)
- return networkResources, err
-}
diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go
deleted file mode 100644
index cebb188..0000000
--- a/vendor/github.com/docker/docker/client/network_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// NetworksPrune requests the daemon to delete unused networks
-func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) {
- var report types.NetworksPruneReport
-
- if err := cli.NewVersionError("1.25", "network prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving network prune report: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go
deleted file mode 100644
index e71b16d..0000000
--- a/vendor/github.com/docker/docker/client/network_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// NetworkRemove removes an existent network from the docker host.
-func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
- resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "network", networkID)
-}
diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go
deleted file mode 100644
index d296c9f..0000000
--- a/vendor/github.com/docker/docker/client/node_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// NodeInspectWithRaw returns the node information.
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
- if nodeID == "" {
- return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID}
- }
- serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Node{}, nil, err
- }
-
- var response swarm.Node
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go
deleted file mode 100644
index c212906..0000000
--- a/vendor/github.com/docker/docker/client/node_list.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// NodeList returns the list of nodes.
-func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) {
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
-
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/nodes", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var nodes []swarm.Node
- err = json.NewDecoder(resp.body).Decode(&nodes)
- return nodes, err
-}
diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go
deleted file mode 100644
index 03ab878..0000000
--- a/vendor/github.com/docker/docker/client/node_remove.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// NodeRemove removes a Node.
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "node", nodeID)
-}
diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go
deleted file mode 100644
index de32a61..0000000
--- a/vendor/github.com/docker/docker/client/node_update.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// NodeUpdate updates a Node.
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go
deleted file mode 100644
index 6f77f09..0000000
--- a/vendor/github.com/docker/docker/client/options.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package client
-
-import (
- "context"
- "net"
- "net/http"
- "os"
- "path/filepath"
- "time"
-
- "github.com/docker/go-connections/sockets"
- "github.com/docker/go-connections/tlsconfig"
- "github.com/pkg/errors"
-)
-
-// Opt is a configuration option to initialize a client
-type Opt func(*Client) error
-
-// FromEnv configures the client with values from environment variables.
-//
-// Supported environment variables:
-// DOCKER_HOST to set the url to the docker server.
-// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
-// DOCKER_CERT_PATH to load the TLS certificates from.
-// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
-func FromEnv(c *Client) error {
- if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
- options := tlsconfig.Options{
- CAFile: filepath.Join(dockerCertPath, "ca.pem"),
- CertFile: filepath.Join(dockerCertPath, "cert.pem"),
- KeyFile: filepath.Join(dockerCertPath, "key.pem"),
- InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
- }
- tlsc, err := tlsconfig.Client(options)
- if err != nil {
- return err
- }
-
- c.client = &http.Client{
- Transport: &http.Transport{TLSClientConfig: tlsc},
- CheckRedirect: CheckRedirect,
- }
- }
-
- if host := os.Getenv("DOCKER_HOST"); host != "" {
- if err := WithHost(host)(c); err != nil {
- return err
- }
- }
-
- if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
- if err := WithVersion(version)(c); err != nil {
- return err
- }
- }
- return nil
-}
-
-// WithDialer applies the dialer.DialContext to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-// Deprecated: use WithDialContext
-func WithDialer(dialer *net.Dialer) Opt {
- return WithDialContext(dialer.DialContext)
-}
-
-// WithDialContext applies the dialer to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
- return func(c *Client) error {
- if transport, ok := c.client.Transport.(*http.Transport); ok {
- transport.DialContext = dialContext
- return nil
- }
- return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport)
- }
-}
-
-// WithHost overrides the client host with the specified one.
-func WithHost(host string) Opt {
- return func(c *Client) error {
- hostURL, err := ParseHostURL(host)
- if err != nil {
- return err
- }
- c.host = host
- c.proto = hostURL.Scheme
- c.addr = hostURL.Host
- c.basePath = hostURL.Path
- if transport, ok := c.client.Transport.(*http.Transport); ok {
- return sockets.ConfigureTransport(transport, c.proto, c.addr)
- }
- return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
- }
-}
-
-// WithHTTPClient overrides the client http client with the specified one
-func WithHTTPClient(client *http.Client) Opt {
- return func(c *Client) error {
- if client != nil {
- c.client = client
- }
- return nil
- }
-}
-
-// WithTimeout configures the time limit for requests made by the HTTP client
-func WithTimeout(timeout time.Duration) Opt {
- return func(c *Client) error {
- c.client.Timeout = timeout
- return nil
- }
-}
-
-// WithHTTPHeaders overrides the client default http headers
-func WithHTTPHeaders(headers map[string]string) Opt {
- return func(c *Client) error {
- c.customHTTPHeaders = headers
- return nil
- }
-}
-
-// WithScheme overrides the client scheme with the specified one
-func WithScheme(scheme string) Opt {
- return func(c *Client) error {
- c.scheme = scheme
- return nil
- }
-}
-
-// WithTLSClientConfig applies a tls config to the client transport.
-func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
- return func(c *Client) error {
- opts := tlsconfig.Options{
- CAFile: cacertPath,
- CertFile: certPath,
- KeyFile: keyPath,
- ExclusiveRootPools: true,
- }
- config, err := tlsconfig.Client(opts)
- if err != nil {
- return errors.Wrap(err, "failed to create tls config")
- }
- if transport, ok := c.client.Transport.(*http.Transport); ok {
- transport.TLSClientConfig = config
- return nil
- }
- return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
- }
-}
-
-// WithVersion overrides the client version with the specified one. If an empty
-// version is specified, the value will be ignored to allow version negotiation.
-func WithVersion(version string) Opt {
- return func(c *Client) error {
- if version != "" {
- c.version = version
- c.manualOverride = true
- }
- return nil
- }
-}
-
-// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
-// With this option enabled, the client automatically negotiates the API version
-// to use when making requests. API version negotiation is performed on the first
-// request; subsequent requests will not re-negotiate.
-func WithAPIVersionNegotiation() Opt {
- return func(c *Client) error {
- c.negotiateVersion = true
- return nil
- }
-}
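
For reference, a minimal sketch of how the functional options deleted above (FromEnv, WithAPIVersionNegotiation) were typically combined when constructing a client; NewClientWithOpts and ServerVersion come from other files in the same package (not shown in this hunk), and the program itself is a hypothetical example.

package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// Illustrative sketch. FromEnv reads DOCKER_HOST, DOCKER_API_VERSION,
	// DOCKER_CERT_PATH and DOCKER_TLS_VERIFY; WithAPIVersionNegotiation
	// defers version selection to the first request (see options.go above).
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithAPIVersionNegotiation(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	version, err := cli.ServerVersion(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("connected to daemon API %s", version.APIVersion)
}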
diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go
deleted file mode 100644
index a9af001..0000000
--- a/vendor/github.com/docker/docker/client/ping.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/http"
- "path"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
-)
-
-// Ping pings the server and returns the value of the "Docker-Experimental",
-// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use
-// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported
-// by the daemon.
-func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
- var ping types.Ping
-
- // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
- // because ping requests are used during API version negotiation, so we want
- // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
- req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
- if err != nil {
- return ping, err
- }
- serverResp, err := cli.doRequest(ctx, req)
- if err == nil {
- defer ensureReaderClosed(serverResp)
- switch serverResp.statusCode {
- case http.StatusOK, http.StatusInternalServerError:
- // Server handled the request, so parse the response
- return parsePingResponse(cli, serverResp)
- }
- } else if IsErrConnectionFailed(err) {
- return ping, err
- }
-
- req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil)
- if err != nil {
- return ping, err
- }
- serverResp, err = cli.doRequest(ctx, req)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return ping, err
- }
- return parsePingResponse(cli, serverResp)
-}
-
-func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
- var ping types.Ping
- if resp.header == nil {
- err := cli.checkResponseErr(resp)
- return ping, errdefs.FromStatusCode(err, resp.statusCode)
- }
- ping.APIVersion = resp.header.Get("API-Version")
- ping.OSType = resp.header.Get("OSType")
- if resp.header.Get("Docker-Experimental") == "true" {
- ping.Experimental = true
- }
- if bv := resp.header.Get("Builder-Version"); bv != "" {
- ping.BuilderVersion = types.BuilderVersion(bv)
- }
- err := cli.checkResponseErr(resp)
- return ping, errdefs.FromStatusCode(err, resp.statusCode)
-}
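
A short assumed sketch of calling the Ping helper deleted above and reading the fields it populates from the response headers; the wrapper function is hypothetical.

package dockerping

import (
	"context"

	"github.com/docker/docker/client"
)

// reportDaemon is an illustrative helper: it pings the daemon and returns the
// reported API version plus whether experimental features are enabled,
// mirroring the headers parsed in ping.go above.
func reportDaemon(ctx context.Context, cli *client.Client) (string, bool, error) {
	ping, err := cli.Ping(ctx)
	if err != nil {
		return "", false, err
	}
	return ping.APIVersion, ping.Experimental, nil
}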
diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go
deleted file mode 100644
index b95dbaf..0000000
--- a/vendor/github.com/docker/docker/client/plugin_create.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginCreate creates a plugin
-func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error {
- headers := http.Header(make(map[string][]string))
- headers.Set("Content-Type", "application/x-tar")
-
- query := url.Values{}
- query.Set("name", createOptions.RepoName)
-
- resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go
deleted file mode 100644
index 01f6574..0000000
--- a/vendor/github.com/docker/docker/client/plugin_disable.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginDisable disables a plugin
-func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
- resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go
deleted file mode 100644
index 736da48..0000000
--- a/vendor/github.com/docker/docker/client/plugin_enable.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginEnable enables a plugin
-func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error {
- query := url.Values{}
- query.Set("timeout", strconv.Itoa(options.Timeout))
-
- resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go
deleted file mode 100644
index 81b8973..0000000
--- a/vendor/github.com/docker/docker/client/plugin_inspect.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginInspectWithRaw inspects an existing plugin
-func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
- if name == "" {
- return nil, nil, objectNotFoundError{object: "plugin", id: name}
- }
- resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, nil, wrapResponseError(err, resp, "plugin", name)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return nil, nil, err
- }
- var p types.Plugin
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&p)
- return &p, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go
deleted file mode 100644
index 012afe6..0000000
--- a/vendor/github.com/docker/docker/client/plugin_install.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "io"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/errdefs"
- "github.com/pkg/errors"
-)
-
-// PluginInstall installs a plugin
-func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
- query := url.Values{}
- if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
- return nil, errors.Wrap(err, "invalid remote reference")
- }
- query.Set("remote", options.RemoteRef)
-
- privileges, err := cli.checkPluginPermissions(ctx, query, options)
- if err != nil {
- return nil, err
- }
-
- // set name for plugin pull, if empty should default to remote reference
- query.Set("name", name)
-
- resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
-
- name = resp.header.Get("Docker-Plugin-Name")
-
- pr, pw := io.Pipe()
- go func() { // todo: the client should probably be designed more around the actual api
- _, err := io.Copy(pw, resp.body)
- if err != nil {
- pw.CloseWithError(err)
- return
- }
- defer func() {
- if err != nil {
- delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil)
- ensureReaderClosed(delResp)
- }
- }()
- if len(options.Args) > 0 {
- if err := cli.PluginSet(ctx, name, options.Args); err != nil {
- pw.CloseWithError(err)
- return
- }
- }
-
- if options.Disabled {
- pw.Close()
- return
- }
-
- enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0})
- pw.CloseWithError(enableErr)
- }()
- return pr, nil
-}
-
-func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.get(ctx, "/plugins/privileges", query, headers)
-}
-
-func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/plugins/pull", query, privileges, headers)
-}
-
-func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) {
- resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
- if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil {
- // todo: do inspect before to check existing name before checking privileges
- newAuthHeader, privilegeErr := options.PrivilegeFunc()
- if privilegeErr != nil {
- ensureReaderClosed(resp)
- return nil, privilegeErr
- }
- options.RegistryAuth = newAuthHeader
- resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth)
- }
- if err != nil {
- ensureReaderClosed(resp)
- return nil, err
- }
-
- var privileges types.PluginPrivileges
- if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
- ensureReaderClosed(resp)
- return nil, err
- }
- ensureReaderClosed(resp)
-
- if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 {
- accept, err := options.AcceptPermissionsFunc(privileges)
- if err != nil {
- return nil, err
- }
- if !accept {
- return nil, pluginPermissionDenied{options.RemoteRef}
- }
- }
- return privileges, nil
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go
deleted file mode 100644
index cf1935e..0000000
--- a/vendor/github.com/docker/docker/client/plugin_list.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// PluginList returns the installed plugins
-func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) {
- var plugins types.PluginsListResponse
- query := url.Values{}
-
- if filter.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
- if err != nil {
- return plugins, err
- }
- query.Set("filters", filterJSON)
- }
- resp, err := cli.get(ctx, "/plugins", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return plugins, wrapResponseError(err, resp, "plugin", "")
- }
-
- err = json.NewDecoder(resp.body).Decode(&plugins)
- return plugins, err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go
deleted file mode 100644
index d20bfe8..0000000
--- a/vendor/github.com/docker/docker/client/plugin_push.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
-)
-
-// PluginPush pushes a plugin to a registry
-func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
deleted file mode 100644
index 51ca104..0000000
--- a/vendor/github.com/docker/docker/client/plugin_remove.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types"
-)
-
-// PluginRemove removes a plugin
-func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
- query := url.Values{}
- if options.Force {
- query.Set("force", "1")
- }
-
- resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "plugin", name)
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go
deleted file mode 100644
index dcf5752..0000000
--- a/vendor/github.com/docker/docker/client/plugin_set.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-)
-
-// PluginSet modifies settings for an existing plugin
-func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
- resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go
deleted file mode 100644
index 115cea9..0000000
--- a/vendor/github.com/docker/docker/client/plugin_upgrade.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/pkg/errors"
-)
-
-// PluginUpgrade upgrades a plugin
-func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
- if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
- return nil, err
- }
- query := url.Values{}
- if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
- return nil, errors.Wrap(err, "invalid remote reference")
- }
- query.Set("remote", options.RemoteRef)
-
- privileges, err := cli.checkPluginPermissions(ctx, query, options)
- if err != nil {
- return nil, err
- }
-
- resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
-
-func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
- headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
- return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
deleted file mode 100644
index 813eac2..0000000
--- a/vendor/github.com/docker/docker/client/request.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "strings"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/versions"
- "github.com/docker/docker/errdefs"
- "github.com/pkg/errors"
-)
-
-// serverResponse is a wrapper for http API responses.
-type serverResponse struct {
- body io.ReadCloser
- header http.Header
- statusCode int
- reqURL *url.URL
-}
-
-// head sends an http request to the docker API using the method HEAD.
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers)
-}
-
-// get sends an http request to the docker API using the method GET with a specific Go context.
-func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers)
-}
-
-// post sends an http request to the docker API using the method POST with a specific Go context.
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
- body, headers, err := encodeBody(obj, headers)
- if err != nil {
- return serverResponse{}, err
- }
- return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)
-}
-
-func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers)
-}
-
-// putRaw sends an http request to the docker API using the method PUT.
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers)
-}
-
-// delete sends an http request to the docker API using the method DELETE.
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
- return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers)
-}
-
-type headers map[string][]string
-
-func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {
- if obj == nil {
- return nil, headers, nil
- }
-
- body, err := encodeData(obj)
- if err != nil {
- return nil, headers, err
- }
- if headers == nil {
- headers = make(map[string][]string)
- }
- headers["Content-Type"] = []string{"application/json"}
- return body, headers, nil
-}
-
-func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {
- expectedPayload := (method == http.MethodPost || method == http.MethodPut)
- if expectedPayload && body == nil {
- body = bytes.NewReader([]byte{})
- }
-
- req, err := http.NewRequest(method, path, body)
- if err != nil {
- return nil, err
- }
- req = cli.addHeaders(req, headers)
-
- if cli.proto == "unix" || cli.proto == "npipe" {
- // For local communications, it doesn't matter what the host is. We just
- // need a valid and meaningful host name. (See #189)
- req.Host = "docker"
- }
-
- req.URL.Host = cli.addr
- req.URL.Scheme = cli.scheme
-
- if expectedPayload && req.Header.Get("Content-Type") == "" {
- req.Header.Set("Content-Type", "text/plain")
- }
- return req, nil
-}
-
-func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
- req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers)
- if err != nil {
- return serverResponse{}, err
- }
- resp, err := cli.doRequest(ctx, req)
- if err != nil {
- return resp, errdefs.FromStatusCode(err, resp.statusCode)
- }
- err = cli.checkResponseErr(resp)
- return resp, errdefs.FromStatusCode(err, resp.statusCode)
-}
-
-func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
- serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
-
- req = req.WithContext(ctx)
- resp, err := cli.client.Do(req)
- if err != nil {
- if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
- return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
- }
-
- if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
- return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings")
- }
-
- // Don't decorate context sentinel errors; users may be comparing to
- // them directly.
- if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return serverResp, err
- }
-
- if nErr, ok := err.(*url.Error); ok {
- if nErr, ok := nErr.Err.(*net.OpError); ok {
- if os.IsPermission(nErr.Err) {
- return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
- }
- }
- }
-
- if err, ok := err.(net.Error); ok {
- if err.Timeout() {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
- if !err.Temporary() {
- if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
- return serverResp, ErrorConnectionFailed(cli.host)
- }
- }
- }
-
- // Although there's not a strongly typed error for this in go-winio,
- // lots of people are using the default configuration for the docker
- // daemon on Windows where the daemon is listening on a named pipe
-	// `//./pipe/docker_engine`, and the client must be running elevated.
- // Give users a clue rather than the not-overly useful message
- // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
- // open //./pipe/docker_engine: The system cannot find the file specified.`.
- // Note we can't string compare "The system cannot find the file specified" as
- // this is localised - for example in French the error would be
- // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
- if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
- // Checks if client is running with elevated privileges
- if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil {
- err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
- } else {
- f.Close()
- err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
- }
- }
-
- return serverResp, errors.Wrap(err, "error during connect")
- }
-
- if resp != nil {
- serverResp.statusCode = resp.StatusCode
- serverResp.body = resp.Body
- serverResp.header = resp.Header
- }
- return serverResp, nil
-}
-
-func (cli *Client) checkResponseErr(serverResp serverResponse) error {
- if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
- return nil
- }
-
- var body []byte
- var err error
- if serverResp.body != nil {
- bodyMax := 1 * 1024 * 1024 // 1 MiB
- bodyR := &io.LimitedReader{
- R: serverResp.body,
- N: int64(bodyMax),
- }
- body, err = ioutil.ReadAll(bodyR)
- if err != nil {
- return err
- }
- if bodyR.N == 0 {
- return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)
- }
- }
- if len(body) == 0 {
- return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
- }
-
- var ct string
- if serverResp.header != nil {
- ct = serverResp.header.Get("Content-Type")
- }
-
- var errorMessage string
- if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
- var errorResponse types.ErrorResponse
- if err := json.Unmarshal(body, &errorResponse); err != nil {
- return errors.Wrap(err, "Error reading JSON")
- }
- errorMessage = strings.TrimSpace(errorResponse.Message)
- } else {
- errorMessage = strings.TrimSpace(string(body))
- }
-
- return errors.Wrap(errors.New(errorMessage), "Error response from daemon")
-}
-
-func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
- // Add CLI Config's HTTP Headers BEFORE we set the Docker headers
- // then the user can't change OUR headers
- for k, v := range cli.customHTTPHeaders {
- if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
- continue
- }
- req.Header.Set(k, v)
- }
-
- if headers != nil {
- for k, v := range headers {
- req.Header[k] = v
- }
- }
- return req
-}
-
-func encodeData(data interface{}) (*bytes.Buffer, error) {
- params := bytes.NewBuffer(nil)
- if data != nil {
- if err := json.NewEncoder(params).Encode(data); err != nil {
- return nil, err
- }
- }
- return params, nil
-}
-
-func ensureReaderClosed(response serverResponse) {
- if response.body != nil {
- // Drain up to 512 bytes and close the body to let the Transport reuse the connection
- io.CopyN(ioutil.Discard, response.body, 512)
- response.body.Close()
- }
-}
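
The request helpers deleted above map non-2xx responses through errdefs.FromStatusCode, so callers can branch on typed errors instead of matching message strings; a small hypothetical sketch, assuming the errdefs package from the same module:

package dockerreq

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

// inspectOrNil is an illustrative helper that returns nil (not an error) when
// the network does not exist, relying on the status-code mapping done in
// request.go above.
func inspectOrNil(ctx context.Context, cli *client.Client, id string) (*types.NetworkResource, error) {
	nw, err := cli.NetworkInspect(ctx, id, types.NetworkInspectOptions{})
	if errdefs.IsNotFound(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return &nw, nil
}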
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
deleted file mode 100644
index fd5b914..0000000
--- a/vendor/github.com/docker/docker/client/secret_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretCreate creates a new Secret.
-func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
- var response types.SecretCreateResponse
- if err := cli.NewVersionError("1.25", "secret create"); err != nil {
- return response, err
- }
- resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
deleted file mode 100644
index d093916..0000000
--- a/vendor/github.com/docker/docker/client/secret_inspect.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretInspectWithRaw returns the secret information with raw data
-func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
- if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
- return swarm.Secret{}, nil, err
- }
- if id == "" {
- return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id}
- }
- resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return swarm.Secret{}, nil, err
- }
-
- var secret swarm.Secret
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&secret)
-
- return secret, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
deleted file mode 100644
index a0289c9..0000000
--- a/vendor/github.com/docker/docker/client/secret_list.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretList returns the list of secrets.
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
- if err := cli.NewVersionError("1.25", "secret list"); err != nil {
- return nil, err
- }
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/secrets", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var secrets []swarm.Secret
- err = json.NewDecoder(resp.body).Decode(&secrets)
- return secrets, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
deleted file mode 100644
index c16f555..0000000
--- a/vendor/github.com/docker/docker/client/secret_remove.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// SecretRemove removes a Secret.
-func (cli *Client) SecretRemove(ctx context.Context, id string) error {
- if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
- return err
- }
- resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "secret", id)
-}
diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go
deleted file mode 100644
index 164256b..0000000
--- a/vendor/github.com/docker/docker/client/secret_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SecretUpdate attempts to update a Secret
-func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
- if err := cli.NewVersionError("1.25", "secret update"); err != nil {
- return err
- }
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
deleted file mode 100644
index e0428bf..0000000
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "strings"
-
- "github.com/docker/distribution/reference"
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
- digest "github.com/opencontainers/go-digest"
- "github.com/pkg/errors"
-)
-
-// ServiceCreate creates a new Service.
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
- var response types.ServiceCreateResponse
- headers := map[string][]string{
- "version": {cli.version},
- }
-
- if options.EncodedRegistryAuth != "" {
- headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
- }
-
- // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
- if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
- service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
- }
-
- if err := validateServiceSpec(service); err != nil {
- return response, err
- }
-
- // ensure that the image is tagged
- var resolveWarning string
- switch {
- case service.TaskTemplate.ContainerSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
- service.TaskTemplate.ContainerSpec.Image = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- case service.TaskTemplate.PluginSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
- service.TaskTemplate.PluginSpec.Remote = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- }
-
- resp, err := cli.post(ctx, "/services/create", nil, service, headers)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- if resolveWarning != "" {
- response.Warnings = append(response.Warnings, resolveWarning)
- }
-
- return response, err
-}
-
-func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
- var warning string
- if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil {
- warning = digestWarning(taskSpec.ContainerSpec.Image)
- } else {
- taskSpec.ContainerSpec.Image = img
- if len(imgPlatforms) > 0 {
- if taskSpec.Placement == nil {
- taskSpec.Placement = &swarm.Placement{}
- }
- taskSpec.Placement.Platforms = imgPlatforms
- }
- }
- return warning
-}
-
-func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string {
- var warning string
- if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil {
- warning = digestWarning(taskSpec.PluginSpec.Remote)
- } else {
- taskSpec.PluginSpec.Remote = img
- if len(imgPlatforms) > 0 {
- if taskSpec.Placement == nil {
- taskSpec.Placement = &swarm.Placement{}
- }
- taskSpec.Placement.Platforms = imgPlatforms
- }
- }
- return warning
-}
-
-func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
- distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
- var platforms []swarm.Platform
- if err != nil {
- return "", nil, err
- }
-
- imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
-
- if len(distributionInspect.Platforms) > 0 {
- platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
- for _, p := range distributionInspect.Platforms {
- // clear architecture field for arm. This is a temporary patch to address
- // https://github.com/docker/swarmkit/issues/2294. The issue is that while
- // image manifests report "arm" as the architecture, the node reports
- // something like "armv7l" (includes the variant), which causes arm images
- // to stop working with swarm mode. This patch removes the architecture
- // constraint for arm images to ensure tasks get scheduled.
- arch := p.Architecture
- if strings.ToLower(arch) == "arm" {
- arch = ""
- }
- platforms = append(platforms, swarm.Platform{
- Architecture: arch,
- OS: p.OS,
- })
- }
- }
- return imageWithDigest, platforms, err
-}
-
-// imageWithDigestString takes an image string and a digest, and updates
-// the image string if it didn't originally contain a digest. It returns
-// image unmodified in other situations.
-func imageWithDigestString(image string, dgst digest.Digest) string {
- namedRef, err := reference.ParseNormalizedNamed(image)
- if err == nil {
- if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
- // ensure that image gets a default tag if none is provided
- img, err := reference.WithDigest(namedRef, dgst)
- if err == nil {
- return reference.FamiliarString(img)
- }
- }
- }
- return image
-}
-
-// imageWithTagString takes an image string, and returns a tagged image
-// string, adding a 'latest' tag if one was not provided. It returns an
-// empty string if a canonical reference was provided
-func imageWithTagString(image string) string {
- namedRef, err := reference.ParseNormalizedNamed(image)
- if err == nil {
- return reference.FamiliarString(reference.TagNameOnly(namedRef))
- }
- return ""
-}
-
-// digestWarning constructs a formatted warning string using the
-// image name that could not be pinned by digest. The formatting
-// is hardcoded, but could be made smarter in the future
-func digestWarning(image string) string {
- return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
-}
-
-func validateServiceSpec(s swarm.ServiceSpec) error {
- if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
- return errors.New("must not specify both a container spec and a plugin spec in the task template")
- }
- if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
- return errors.New("mismatched runtime with plugin spec")
- }
- if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
- return errors.New("mismatched runtime with container spec")
- }
- return nil
-}
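
A hedged sketch of a minimal ServiceCreate call against the API deleted above; the service name, image, and helper function are illustrative only.

package dockersvc

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// createNginxService is a hypothetical helper that creates a single-container
// service; ServiceCreate tags the image and validates the spec as shown above.
func createNginxService(ctx context.Context, cli *client.Client) (string, error) {
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
		},
	}
	resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}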
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
deleted file mode 100644
index 2801483..0000000
--- a/vendor/github.com/docker/docker/client/service_inspect.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceInspectWithRaw returns the service information and the raw data.
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
- if serviceID == "" {
- return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID}
- }
- query := url.Values{}
- query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
- serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Service{}, nil, err
- }
-
- var response swarm.Service
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
deleted file mode 100644
index f97ec75..0000000
--- a/vendor/github.com/docker/docker/client/service_list.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceList returns the list of services.
-func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- if options.Status {
- query.Set("status", "true")
- }
-
- resp, err := cli.get(ctx, "/services", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var services []swarm.Service
- err = json.NewDecoder(resp.body).Decode(&services)
- return services, err
-}
diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go
deleted file mode 100644
index 906fd40..0000000
--- a/vendor/github.com/docker/docker/client/service_logs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- timetypes "github.com/docker/docker/api/types/time"
- "github.com/pkg/errors"
-)
-
-// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, errors.Wrap(err, `invalid value for "since"`)
- }
- query.Set("since", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
deleted file mode 100644
index 953a2ad..0000000
--- a/vendor/github.com/docker/docker/client/service_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ServiceRemove kills and removes a service.
-func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
- resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "service", serviceID)
-}
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
deleted file mode 100644
index c63895f..0000000
--- a/vendor/github.com/docker/docker/client/service_update.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes.
-// It should be the value as set *before* the update. You can find this value in the Meta field
-// of swarm.Service, which can be found using ServiceInspectWithRaw.
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
- var (
- query = url.Values{}
- response = types.ServiceUpdateResponse{}
- )
-
- headers := map[string][]string{
- "version": {cli.version},
- }
-
- if options.EncodedRegistryAuth != "" {
- headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
- }
-
- if options.RegistryAuthFrom != "" {
- query.Set("registryAuthFrom", options.RegistryAuthFrom)
- }
-
- if options.Rollback != "" {
- query.Set("rollback", options.Rollback)
- }
-
- query.Set("version", strconv.FormatUint(version.Index, 10))
-
- if err := validateServiceSpec(service); err != nil {
- return response, err
- }
-
- // ensure that the image is tagged
- var resolveWarning string
- switch {
- case service.TaskTemplate.ContainerSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
- service.TaskTemplate.ContainerSpec.Image = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- case service.TaskTemplate.PluginSpec != nil:
- if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
- service.TaskTemplate.PluginSpec.Remote = taggedImg
- }
- if options.QueryRegistry {
- resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth)
- }
- }
-
- resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
- defer ensureReaderClosed(resp)
- if err != nil {
- return response, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&response)
- if resolveWarning != "" {
- response.Warnings = append(response.Warnings, resolveWarning)
- }
-
- return response, err
-}
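
Since ServiceUpdate needs the pre-update version from the service's Meta field (per the comment above), callers typically pair it with ServiceInspectWithRaw; a hypothetical sketch:

package dockersvc

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// forceUpdate is an illustrative helper: it re-submits the current spec with
// the current version index and a bumped ForceUpdate counter, which is enough
// to trigger a rolling restart of the service's tasks.
func forceUpdate(ctx context.Context, cli *client.Client, serviceID string) ([]string, error) {
	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return nil, err
	}
	svc.Spec.TaskTemplate.ForceUpdate++
	resp, err := cli.ServiceUpdate(ctx, serviceID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		return nil, err
	}
	return resp.Warnings, nil
}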
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
deleted file mode 100644
index 19f59dd..0000000
--- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// SwarmGetUnlockKey retrieves the swarm's unlock key.
-func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
- serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return types.SwarmUnlockKeyResponse{}, err
- }
-
- var response types.SwarmUnlockKeyResponse
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go
deleted file mode 100644
index da3c163..0000000
--- a/vendor/github.com/docker/docker/client/swarm_init.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmInit initializes the swarm.
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
- serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return "", err
- }
-
- var response string
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go
deleted file mode 100644
index b52b67a..0000000
--- a/vendor/github.com/docker/docker/client/swarm_inspect.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmInspect inspects the swarm.
-func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
- serverResp, err := cli.get(ctx, "/swarm", nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Swarm{}, err
- }
-
- var response swarm.Swarm
- err = json.NewDecoder(serverResp.body).Decode(&response)
- return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go
deleted file mode 100644
index a1cf045..0000000
--- a/vendor/github.com/docker/docker/client/swarm_join.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmJoin joins the swarm.
-func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
- resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go
deleted file mode 100644
index 90ca84b..0000000
--- a/vendor/github.com/docker/docker/client/swarm_leave.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-)
-
-// SwarmLeave leaves the swarm.
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
- query := url.Values{}
- if force {
- query.Set("force", "1")
- }
- resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go
deleted file mode 100644
index d2412f7..0000000
--- a/vendor/github.com/docker/docker/client/swarm_unlock.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmUnlock unlocks a locked swarm.
-func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
- serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
- ensureReaderClosed(serverResp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go
deleted file mode 100644
index 56a5bea..0000000
--- a/vendor/github.com/docker/docker/client/swarm_update.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "fmt"
- "net/url"
- "strconv"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmUpdate updates the swarm.
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
- query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
- query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
- resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
deleted file mode 100644
index 44d40ba..0000000
--- a/vendor/github.com/docker/docker/client/task_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types/swarm"
-)
-
-// TaskInspectWithRaw returns the task information and its raw representation.
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
- if taskID == "" {
- return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID}
- }
- serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
- }
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Task{}, nil, err
- }
-
- var response swarm.Task
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
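For context, the removed TaskInspectWithRaw follows the client's usual inspect-with-raw pattern: read the full body, then decode from a bytes.Reader so the raw JSON can be handed back alongside the struct. A minimal, illustrative sketch of how a consumer would have called it (the task ID is a placeholder):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/docker/docker/client"
    )

    func main() {
        // Build a client from DOCKER_HOST / DOCKER_API_VERSION environment variables.
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            log.Fatal(err)
        }
        // Returns the decoded swarm.Task plus the raw JSON body it was decoded from.
        task, raw, err := cli.TaskInspectWithRaw(context.Background(), "placeholder-task-id")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("task %s (%d bytes of raw JSON)\n", task.ID, len(raw))
    }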
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
deleted file mode 100644
index 4869b44..0000000
--- a/vendor/github.com/docker/docker/client/task_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
- "github.com/docker/docker/api/types/swarm"
-)
-
-// TaskList returns the list of tasks.
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
- query := url.Values{}
-
- if options.Filters.Len() > 0 {
- filterJSON, err := filters.ToJSON(options.Filters)
- if err != nil {
- return nil, err
- }
-
- query.Set("filters", filterJSON)
- }
-
- resp, err := cli.get(ctx, "/tasks", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return nil, err
- }
-
- var tasks []swarm.Task
- err = json.NewDecoder(resp.body).Decode(&tasks)
- return tasks, err
-}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
deleted file mode 100644
index 6222fab..0000000
--- a/vendor/github.com/docker/docker/client/task_logs.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "io"
- "net/url"
- "time"
-
- "github.com/docker/docker/api/types"
- timetypes "github.com/docker/docker/api/types/time"
-)
-
-// TaskLogs returns the logs generated by a task in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
- query := url.Values{}
- if options.ShowStdout {
- query.Set("stdout", "1")
- }
-
- if options.ShowStderr {
- query.Set("stderr", "1")
- }
-
- if options.Since != "" {
- ts, err := timetypes.GetTimestamp(options.Since, time.Now())
- if err != nil {
- return nil, err
- }
- query.Set("since", ts)
- }
-
- if options.Timestamps {
- query.Set("timestamps", "1")
- }
-
- if options.Details {
- query.Set("details", "1")
- }
-
- if options.Follow {
- query.Set("follow", "1")
- }
- query.Set("tail", options.Tail)
-
- resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
- if err != nil {
- return nil, err
- }
- return resp.body, nil
-}
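TaskLogs hands back the raw response body as an io.ReadCloser, so closing it is the caller's responsibility. A hedged usage sketch (task ID is a placeholder; without a TTY the stream is the daemon's multiplexed stdout/stderr format):

    package main

    import (
        "context"
        "io"
        "log"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            log.Fatal(err)
        }
        rc, err := cli.TaskLogs(context.Background(), "placeholder-task-id", types.ContainerLogsOptions{
            ShowStdout: true,
            ShowStderr: true,
            Tail:       "50",
        })
        if err != nil {
            log.Fatal(err)
        }
        defer rc.Close() // the client does not close the stream for the caller
        if _, err := io.Copy(os.Stdout, rc); err != nil {
            log.Fatal(err)
        }
    }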
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
deleted file mode 100644
index 5541344..0000000
--- a/vendor/github.com/docker/docker/client/transport.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "crypto/tls"
- "net/http"
-)
-
-// resolveTLSConfig attempts to resolve the TLS configuration from the
-// RoundTripper.
-func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
- switch tr := transport.(type) {
- case *http.Transport:
- return tr.TLSClientConfig
- default:
- return nil
- }
-}
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
deleted file mode 100644
index 7f3ff44..0000000
--- a/vendor/github.com/docker/docker/client/utils.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "net/url"
- "regexp"
-
- "github.com/docker/docker/api/types/filters"
-)
-
-var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
-
-// getDockerOS returns the operating system based on the server header from the daemon.
-func getDockerOS(serverHeader string) string {
- var osType string
- matches := headerRegexp.FindStringSubmatch(serverHeader)
- if len(matches) > 0 {
- osType = matches[1]
- }
- return osType
-}
-
-// getFiltersQuery returns a url query with "filters" query term, based on the
-// filters provided.
-func getFiltersQuery(f filters.Args) (url.Values, error) {
- query := url.Values{}
- if f.Len() > 0 {
- filterJSON, err := filters.ToJSON(f)
- if err != nil {
- return query, err
- }
- query.Set("filters", filterJSON)
- }
- return query, nil
-}
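getFiltersQuery is the shared helper that serializes filters.Args into the "filters" query parameter used by the list endpoints. It is unexported, but the same behavior is reachable through the public filters API; a small sketch (label value is made up):

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/api/types/filters"
    )

    func main() {
        // Build the same filter structure the client serializes into ?filters=...
        args := filters.NewArgs(filters.Arg("label", "env=prod"))
        filterJSON, err := filters.ToJSON(args)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(filterJSON) // e.g. {"label":{"env=prod":true}}
    }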
diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go
deleted file mode 100644
index 8f17ff4..0000000
--- a/vendor/github.com/docker/docker/client/version.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
-)
-
-// ServerVersion returns information of the docker client and server host.
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
- resp, err := cli.get(ctx, "/version", nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return types.Version{}, err
- }
-
- var server types.Version
- err = json.NewDecoder(resp.body).Decode(&server)
- return server, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go
deleted file mode 100644
index 92761b3..0000000
--- a/vendor/github.com/docker/docker/client/volume_create.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
-
- "github.com/docker/docker/api/types"
- volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// VolumeCreate creates a volume in the docker host.
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) {
- var volume types.Volume
- resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return volume, err
- }
- err = json.NewDecoder(resp.body).Decode(&volume)
- return volume, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
deleted file mode 100644
index e20b2c6..0000000
--- a/vendor/github.com/docker/docker/client/volume_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "io/ioutil"
-
- "github.com/docker/docker/api/types"
-)
-
-// VolumeInspect returns the information about a specific volume in the docker host.
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
- volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
- return volume, err
-}
-
-// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
-func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
- if volumeID == "" {
- return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
- }
-
- var volume types.Volume
- resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
- }
-
- body, err := ioutil.ReadAll(resp.body)
- if err != nil {
- return volume, nil, err
- }
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&volume)
- return volume, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
deleted file mode 100644
index 942498d..0000000
--- a/vendor/github.com/docker/docker/client/volume_list.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "net/url"
-
- "github.com/docker/docker/api/types/filters"
- volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// VolumeList returns the volumes configured in the docker host.
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) {
- var volumes volumetypes.VolumeListOKBody
- query := url.Values{}
-
- if filter.Len() > 0 {
- //nolint:staticcheck // ignore SA1019 for old code
- filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
- if err != nil {
- return volumes, err
- }
- query.Set("filters", filterJSON)
- }
- resp, err := cli.get(ctx, "/volumes", query, nil)
- defer ensureReaderClosed(resp)
- if err != nil {
- return volumes, err
- }
-
- err = json.NewDecoder(resp.body).Decode(&volumes)
- return volumes, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
deleted file mode 100644
index 6e32470..0000000
--- a/vendor/github.com/docker/docker/client/volume_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "encoding/json"
- "fmt"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/filters"
-)
-
-// VolumesPrune requests the daemon to delete unused data
-func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
- var report types.VolumesPruneReport
-
- if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
- return report, err
- }
-
- query, err := getFiltersQuery(pruneFilters)
- if err != nil {
- return report, err
- }
-
- serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
- defer ensureReaderClosed(serverResp)
- if err != nil {
- return report, err
- }
-
- if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
- return report, fmt.Errorf("Error retrieving volume prune report: %v", err)
- }
-
- return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
deleted file mode 100644
index 79decda..0000000
--- a/vendor/github.com/docker/docker/client/volume_remove.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
- "context"
- "net/url"
-
- "github.com/docker/docker/api/types/versions"
-)
-
-// VolumeRemove removes a volume from the docker host.
-func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
- query := url.Values{}
- if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
- if force {
- query.Set("force", "1")
- }
- }
- resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
- defer ensureReaderClosed(resp)
- return wrapResponseError(err, resp, "volume", volumeID)
-}
diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go
deleted file mode 100644
index 61e7456..0000000
--- a/vendor/github.com/docker/docker/errdefs/defs.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package errdefs // import "github.com/docker/docker/errdefs"
-
-// ErrNotFound signals that the requested object doesn't exist
-type ErrNotFound interface {
- NotFound()
-}
-
-// ErrInvalidParameter signals that the user input is invalid
-type ErrInvalidParameter interface {
- InvalidParameter()
-}
-
-// ErrConflict signals that some internal state conflicts with the requested action and can't be performed.
-// A change in state should be able to clear this error.
-type ErrConflict interface {
- Conflict()
-}
-
-// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action
-type ErrUnauthorized interface {
- Unauthorized()
-}
-
-// ErrUnavailable signals that the requested action/subsystem is not available.
-type ErrUnavailable interface {
- Unavailable()
-}
-
-// ErrForbidden signals that the requested action cannot be performed under any circumstances.
-// When a ErrForbidden is returned, the caller should never retry the action.
-type ErrForbidden interface {
- Forbidden()
-}
-
-// ErrSystem signals that some internal error occurred.
-// An example of this would be a failed mount request.
-type ErrSystem interface {
- System()
-}
-
-// ErrNotModified signals that an action can't be performed because it's already in the desired state
-type ErrNotModified interface {
- NotModified()
-}
-
-// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured.
-type ErrNotImplemented interface {
- NotImplemented()
-}
-
-// ErrUnknown signals that the kind of error that occurred is not known.
-type ErrUnknown interface {
- Unknown()
-}
-
-// ErrCancelled signals that the action was cancelled.
-type ErrCancelled interface {
- Cancelled()
-}
-
-// ErrDeadline signals that the deadline was reached before the action completed.
-type ErrDeadline interface {
- DeadlineExceeded()
-}
-
-// ErrDataLoss indicates that data was lost or there is data corruption.
-type ErrDataLoss interface {
- DataLoss()
-}
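These interfaces are deliberately method-only markers: any error type can opt into a class by implementing the matching no-argument method. A brief sketch of a caller-defined error opting into ErrNotFound (type and volume name are illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/docker/errdefs"
    )

    // missingVolume opts into the NotFound class simply by implementing the marker method.
    type missingVolume struct{ name string }

    func (e missingVolume) Error() string { return fmt.Sprintf("volume %q not found", e.name) }
    func (missingVolume) NotFound()       {}

    func main() {
        var err error = missingVolume{name: "scratch"}
        fmt.Println(errdefs.IsNotFound(err)) // true
    }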
diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go
deleted file mode 100644
index c211f17..0000000
--- a/vendor/github.com/docker/docker/errdefs/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors.
-// Errors that cross the package boundary should implement one (and only one) of these interfaces.
-//
-// Packages should not reference these interfaces directly, only implement them.
-// To check if a particular error implements one of these interfaces, there are helper
-// functions provided (e.g. `Is<SomeError>`) which can be used rather than asserting the interfaces directly.
-// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).
-package errdefs // import "github.com/docker/docker/errdefs"
diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go
deleted file mode 100644
index fe06fb6..0000000
--- a/vendor/github.com/docker/docker/errdefs/helpers.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package errdefs // import "github.com/docker/docker/errdefs"
-
-import "context"
-
-type errNotFound struct{ error }
-
-func (errNotFound) NotFound() {}
-
-func (e errNotFound) Cause() error {
- return e.error
-}
-
-func (e errNotFound) Unwrap() error {
- return e.error
-}
-
-// NotFound is a helper to create an error of the class with the same name from any error type
-func NotFound(err error) error {
- if err == nil || IsNotFound(err) {
- return err
- }
- return errNotFound{err}
-}
-
-type errInvalidParameter struct{ error }
-
-func (errInvalidParameter) InvalidParameter() {}
-
-func (e errInvalidParameter) Cause() error {
- return e.error
-}
-
-func (e errInvalidParameter) Unwrap() error {
- return e.error
-}
-
-// InvalidParameter is a helper to create an error of the class with the same name from any error type
-func InvalidParameter(err error) error {
- if err == nil || IsInvalidParameter(err) {
- return err
- }
- return errInvalidParameter{err}
-}
-
-type errConflict struct{ error }
-
-func (errConflict) Conflict() {}
-
-func (e errConflict) Cause() error {
- return e.error
-}
-
-func (e errConflict) Unwrap() error {
- return e.error
-}
-
-// Conflict is a helper to create an error of the class with the same name from any error type
-func Conflict(err error) error {
- if err == nil || IsConflict(err) {
- return err
- }
- return errConflict{err}
-}
-
-type errUnauthorized struct{ error }
-
-func (errUnauthorized) Unauthorized() {}
-
-func (e errUnauthorized) Cause() error {
- return e.error
-}
-
-func (e errUnauthorized) Unwrap() error {
- return e.error
-}
-
-// Unauthorized is a helper to create an error of the class with the same name from any error type
-func Unauthorized(err error) error {
- if err == nil || IsUnauthorized(err) {
- return err
- }
- return errUnauthorized{err}
-}
-
-type errUnavailable struct{ error }
-
-func (errUnavailable) Unavailable() {}
-
-func (e errUnavailable) Cause() error {
- return e.error
-}
-
-func (e errUnavailable) Unwrap() error {
- return e.error
-}
-
-// Unavailable is a helper to create an error of the class with the same name from any error type
-func Unavailable(err error) error {
- if err == nil || IsUnavailable(err) {
- return err
- }
- return errUnavailable{err}
-}
-
-type errForbidden struct{ error }
-
-func (errForbidden) Forbidden() {}
-
-func (e errForbidden) Cause() error {
- return e.error
-}
-
-func (e errForbidden) Unwrap() error {
- return e.error
-}
-
-// Forbidden is a helper to create an error of the class with the same name from any error type
-func Forbidden(err error) error {
- if err == nil || IsForbidden(err) {
- return err
- }
- return errForbidden{err}
-}
-
-type errSystem struct{ error }
-
-func (errSystem) System() {}
-
-func (e errSystem) Cause() error {
- return e.error
-}
-
-func (e errSystem) Unwrap() error {
- return e.error
-}
-
-// System is a helper to create an error of the class with the same name from any error type
-func System(err error) error {
- if err == nil || IsSystem(err) {
- return err
- }
- return errSystem{err}
-}
-
-type errNotModified struct{ error }
-
-func (errNotModified) NotModified() {}
-
-func (e errNotModified) Cause() error {
- return e.error
-}
-
-func (e errNotModified) Unwrap() error {
- return e.error
-}
-
-// NotModified is a helper to create an error of the class with the same name from any error type
-func NotModified(err error) error {
- if err == nil || IsNotModified(err) {
- return err
- }
- return errNotModified{err}
-}
-
-type errNotImplemented struct{ error }
-
-func (errNotImplemented) NotImplemented() {}
-
-func (e errNotImplemented) Cause() error {
- return e.error
-}
-
-func (e errNotImplemented) Unwrap() error {
- return e.error
-}
-
-// NotImplemented is a helper to create an error of the class with the same name from any error type
-func NotImplemented(err error) error {
- if err == nil || IsNotImplemented(err) {
- return err
- }
- return errNotImplemented{err}
-}
-
-type errUnknown struct{ error }
-
-func (errUnknown) Unknown() {}
-
-func (e errUnknown) Cause() error {
- return e.error
-}
-
-func (e errUnknown) Unwrap() error {
- return e.error
-}
-
-// Unknown is a helper to create an error of the class with the same name from any error type
-func Unknown(err error) error {
- if err == nil || IsUnknown(err) {
- return err
- }
- return errUnknown{err}
-}
-
-type errCancelled struct{ error }
-
-func (errCancelled) Cancelled() {}
-
-func (e errCancelled) Cause() error {
- return e.error
-}
-
-func (e errCancelled) Unwrap() error {
- return e.error
-}
-
-// Cancelled is a helper to create an error of the class with the same name from any error type
-func Cancelled(err error) error {
- if err == nil || IsCancelled(err) {
- return err
- }
- return errCancelled{err}
-}
-
-type errDeadline struct{ error }
-
-func (errDeadline) DeadlineExceeded() {}
-
-func (e errDeadline) Cause() error {
- return e.error
-}
-
-func (e errDeadline) Unwrap() error {
- return e.error
-}
-
-// Deadline is a helper to create an error of the class with the same name from any error type
-func Deadline(err error) error {
- if err == nil || IsDeadline(err) {
- return err
- }
- return errDeadline{err}
-}
-
-type errDataLoss struct{ error }
-
-func (errDataLoss) DataLoss() {}
-
-func (e errDataLoss) Cause() error {
- return e.error
-}
-
-func (e errDataLoss) Unwrap() error {
- return e.error
-}
-
-// DataLoss is a helper to create an error of the class with the same name from any error type
-func DataLoss(err error) error {
- if err == nil || IsDataLoss(err) {
- return err
- }
- return errDataLoss{err}
-}
-
-// FromContext returns the error class from the passed in context
-func FromContext(ctx context.Context) error {
- e := ctx.Err()
- if e == nil {
- return nil
- }
-
- if e == context.Canceled {
- return Cancelled(e)
- }
- if e == context.DeadlineExceeded {
- return Deadline(e)
- }
- return Unknown(e)
-}
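Each helper wraps an arbitrary error so it picks up a class while still exposing the original via Cause()/Unwrap(). A minimal sketch, including the FromContext convenience (error message is made up):

    package main

    import (
        "context"
        "errors"
        "fmt"

        "github.com/docker/docker/errdefs"
    )

    func main() {
        // Wrap a plain error into the InvalidParameter class.
        err := errdefs.InvalidParameter(errors.New("port must be between 1 and 65535"))
        fmt.Println(errdefs.IsInvalidParameter(err)) // true
        fmt.Println(errors.Unwrap(err))              // port must be between 1 and 65535

        // Classify a context error as Cancelled.
        ctx, cancel := context.WithCancel(context.Background())
        cancel()
        fmt.Println(errdefs.IsCancelled(errdefs.FromContext(ctx))) // true
    }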
diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go
deleted file mode 100644
index 07552f1..0000000
--- a/vendor/github.com/docker/docker/errdefs/http_helpers.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package errdefs // import "github.com/docker/docker/errdefs"
-
-import (
- "fmt"
- "net/http"
-
- containerderrors "github.com/containerd/containerd/errdefs"
- "github.com/docker/distribution/registry/api/errcode"
- "github.com/sirupsen/logrus"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// GetHTTPErrorStatusCode retrieves status code from error message.
-func GetHTTPErrorStatusCode(err error) int {
- if err == nil {
- logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
- return http.StatusInternalServerError
- }
-
- var statusCode int
-
- // Stop right there
- // Are you sure you should be adding a new error class here? Do one of the existing ones work?
-
- // Note that the below functions are already checking the error causal chain for matches.
- switch {
- case IsNotFound(err):
- statusCode = http.StatusNotFound
- case IsInvalidParameter(err):
- statusCode = http.StatusBadRequest
- case IsConflict(err):
- statusCode = http.StatusConflict
- case IsUnauthorized(err):
- statusCode = http.StatusUnauthorized
- case IsUnavailable(err):
- statusCode = http.StatusServiceUnavailable
- case IsForbidden(err):
- statusCode = http.StatusForbidden
- case IsNotModified(err):
- statusCode = http.StatusNotModified
- case IsNotImplemented(err):
- statusCode = http.StatusNotImplemented
- case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err):
- statusCode = http.StatusInternalServerError
- default:
- statusCode = statusCodeFromGRPCError(err)
- if statusCode != http.StatusInternalServerError {
- return statusCode
- }
- statusCode = statusCodeFromContainerdError(err)
- if statusCode != http.StatusInternalServerError {
- return statusCode
- }
- statusCode = statusCodeFromDistributionError(err)
- if statusCode != http.StatusInternalServerError {
- return statusCode
- }
- if e, ok := err.(causer); ok {
- return GetHTTPErrorStatusCode(e.Cause())
- }
-
- logrus.WithFields(logrus.Fields{
- "module": "api",
- "error_type": fmt.Sprintf("%T", err),
- }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
- }
-
- if statusCode == 0 {
- statusCode = http.StatusInternalServerError
- }
-
- return statusCode
-}
-
-// FromStatusCode creates an errdef error, based on the provided HTTP status-code
-func FromStatusCode(err error, statusCode int) error {
- if err == nil {
- return err
- }
- switch statusCode {
- case http.StatusNotFound:
- err = NotFound(err)
- case http.StatusBadRequest:
- err = InvalidParameter(err)
- case http.StatusConflict:
- err = Conflict(err)
- case http.StatusUnauthorized:
- err = Unauthorized(err)
- case http.StatusServiceUnavailable:
- err = Unavailable(err)
- case http.StatusForbidden:
- err = Forbidden(err)
- case http.StatusNotModified:
- err = NotModified(err)
- case http.StatusNotImplemented:
- err = NotImplemented(err)
- case http.StatusInternalServerError:
- if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {
- err = System(err)
- }
- default:
- logrus.WithFields(logrus.Fields{
- "module": "api",
- "status_code": fmt.Sprintf("%d", statusCode),
- }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode)
-
- switch {
- case statusCode >= 200 && statusCode < 400:
- // it's a client error
- case statusCode >= 400 && statusCode < 500:
- err = InvalidParameter(err)
- case statusCode >= 500 && statusCode < 600:
- err = System(err)
- default:
- err = Unknown(err)
- }
- }
- return err
-}
-
-// statusCodeFromGRPCError returns status code according to gRPC error
-func statusCodeFromGRPCError(err error) int {
- switch status.Code(err) {
- case codes.InvalidArgument: // code 3
- return http.StatusBadRequest
- case codes.NotFound: // code 5
- return http.StatusNotFound
- case codes.AlreadyExists: // code 6
- return http.StatusConflict
- case codes.PermissionDenied: // code 7
- return http.StatusForbidden
- case codes.FailedPrecondition: // code 9
- return http.StatusBadRequest
- case codes.Unauthenticated: // code 16
- return http.StatusUnauthorized
- case codes.OutOfRange: // code 11
- return http.StatusBadRequest
- case codes.Unimplemented: // code 12
- return http.StatusNotImplemented
- case codes.Unavailable: // code 14
- return http.StatusServiceUnavailable
- default:
- // codes.Canceled(1)
- // codes.Unknown(2)
- // codes.DeadlineExceeded(4)
- // codes.ResourceExhausted(8)
- // codes.Aborted(10)
- // codes.Internal(13)
- // codes.DataLoss(15)
- return http.StatusInternalServerError
- }
-}
-
-// statusCodeFromDistributionError returns status code according to registry errcode
-// code is loosely based on errcode.ServeJSON() in docker/distribution
-func statusCodeFromDistributionError(err error) int {
- switch errs := err.(type) {
- case errcode.Errors:
- if len(errs) < 1 {
- return http.StatusInternalServerError
- }
- if _, ok := errs[0].(errcode.ErrorCoder); ok {
- return statusCodeFromDistributionError(errs[0])
- }
- case errcode.ErrorCoder:
- return errs.ErrorCode().Descriptor().HTTPStatusCode
- }
- return http.StatusInternalServerError
-}
-
-// statusCodeFromContainerdError returns status code for containerd errors when
-// consumed directly (not through gRPC)
-func statusCodeFromContainerdError(err error) int {
- switch {
- case containerderrors.IsInvalidArgument(err):
- return http.StatusBadRequest
- case containerderrors.IsNotFound(err):
- return http.StatusNotFound
- case containerderrors.IsAlreadyExists(err):
- return http.StatusConflict
- case containerderrors.IsFailedPrecondition(err):
- return http.StatusPreconditionFailed
- case containerderrors.IsUnavailable(err):
- return http.StatusServiceUnavailable
- case containerderrors.IsNotImplemented(err):
- return http.StatusNotImplemented
- default:
- return http.StatusInternalServerError
- }
-}
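FromStatusCode goes the other direction: it classifies an error based on the HTTP status the daemon returned, which is how the client surfaces typed errors to callers. A small sketch (the wrapped message is made up):

    package main

    import (
        "errors"
        "fmt"
        "net/http"

        "github.com/docker/docker/errdefs"
    )

    func main() {
        raw := errors.New("no such volume: scratch")

        err := errdefs.FromStatusCode(raw, http.StatusNotFound)
        fmt.Println(errdefs.IsNotFound(err)) // true

        err = errdefs.FromStatusCode(raw, http.StatusConflict)
        fmt.Println(errdefs.IsConflict(err)) // true
    }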
diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go
deleted file mode 100644
index 3abf07d..0000000
--- a/vendor/github.com/docker/docker/errdefs/is.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package errdefs // import "github.com/docker/docker/errdefs"
-
-type causer interface {
- Cause() error
-}
-
-func getImplementer(err error) error {
- switch e := err.(type) {
- case
- ErrNotFound,
- ErrInvalidParameter,
- ErrConflict,
- ErrUnauthorized,
- ErrUnavailable,
- ErrForbidden,
- ErrSystem,
- ErrNotModified,
- ErrNotImplemented,
- ErrCancelled,
- ErrDeadline,
- ErrDataLoss,
- ErrUnknown:
- return err
- case causer:
- return getImplementer(e.Cause())
- default:
- return err
- }
-}
-
-// IsNotFound returns if the passed in error is an ErrNotFound
-func IsNotFound(err error) bool {
- _, ok := getImplementer(err).(ErrNotFound)
- return ok
-}
-
-// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter
-func IsInvalidParameter(err error) bool {
- _, ok := getImplementer(err).(ErrInvalidParameter)
- return ok
-}
-
-// IsConflict returns if the passed in error is an ErrConflict
-func IsConflict(err error) bool {
- _, ok := getImplementer(err).(ErrConflict)
- return ok
-}
-
-// IsUnauthorized returns if the passed in error is an ErrUnauthorized
-func IsUnauthorized(err error) bool {
- _, ok := getImplementer(err).(ErrUnauthorized)
- return ok
-}
-
-// IsUnavailable returns if the passed in error is an ErrUnavailable
-func IsUnavailable(err error) bool {
- _, ok := getImplementer(err).(ErrUnavailable)
- return ok
-}
-
-// IsForbidden returns if the passed in error is an ErrForbidden
-func IsForbidden(err error) bool {
- _, ok := getImplementer(err).(ErrForbidden)
- return ok
-}
-
-// IsSystem returns if the passed in error is an ErrSystem
-func IsSystem(err error) bool {
- _, ok := getImplementer(err).(ErrSystem)
- return ok
-}
-
-// IsNotModified returns if the passed in error is a NotModified error
-func IsNotModified(err error) bool {
- _, ok := getImplementer(err).(ErrNotModified)
- return ok
-}
-
-// IsNotImplemented returns if the passed in error is an ErrNotImplemented
-func IsNotImplemented(err error) bool {
- _, ok := getImplementer(err).(ErrNotImplemented)
- return ok
-}
-
-// IsUnknown returns if the passed in error is an ErrUnknown
-func IsUnknown(err error) bool {
- _, ok := getImplementer(err).(ErrUnknown)
- return ok
-}
-
-// IsCancelled returns if the passed in error is an ErrCancelled
-func IsCancelled(err error) bool {
- _, ok := getImplementer(err).(ErrCancelled)
- return ok
-}
-
-// IsDeadline returns if the passed in error is an ErrDeadline
-func IsDeadline(err error) bool {
- _, ok := getImplementer(err).(ErrDeadline)
- return ok
-}
-
-// IsDataLoss returns if the passed in error is an ErrDataLoss
-func IsDataLoss(err error) bool {
- _, ok := getImplementer(err).(ErrDataLoss)
- return ok
-}
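The Is* checks walk the Cause() chain via getImplementer, so a classified error stays detectable even after further wrapping, assuming the wrapper exposes Cause() (pkg/errors does). A sketch under that assumption:

    package main

    import (
        "fmt"

        "github.com/docker/docker/errdefs"
        "github.com/pkg/errors"
    )

    func main() {
        base := errdefs.NotFound(fmt.Errorf("no such task"))
        // pkg/errors wrapping preserves Cause(), so the class is still visible.
        wrapped := errors.Wrap(base, "inspecting task")
        fmt.Println(errdefs.IsNotFound(wrapped)) // true
    }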
diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE
deleted file mode 100644
index b55b37b..0000000
--- a/vendor/github.com/docker/go-connections/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2015 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go
deleted file mode 100644
index bb7e4e3..0000000
--- a/vendor/github.com/docker/go-connections/nat/nat.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Package nat is a convenience package for manipulation of strings describing network ports.
-package nat
-
-import (
- "fmt"
- "net"
- "strconv"
- "strings"
-)
-
-const (
- // portSpecTemplate is the expected format for port specifications
- portSpecTemplate = "ip:hostPort:containerPort"
-)
-
-// PortBinding represents a binding between a Host IP address and a Host Port
-type PortBinding struct {
- // HostIP is the host IP Address
- HostIP string `json:"HostIp"`
- // HostPort is the host port number
- HostPort string
-}
-
-// PortMap is a collection of PortBinding indexed by Port
-type PortMap map[Port][]PortBinding
-
-// PortSet is a collection of structs indexed by Port
-type PortSet map[Port]struct{}
-
-// Port is a string containing port number and protocol in the format "80/tcp"
-type Port string
-
-// NewPort creates a new instance of a Port given a protocol and port number or port range
-func NewPort(proto, port string) (Port, error) {
- // Check for parsing issues on "port" now so we can avoid having
- // to check it later on.
-
- portStartInt, portEndInt, err := ParsePortRangeToInt(port)
- if err != nil {
- return "", err
- }
-
- if portStartInt == portEndInt {
- return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil
- }
- return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil
-}
-
-// ParsePort parses the port number string and returns an int
-func ParsePort(rawPort string) (int, error) {
- if len(rawPort) == 0 {
- return 0, nil
- }
- port, err := strconv.ParseUint(rawPort, 10, 16)
- if err != nil {
- return 0, err
- }
- return int(port), nil
-}
-
-// ParsePortRangeToInt parses the port range string and returns start/end ints
-func ParsePortRangeToInt(rawPort string) (int, int, error) {
- if len(rawPort) == 0 {
- return 0, 0, nil
- }
- start, end, err := ParsePortRange(rawPort)
- if err != nil {
- return 0, 0, err
- }
- return int(start), int(end), nil
-}
-
-// Proto returns the protocol of a Port
-func (p Port) Proto() string {
- proto, _ := SplitProtoPort(string(p))
- return proto
-}
-
-// Port returns the port number of a Port
-func (p Port) Port() string {
- _, port := SplitProtoPort(string(p))
- return port
-}
-
-// Int returns the port number of a Port as an int
-func (p Port) Int() int {
- portStr := p.Port()
- // We don't need to check for an error because we're going to
- // assume that any error would have been found, and reported, in NewPort()
- port, _ := ParsePort(portStr)
- return port
-}
-
-// Range returns the start/end port numbers of a Port range as ints
-func (p Port) Range() (int, int, error) {
- return ParsePortRangeToInt(p.Port())
-}
-
-// SplitProtoPort splits a port in the format of proto/port
-func SplitProtoPort(rawPort string) (string, string) {
- parts := strings.Split(rawPort, "/")
- l := len(parts)
- if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 {
- return "", ""
- }
- if l == 1 {
- return "tcp", rawPort
- }
- if len(parts[1]) == 0 {
- return "tcp", parts[0]
- }
- return parts[1], parts[0]
-}
-
-func validateProto(proto string) bool {
- for _, availableProto := range []string{"tcp", "udp", "sctp"} {
- if availableProto == proto {
- return true
- }
- }
- return false
-}
-
-// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses
-// these in to the internal types
-func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) {
- var (
- exposedPorts = make(map[Port]struct{}, len(ports))
- bindings = make(map[Port][]PortBinding)
- )
- for _, rawPort := range ports {
- portMappings, err := ParsePortSpec(rawPort)
- if err != nil {
- return nil, nil, err
- }
-
- for _, portMapping := range portMappings {
- port := portMapping.Port
- if _, exists := exposedPorts[port]; !exists {
- exposedPorts[port] = struct{}{}
- }
- bslice, exists := bindings[port]
- if !exists {
- bslice = []PortBinding{}
- }
- bindings[port] = append(bslice, portMapping.Binding)
- }
- }
- return exposedPorts, bindings, nil
-}
-
-// PortMapping is a data object mapping a Port to a PortBinding
-type PortMapping struct {
- Port Port
- Binding PortBinding
-}
-
-func splitParts(rawport string) (string, string, string) {
- parts := strings.Split(rawport, ":")
- n := len(parts)
- containerport := parts[n-1]
-
- switch n {
- case 1:
- return "", "", containerport
- case 2:
- return "", parts[0], containerport
- case 3:
- return parts[0], parts[1], containerport
- default:
- return strings.Join(parts[:n-2], ":"), parts[n-2], containerport
- }
-}
-
-// ParsePortSpec parses a port specification string into a slice of PortMappings
-func ParsePortSpec(rawPort string) ([]PortMapping, error) {
- var proto string
- rawIP, hostPort, containerPort := splitParts(rawPort)
- proto, containerPort = SplitProtoPort(containerPort)
-
- // Strip [] from IPV6 addresses
- ip, _, err := net.SplitHostPort(rawIP + ":")
- if err != nil {
- return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err)
- }
- if ip != "" && net.ParseIP(ip) == nil {
- return nil, fmt.Errorf("Invalid ip address: %s", ip)
- }
- if containerPort == "" {
- return nil, fmt.Errorf("No port specified: %s<empty>", rawPort)
- }
-
- startPort, endPort, err := ParsePortRange(containerPort)
- if err != nil {
- return nil, fmt.Errorf("Invalid containerPort: %s", containerPort)
- }
-
- var startHostPort, endHostPort uint64 = 0, 0
- if len(hostPort) > 0 {
- startHostPort, endHostPort, err = ParsePortRange(hostPort)
- if err != nil {
- return nil, fmt.Errorf("Invalid hostPort: %s", hostPort)
- }
- }
-
- if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) {
- // Allow host port range iff containerPort is not a range.
- // In this case, use the host port range as the dynamic
- // host port range to allocate into.
- if endPort != startPort {
- return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort)
- }
- }
-
- if !validateProto(strings.ToLower(proto)) {
- return nil, fmt.Errorf("Invalid proto: %s", proto)
- }
-
- ports := []PortMapping{}
- for i := uint64(0); i <= (endPort - startPort); i++ {
- containerPort = strconv.FormatUint(startPort+i, 10)
- if len(hostPort) > 0 {
- hostPort = strconv.FormatUint(startHostPort+i, 10)
- }
- // Set hostPort to a range only if there is a single container port
- // and a dynamic host port.
- if startPort == endPort && startHostPort != endHostPort {
- hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10))
- }
- port, err := NewPort(strings.ToLower(proto), containerPort)
- if err != nil {
- return nil, err
- }
-
- binding := PortBinding{
- HostIP: ip,
- HostPort: hostPort,
- }
- ports = append(ports, PortMapping{Port: port, Binding: binding})
- }
- return ports, nil
-}
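ParsePortSpec is the workhorse behind "-p"-style publish flags: it expands an ip:hostPort:containerPort/proto string into PortMappings. A quick sketch with a made-up spec:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/go-connections/nat"
    )

    func main() {
        // Expands to two mappings on 127.0.0.1: 8080->80/tcp and 8081->81/tcp.
        mappings, err := nat.ParsePortSpec("127.0.0.1:8080-8081:80-81/tcp")
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range mappings {
            fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
        }
    }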
diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go
deleted file mode 100644
index 892adf8..0000000
--- a/vendor/github.com/docker/go-connections/nat/parse.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package nat
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// PartParser parses and validates the specified string (data) using the specified template
-// e.g. ip:public:private -> 192.168.0.1:80:8000
-// DEPRECATED: do not use, this function may be removed in a future version
-func PartParser(template, data string) (map[string]string, error) {
- // ip:public:private
- var (
- templateParts = strings.Split(template, ":")
- parts = strings.Split(data, ":")
- out = make(map[string]string, len(templateParts))
- )
- if len(parts) != len(templateParts) {
- return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
- }
-
- for i, t := range templateParts {
- value := ""
- if len(parts) > i {
- value = parts[i]
- }
- out[t] = value
- }
- return out, nil
-}
-
-// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
-func ParsePortRange(ports string) (uint64, uint64, error) {
- if ports == "" {
- return 0, 0, fmt.Errorf("Empty string specified for ports.")
- }
- if !strings.Contains(ports, "-") {
- start, err := strconv.ParseUint(ports, 10, 16)
- end := start
- return start, end, err
- }
-
- parts := strings.Split(ports, "-")
- start, err := strconv.ParseUint(parts[0], 10, 16)
- if err != nil {
- return 0, 0, err
- }
- end, err := strconv.ParseUint(parts[1], 10, 16)
- if err != nil {
- return 0, 0, err
- }
- if end < start {
- return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
- }
- return start, end, nil
-}
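ParsePortRange accepts either a single port or a dash-separated range and returns it as a start/end pair. A tiny sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/go-connections/nat"
    )

    func main() {
        start, end, err := nat.ParsePortRange("8000-9000")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(start, end) // 8000 9000

        single, _, _ := nat.ParsePortRange("443")
        fmt.Println(single) // 443
    }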
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
deleted file mode 100644
index ce95017..0000000
--- a/vendor/github.com/docker/go-connections/nat/sort.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package nat
-
-import (
- "sort"
- "strings"
-)
-
-type portSorter struct {
- ports []Port
- by func(i, j Port) bool
-}
-
-func (s *portSorter) Len() int {
- return len(s.ports)
-}
-
-func (s *portSorter) Swap(i, j int) {
- s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
-}
-
-func (s *portSorter) Less(i, j int) bool {
- ip := s.ports[i]
- jp := s.ports[j]
-
- return s.by(ip, jp)
-}
-
-// Sort sorts a list of ports using the provided predicate
-// This function should compare `i` and `j`, returning true if `i` is
-// considered to be less than `j`
-func Sort(ports []Port, predicate func(i, j Port) bool) {
- s := &portSorter{ports, predicate}
- sort.Sort(s)
-}
-
-type portMapEntry struct {
- port Port
- binding PortBinding
-}
-
-type portMapSorter []portMapEntry
-
-func (s portMapSorter) Len() int { return len(s) }
-func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// sort the port so that the order is:
-// 1. port with larger specified bindings
-// 2. larger port
-// 3. port with tcp protocol
-func (s portMapSorter) Less(i, j int) bool {
- pi, pj := s[i].port, s[j].port
- hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
- return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
-}
-
-// SortPortMap sorts the list of ports and their respective mappings. The ports
-// with explicit HostPort will be placed first.
-func SortPortMap(ports []Port, bindings PortMap) {
- s := portMapSorter{}
- for _, p := range ports {
- if binding, ok := bindings[p]; ok {
- for _, b := range binding {
- s = append(s, portMapEntry{port: p, binding: b})
- }
- bindings[p] = []PortBinding{}
- } else {
- s = append(s, portMapEntry{port: p})
- }
- }
-
- sort.Sort(s)
- var (
- i int
- pm = make(map[Port]struct{})
- )
- // reorder ports
- for _, entry := range s {
- if _, ok := pm[entry.port]; !ok {
- ports[i] = entry.port
- pm[entry.port] = struct{}{}
- i++
- }
- // reorder bindings for this port
- if _, ok := bindings[entry.port]; ok {
- bindings[entry.port] = append(bindings[entry.port], entry.binding)
- }
- }
-}
-
-func toInt(s string) uint64 {
- i, _, err := ParsePortRange(s)
- if err != nil {
- i = 0
- }
- return i
-}
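SortPortMap reorders both the port slice and each port's bindings so entries with an explicit HostPort come first. A sketch with made-up ports and bindings:

    package main

    import (
        "fmt"

        "github.com/docker/go-connections/nat"
    )

    func main() {
        ports := []nat.Port{"80/tcp", "8080/tcp"}
        bindings := nat.PortMap{
            "80/tcp":   {{HostIP: "0.0.0.0", HostPort: ""}},      // dynamic host port
            "8080/tcp": {{HostIP: "0.0.0.0", HostPort: "18080"}}, // explicit host port
        }
        nat.SortPortMap(ports, bindings)
        fmt.Println(ports) // [8080/tcp 80/tcp] - the explicitly bound port is placed first
    }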
diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/docker/go-connections/sockets/README.md
+++ /dev/null
diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
deleted file mode 100644
index 99846ff..0000000
--- a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sockets
-
-import (
- "errors"
- "net"
- "sync"
-)
-
-var errClosed = errors.New("use of closed network connection")
-
-// InmemSocket implements net.Listener using in-memory only connections.
-type InmemSocket struct {
- chConn chan net.Conn
- chClose chan struct{}
- addr string
- mu sync.Mutex
-}
-
-// dummyAddr is used to satisfy net.Addr for the in-mem socket
-// it is just stored as a string and returns the string for all calls
-type dummyAddr string
-
-// NewInmemSocket creates an in-memory only net.Listener
-// The addr argument can be any string, but is used to satisfy the `Addr()` part
-// of the net.Listener interface
-func NewInmemSocket(addr string, bufSize int) *InmemSocket {
- return &InmemSocket{
- chConn: make(chan net.Conn, bufSize),
- chClose: make(chan struct{}),
- addr: addr,
- }
-}
-
-// Addr returns the socket's addr string to satisfy net.Listener
-func (s *InmemSocket) Addr() net.Addr {
- return dummyAddr(s.addr)
-}
-
-// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn.
-func (s *InmemSocket) Accept() (net.Conn, error) {
- select {
- case conn := <-s.chConn:
- return conn, nil
- case <-s.chClose:
- return nil, errClosed
- }
-}
-
-// Close closes the listener. It will be unavailable for use once closed.
-func (s *InmemSocket) Close() error {
- s.mu.Lock()
- defer s.mu.Unlock()
- select {
- case <-s.chClose:
- default:
- close(s.chClose)
- }
- return nil
-}
-
-// Dial is used to establish a connection with the in-mem server
-func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) {
- srvConn, clientConn := net.Pipe()
- select {
- case s.chConn <- srvConn:
- case <-s.chClose:
- return nil, errClosed
- }
-
- return clientConn, nil
-}
-
-// Network returns the addr string, satisfies net.Addr
-func (a dummyAddr) Network() string {
- return string(a)
-}
-
-// String returns the string form
-func (a dummyAddr) String() string {
- return string(a)
-}
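InmemSocket is handy in tests: it satisfies net.Listener without touching the network, and its Dial method pairs with it for the client side. A sketch serving HTTP over it (address string and response text are arbitrary):

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"

        "github.com/docker/go-connections/sockets"
    )

    func main() {
        l := sockets.NewInmemSocket("inmem://test", 8)
        defer l.Close()

        go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, "hello from in-memory listener")
        }))

        // Use the socket's own Dial to connect the client side; the addr is ignored.
        httpClient := &http.Client{Transport: &http.Transport{Dial: l.Dial}}
        resp, err := httpClient.Get("http://inmem/")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(string(body))
    }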
diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go
deleted file mode 100644
index 98e9a1d..0000000
--- a/vendor/github.com/docker/go-connections/sockets/proxy.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sockets
-
-import (
- "net"
- "net/url"
- "os"
- "strings"
-
- "golang.org/x/net/proxy"
-)
-
-// GetProxyEnv allows access to the uppercase and the lowercase forms of
-// proxy-related variables. See the Go specification for details on these
-// variables. https://golang.org/pkg/net/http/
-func GetProxyEnv(key string) string {
- proxyValue := os.Getenv(strings.ToUpper(key))
- if proxyValue == "" {
- return os.Getenv(strings.ToLower(key))
- }
- return proxyValue
-}
-
-// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a
-// proxy.Dialer which will route the connections through the proxy using the
-// given dialer.
-func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) {
- allProxy := GetProxyEnv("all_proxy")
- if len(allProxy) == 0 {
- return direct, nil
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return direct, err
- }
-
- proxyFromURL, err := proxy.FromURL(proxyURL, direct)
- if err != nil {
- return direct, err
- }
-
- noProxy := GetProxyEnv("no_proxy")
- if len(noProxy) == 0 {
- return proxyFromURL, nil
- }
-
- perHost := proxy.NewPerHost(proxyFromURL, direct)
- perHost.AddFromString(noProxy)
-
- return perHost, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go
deleted file mode 100644
index a1d7beb..0000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
- "errors"
- "net"
- "net/http"
- "time"
-)
-
-// Why 32? See https://github.com/docker/docker/pull/8035.
-const defaultTimeout = 32 * time.Second
-
-// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system.
-var ErrProtocolNotAvailable = errors.New("protocol not available")
-
-// ConfigureTransport configures the specified Transport according to the
-// specified proto and addr.
-// If the proto is unix (using a unix socket to communicate) or npipe the
-// compression is disabled.
-func ConfigureTransport(tr *http.Transport, proto, addr string) error {
- switch proto {
- case "unix":
- return configureUnixTransport(tr, proto, addr)
- case "npipe":
- return configureNpipeTransport(tr, proto, addr)
- default:
- tr.Proxy = http.ProxyFromEnvironment
- dialer, err := DialerFromEnvironment(&net.Dialer{
- Timeout: defaultTimeout,
- })
- if err != nil {
- return err
- }
- tr.Dial = dialer.Dial
- }
- return nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
deleted file mode 100644
index 386cf0d..0000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
- "fmt"
- "net"
- "net/http"
- "syscall"
- "time"
-)
-
-const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
- if len(addr) > maxUnixSocketPathSize {
- return fmt.Errorf("Unix socket path %q is too long", addr)
- }
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return net.DialTimeout(proto, addr, defaultTimeout)
- }
- return nil
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
- return ErrProtocolNotAvailable
-}
-
-// DialPipe connects to a Windows named pipe.
-// This is not supported on other OSes.
-func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
- return nil, syscall.EAFNOSUPPORT
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
deleted file mode 100644
index 5c21644..0000000
--- a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sockets
-
-import (
- "net"
- "net/http"
- "time"
-
- "github.com/Microsoft/go-winio"
-)
-
-func configureUnixTransport(tr *http.Transport, proto, addr string) error {
- return ErrProtocolNotAvailable
-}
-
-func configureNpipeTransport(tr *http.Transport, proto, addr string) error {
- // No need for compression in local communications.
- tr.DisableCompression = true
- tr.Dial = func(_, _ string) (net.Conn, error) {
- return DialPipe(addr, defaultTimeout)
- }
- return nil
-}
-
-// DialPipe connects to a Windows named pipe.
-func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
- return winio.DialPipe(addr, &timeout)
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
deleted file mode 100644
index 53cbb6c..0000000
--- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Package sockets provides helper functions to create and configure Unix or TCP sockets.
-package sockets
-
-import (
- "crypto/tls"
- "net"
-)
-
-// NewTCPSocket creates a TCP socket listener with the specified address and
-// the specified tls configuration. If TLSConfig is set, will encapsulate the
-// TCP listener inside a TLS one.
-func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
- l, err := net.Listen("tcp", addr)
- if err != nil {
- return nil, err
- }
- if tlsConfig != nil {
- tlsConfig.NextProtos = []string{"http/1.1"}
- l = tls.NewListener(l, tlsConfig)
- }
- return l, nil
-}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
deleted file mode 100644
index a8b5dbb..0000000
--- a/vendor/github.com/docker/go-connections/sockets/unix_socket.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build !windows
-
-package sockets
-
-import (
- "net"
- "os"
- "syscall"
-)
-
-// NewUnixSocket creates a unix socket with the specified path and group.
-func NewUnixSocket(path string, gid int) (net.Listener, error) {
- if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
- return nil, err
- }
- mask := syscall.Umask(0777)
- defer syscall.Umask(mask)
-
- l, err := net.Listen("unix", path)
- if err != nil {
- return nil, err
- }
- if err := os.Chown(path, 0, gid); err != nil {
- l.Close()
- return nil, err
- }
- if err := os.Chmod(path, 0660); err != nil {
- l.Close()
- return nil, err
- }
- return l, nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
deleted file mode 100644
index 1ca0965..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build go1.7
-
-package tlsconfig
-
-import (
- "crypto/x509"
- "runtime"
-)
-
-// SystemCertPool returns a copy of the system cert pool,
-// returns an error if failed to load or empty pool on windows.
-func SystemCertPool() (*x509.CertPool, error) {
- certpool, err := x509.SystemCertPool()
- if err != nil && runtime.GOOS == "windows" {
- return x509.NewCertPool(), nil
- }
- return certpool, err
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
deleted file mode 100644
index 1ff81c3..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !go1.7
-
-package tlsconfig
-
-import (
- "crypto/x509"
-)
-
-// SystemCertPool returns an new empty cert pool,
-// accessing system cert pool is supported in go 1.7
-func SystemCertPool() (*x509.CertPool, error) {
- return x509.NewCertPool(), nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
deleted file mode 100644
index 0ef3fdc..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
-// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
-// A Config may be reused; the tls package will also not modify it.
-package tlsconfig
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "os"
-
- "github.com/pkg/errors"
-)
-
-// Options represents the information needed to create client and server TLS configurations.
-type Options struct {
- CAFile string
-
- // If either CertFile or KeyFile is empty, Client() will not load them
- // preventing the client from authenticating to the server.
- // However, Server() requires them and will error out if they are empty.
- CertFile string
- KeyFile string
-
- // client-only option
- InsecureSkipVerify bool
- // server-only option
- ClientAuth tls.ClientAuthType
- // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
- // creds will include exclusively the roots in that CA file. If no CA file is provided,
- // the system pool will be used.
- ExclusiveRootPools bool
- MinVersion uint16
- // If Passphrase is set, it will be used to decrypt a TLS private key
- // if the key is encrypted
- Passphrase string
-}
-
-// Extra (server-side) accepted CBC cipher suites - will phase out in the future
-var acceptedCBCCiphers = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-}
-
-// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls
-// options struct but wants to use a commonly accepted set of TLS cipher suites, with
-// known weak algorithms removed.
-var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
-
-// allTLSVersions lists all the TLS versions and is used by the code that validates
-// a uint16 value as a TLS version.
-var allTLSVersions = map[uint16]struct{}{
- tls.VersionSSL30: {},
- tls.VersionTLS10: {},
- tls.VersionTLS11: {},
- tls.VersionTLS12: {},
-}
-
-// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
-func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
- tlsconfig := &tls.Config{
- // Avoid fallback by default to SSL protocols < TLS1.2
- MinVersion: tls.VersionTLS12,
- PreferServerCipherSuites: true,
- CipherSuites: DefaultServerAcceptedCiphers,
- }
-
- for _, op := range ops {
- op(tlsconfig)
- }
-
- return tlsconfig
-}
-
-// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
-func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
- tlsconfig := &tls.Config{
- // Prefer TLS1.2 as the client minimum
- MinVersion: tls.VersionTLS12,
- CipherSuites: clientCipherSuites,
- }
-
- for _, op := range ops {
- op(tlsconfig)
- }
-
- return tlsconfig
-}
-
-// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
-func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
- // If we should verify the server, we need to load a trusted ca
- var (
- certPool *x509.CertPool
- err error
- )
- if exclusivePool {
- certPool = x509.NewCertPool()
- } else {
- certPool, err = SystemCertPool()
- if err != nil {
- return nil, fmt.Errorf("failed to read system certificates: %v", err)
- }
- }
- pem, err := ioutil.ReadFile(caFile)
- if err != nil {
- return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
- }
- if !certPool.AppendCertsFromPEM(pem) {
- return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
- }
- return certPool, nil
-}
-
-// isValidMinVersion checks that the input value is a valid tls minimum version
-func isValidMinVersion(version uint16) bool {
- _, ok := allTLSVersions[version]
- return ok
-}
-
-// adjustMinVersion sets the MinVersion on `config`, the input configuration.
-// It assumes the current MinVersion on the `config` is the lowest allowed.
-func adjustMinVersion(options Options, config *tls.Config) error {
- if options.MinVersion > 0 {
- if !isValidMinVersion(options.MinVersion) {
- return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
- }
- if options.MinVersion < config.MinVersion {
- return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
- }
- config.MinVersion = options.MinVersion
- }
-
- return nil
-}
-
-// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
-// password when tryin to decrypt a TLS private key
-func IsErrEncryptedKey(err error) bool {
- return errors.Cause(err) == x509.IncorrectPasswordError
-}
-
-// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
-// If the private key is encrypted, 'passphrase' is used to decrypted the
-// private key.
-func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
- // this section makes some small changes to code from notary/tuf/utils/x509.go
- pemBlock, _ := pem.Decode(keyBytes)
- if pemBlock == nil {
- return nil, fmt.Errorf("no valid private key found")
- }
-
- var err error
- if x509.IsEncryptedPEMBlock(pemBlock) {
- keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
- if err != nil {
- return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
- }
- keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
- }
-
- return keyBytes, nil
-}
-
-// getCert returns a Certificate from the CertFile and KeyFile in 'options',
-// if the key is encrypted, the Passphrase in 'options' will be used to
-// decrypt it.
-func getCert(options Options) ([]tls.Certificate, error) {
- if options.CertFile == "" && options.KeyFile == "" {
- return nil, nil
- }
-
- errMessage := "Could not load X509 key pair"
-
- cert, err := ioutil.ReadFile(options.CertFile)
- if err != nil {
- return nil, errors.Wrap(err, errMessage)
- }
-
- prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
- if err != nil {
- return nil, errors.Wrap(err, errMessage)
- }
-
- prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
- if err != nil {
- return nil, errors.Wrap(err, errMessage)
- }
-
- tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
- if err != nil {
- return nil, errors.Wrap(err, errMessage)
- }
-
- return []tls.Certificate{tlsCert}, nil
-}
-
-// Client returns a TLS configuration meant to be used by a client.
-func Client(options Options) (*tls.Config, error) {
- tlsConfig := ClientDefault()
- tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
- if !options.InsecureSkipVerify && options.CAFile != "" {
- CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
- if err != nil {
- return nil, err
- }
- tlsConfig.RootCAs = CAs
- }
-
- tlsCerts, err := getCert(options)
- if err != nil {
- return nil, err
- }
- tlsConfig.Certificates = tlsCerts
-
- if err := adjustMinVersion(options, tlsConfig); err != nil {
- return nil, err
- }
-
- return tlsConfig, nil
-}
-
-// Server returns a TLS configuration meant to be used by a server.
-func Server(options Options) (*tls.Config, error) {
- tlsConfig := ServerDefault()
- tlsConfig.ClientAuth = options.ClientAuth
- tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
- if err != nil {
- if os.IsNotExist(err) {
- return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
- }
- return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
- }
- tlsConfig.Certificates = []tls.Certificate{tlsCert}
- if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" {
- CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
- if err != nil {
- return nil, err
- }
- tlsConfig.ClientCAs = CAs
- }
-
- if err := adjustMinVersion(options, tlsConfig); err != nil {
- return nil, err
- }
-
- return tlsConfig, nil
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
deleted file mode 100644
index 6b4c6a7..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build go1.5
-
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-package tlsconfig
-
-import (
- "crypto/tls"
-)
-
-// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
-var clientCipherSuites = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
deleted file mode 100644
index ee22df4..0000000
--- a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !go1.5
-
-// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
-//
-package tlsconfig
-
-import (
- "crypto/tls"
-)
-
-// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
-var clientCipherSuites = []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-}
diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md
deleted file mode 100644
index 9ea86d7..0000000
--- a/vendor/github.com/docker/go-units/CONTRIBUTING.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Contributing to go-units
-
-Want to hack on go-units? Awesome! Here are instructions to get you started.
-
-go-units is a part of the [Docker](https://www.docker.com) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read Docker's
-[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
-[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
-[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
-[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith <joe.smith@email.com>
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
deleted file mode 100644
index b55b37b..0000000
--- a/vendor/github.com/docker/go-units/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2015 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS
deleted file mode 100644
index 4aac7c7..0000000
--- a/vendor/github.com/docker/go-units/MAINTAINERS
+++ /dev/null
@@ -1,46 +0,0 @@
-# go-units maintainers file
-#
-# This file describes who runs the docker/go-units project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
- [Org."Core maintainers"]
- people = [
- "akihirosuda",
- "dnephin",
- "thajeztah",
- "vdemeester",
- ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
- # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
- [people.akihirosuda]
- Name = "Akihiro Suda"
- Email = "akihiro.suda.cz@hco.ntt.co.jp"
- GitHub = "AkihiroSuda"
-
- [people.dnephin]
- Name = "Daniel Nephin"
- Email = "dnephin@gmail.com"
- GitHub = "dnephin"
-
- [people.thajeztah]
- Name = "Sebastiaan van Stijn"
- Email = "github@gone.nl"
- GitHub = "thaJeztah"
-
- [people.vdemeester]
- Name = "Vincent Demeester"
- Email = "vincent@sbr.pm"
- GitHub = "vdemeester"
\ No newline at end of file
diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md
deleted file mode 100644
index 4f70a4e..0000000
--- a/vendor/github.com/docker/go-units/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-[](https://godoc.org/github.com/docker/go-units)
-
-# Introduction
-
-go-units is a library to transform human friendly measurements into machine friendly values.
-
-## Usage
-
-See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
-
-## Copyright and license
-
-Copyright © 2015 Docker, Inc.
-
-go-units is licensed under the Apache License, Version 2.0.
-See [LICENSE](LICENSE) for the full text of the license.
diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml
deleted file mode 100644
index af9d605..0000000
--- a/vendor/github.com/docker/go-units/circle.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-dependencies:
- post:
- # install golint
- - go get golang.org/x/lint/golint
-
-test:
- pre:
- # run analysis before tests
- - go vet ./...
- - test -z "$(golint ./... | tee /dev/stderr)"
- - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go
deleted file mode 100644
index 48dd874..0000000
--- a/vendor/github.com/docker/go-units/duration.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package units provides helper function to parse and print size and time units
-// in human-readable format.
-package units
-
-import (
- "fmt"
- "time"
-)
-
-// HumanDuration returns a human-readable approximation of a duration
-// (eg. "About a minute", "4 hours ago", etc.).
-func HumanDuration(d time.Duration) string {
- if seconds := int(d.Seconds()); seconds < 1 {
- return "Less than a second"
- } else if seconds == 1 {
- return "1 second"
- } else if seconds < 60 {
- return fmt.Sprintf("%d seconds", seconds)
- } else if minutes := int(d.Minutes()); minutes == 1 {
- return "About a minute"
- } else if minutes < 60 {
- return fmt.Sprintf("%d minutes", minutes)
- } else if hours := int(d.Hours() + 0.5); hours == 1 {
- return "About an hour"
- } else if hours < 48 {
- return fmt.Sprintf("%d hours", hours)
- } else if hours < 24*7*2 {
- return fmt.Sprintf("%d days", hours/24)
- } else if hours < 24*30*2 {
- return fmt.Sprintf("%d weeks", hours/24/7)
- } else if hours < 24*365*2 {
- return fmt.Sprintf("%d months", hours/24/30)
- }
- return fmt.Sprintf("%d years", int(d.Hours())/24/365)
-}
diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go
deleted file mode 100644
index 85f6ab0..0000000
--- a/vendor/github.com/docker/go-units/size.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package units
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// See: http://en.wikipedia.org/wiki/Binary_prefix
-const (
- // Decimal
-
- KB = 1000
- MB = 1000 * KB
- GB = 1000 * MB
- TB = 1000 * GB
- PB = 1000 * TB
-
- // Binary
-
- KiB = 1024
- MiB = 1024 * KiB
- GiB = 1024 * MiB
- TiB = 1024 * GiB
- PiB = 1024 * TiB
-)
-
-type unitMap map[string]int64
-
-var (
- decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
- binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
- sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
-)
-
-var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
-var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
-
-func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
- i := 0
- unitsLimit := len(_map) - 1
- for size >= base && i < unitsLimit {
- size = size / base
- i++
- }
- return size, _map[i]
-}
-
-// CustomSize returns a human-readable approximation of a size
-// using custom format.
-func CustomSize(format string, size float64, base float64, _map []string) string {
- size, unit := getSizeAndUnit(size, base, _map)
- return fmt.Sprintf(format, size, unit)
-}
-
-// HumanSizeWithPrecision allows the size to be in any precision,
-// instead of 4 digit precision used in units.HumanSize.
-func HumanSizeWithPrecision(size float64, precision int) string {
- size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
- return fmt.Sprintf("%.*g%s", precision, size, unit)
-}
-
-// HumanSize returns a human-readable approximation of a size
-// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
-func HumanSize(size float64) string {
- return HumanSizeWithPrecision(size, 4)
-}
-
-// BytesSize returns a human-readable size in bytes, kibibytes,
-// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
-func BytesSize(size float64) string {
- return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
-}
-
-// FromHumanSize returns an integer from a human-readable specification of a
-// size using SI standard (eg. "44kB", "17MB").
-func FromHumanSize(size string) (int64, error) {
- return parseSize(size, decimalMap)
-}
-
-// RAMInBytes parses a human-readable string representing an amount of RAM
-// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
-// returns the number of bytes, or -1 if the string is unparseable.
-// Units are case-insensitive, and the 'b' suffix is optional.
-func RAMInBytes(size string) (int64, error) {
- return parseSize(size, binaryMap)
-}
-
-// Parses the human-readable size string into the amount it represents.
-func parseSize(sizeStr string, uMap unitMap) (int64, error) {
- matches := sizeRegex.FindStringSubmatch(sizeStr)
- if len(matches) != 4 {
- return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
- }
-
- size, err := strconv.ParseFloat(matches[1], 64)
- if err != nil {
- return -1, err
- }
-
- unitPrefix := strings.ToLower(matches[3])
- if mul, ok := uMap[unitPrefix]; ok {
- size *= float64(mul)
- }
-
- return int64(size), nil
-}
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
deleted file mode 100644
index fca0400..0000000
--- a/vendor/github.com/docker/go-units/ulimit.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package units
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// Ulimit is a human friendly version of Rlimit.
-type Ulimit struct {
- Name string
- Hard int64
- Soft int64
-}
-
-// Rlimit specifies the resource limits, such as max open files.
-type Rlimit struct {
- Type int `json:"type,omitempty"`
- Hard uint64 `json:"hard,omitempty"`
- Soft uint64 `json:"soft,omitempty"`
-}
-
-const (
- // magic numbers for making the syscall
- // some of these are defined in the syscall package, but not all.
- // Also since Windows client doesn't get access to the syscall package, need to
- // define these here
- rlimitAs = 9
- rlimitCore = 4
- rlimitCPU = 0
- rlimitData = 2
- rlimitFsize = 1
- rlimitLocks = 10
- rlimitMemlock = 8
- rlimitMsgqueue = 12
- rlimitNice = 13
- rlimitNofile = 7
- rlimitNproc = 6
- rlimitRss = 5
- rlimitRtprio = 14
- rlimitRttime = 15
- rlimitSigpending = 11
- rlimitStack = 3
-)
-
-var ulimitNameMapping = map[string]int{
- //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
- "core": rlimitCore,
- "cpu": rlimitCPU,
- "data": rlimitData,
- "fsize": rlimitFsize,
- "locks": rlimitLocks,
- "memlock": rlimitMemlock,
- "msgqueue": rlimitMsgqueue,
- "nice": rlimitNice,
- "nofile": rlimitNofile,
- "nproc": rlimitNproc,
- "rss": rlimitRss,
- "rtprio": rlimitRtprio,
- "rttime": rlimitRttime,
- "sigpending": rlimitSigpending,
- "stack": rlimitStack,
-}
-
-// ParseUlimit parses and returns a Ulimit from the specified string.
-func ParseUlimit(val string) (*Ulimit, error) {
- parts := strings.SplitN(val, "=", 2)
- if len(parts) != 2 {
- return nil, fmt.Errorf("invalid ulimit argument: %s", val)
- }
-
- if _, exists := ulimitNameMapping[parts[0]]; !exists {
- return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
- }
-
- var (
- soft int64
- hard = &soft // default to soft in case no hard was set
- temp int64
- err error
- )
- switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
- case 2:
- temp, err = strconv.ParseInt(limitVals[1], 10, 64)
- if err != nil {
- return nil, err
- }
- hard = &temp
- fallthrough
- case 1:
- soft, err = strconv.ParseInt(limitVals[0], 10, 64)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
- }
-
- if *hard != -1 {
- if soft == -1 {
- return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard)
- }
- if soft > *hard {
- return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
- }
- }
-
- return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
-}
-
-// GetRlimit returns the RLimit corresponding to Ulimit.
-func (u *Ulimit) GetRlimit() (*Rlimit, error) {
- t, exists := ulimitNameMapping[u.Name]
- if !exists {
- return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
- }
-
- return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
-}
-
-func (u *Ulimit) String() string {
- return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
-}
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
deleted file mode 100644
index 3d97fc7..0000000
--- a/vendor/github.com/gogo/protobuf/AUTHORS
+++ /dev/null
@@ -1,15 +0,0 @@
-# This is the official list of GoGo authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS file, which
-# lists people. For example, employees are listed in CONTRIBUTORS,
-# but not in AUTHORS, because the employer holds the copyright.
-
-# Names should be added to this file as one of
-# Organization's name
-# Individual's name <submission email address>
-# Individual's name <submission email address> <email2> <emailN>
-
-# Please keep the list sorted.
-
-Sendgrid, Inc
-Vastech SA (PTY) LTD
-Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1b4f6c2..0000000
--- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,23 +0,0 @@
-Anton Povarov <anton.povarov@gmail.com>
-Brian Goff <cpuguy83@gmail.com>
-Clayton Coleman <ccoleman@redhat.com>
-Denis Smirnov <denis.smirnov.91@gmail.com>
-DongYun Kang <ceram1000@gmail.com>
-Dwayne Schultz <dschultz@pivotal.io>
-Georg Apitz <gapitz@pivotal.io>
-Gustav Paul <gustav.paul@gmail.com>
-Johan Brandhorst <johan.brandhorst@gmail.com>
-John Shahid <jvshahid@gmail.com>
-John Tuley <john@tuley.org>
-Laurent <laurent@adyoulike.com>
-Patrick Lee <patrick@dropbox.com>
-Peter Edge <peter.edge@gmail.com>
-Roger Johansson <rogeralsing@gmail.com>
-Sam Nguyen <sam.nguyen@sendgrid.com>
-Sergio Arbeo <serabe@gmail.com>
-Stephen J Day <stephen.day@docker.com>
-Tamir Duberstein <tamird@gmail.com>
-Todd Eisenberger <teisenberger@dropbox.com>
-Tormod Erevik Lea <tormodlea@gmail.com>
-Vyacheslav Kim <kane@sendgrid.com>
-Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
deleted file mode 100644
index f57de90..0000000
--- a/vendor/github.com/gogo/protobuf/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-Copyright (c) 2013, The GoGo Authors. All rights reserved.
-
-Protocol Buffers for Go with Gadgets
-
-Go support for Protocol Buffers - Google's data interchange format
-
-Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
deleted file mode 100644
index 00d65f3..0000000
--- a/vendor/github.com/gogo/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C test_proto
- make -C proto3_proto
- make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
deleted file mode 100644
index a26b046..0000000
--- a/vendor/github.com/gogo/protobuf/proto/clone.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "fmt"
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(src Message) Message {
- in := reflect.ValueOf(src)
- if in.IsNil() {
- return src
- }
- out := reflect.New(in.Type().Elem())
- dst := out.Interface().(Message)
- Merge(dst, src)
- return dst
-}
-
-// Merger is the interface representing objects that can merge messages of the same type.
-type Merger interface {
- // Merge merges src into this message.
- // Required and optional fields that are set in src will be set to that value in dst.
- // Elements of repeated fields will be appended.
- //
- // Merge may panic if called with a different argument type than the receiver.
- Merge(src Message)
-}
-
-// generatedMerger is the custom merge method that generated protos will have.
-// We must add this method since a generate Merge method will conflict with
-// many existing protos that have a Merge data field already defined.
-type generatedMerger interface {
- XXX_Merge(src Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- if m, ok := dst.(Merger); ok {
- m.Merge(src)
- return
- }
-
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
- }
- if in.IsNil() {
- return // Merge from nil src is a noop
- }
- if m, ok := dst.(generatedMerger); ok {
- m.XXX_Merge(src)
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
- emOut := out.Addr().Interface().(extensionsBytes)
- bIn := emIn.GetExtensions()
- bOut := emOut.GetExtensions()
- *bOut = append(*bOut, *bIn...)
- } else if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
deleted file mode 100644
index 2455248..0000000
--- a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import "reflect"
-
-type custom interface {
- Marshal() ([]byte, error)
- Unmarshal(data []byte) error
- Size() int
-}
-
-var customType = reflect.TypeOf((*custom)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
deleted file mode 100644
index 63b0f08..0000000
--- a/vendor/github.com/gogo/protobuf/proto/decode.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- i := p.index
- buf := p.buf
-
- if i >= len(buf) {
- return 0, io.ErrUnexpectedEOF
- } else if buf[i] < 0x80 {
- p.index++
- return uint64(buf[i]), nil
- } else if len(buf)-i < 10 {
- return p.decodeVarintSlow()
- }
-
- var b uint64
- // we already checked the first byte
- x = uint64(buf[i]) - 0x80
- i++
-
- b = uint64(buf[i])
- i++
- x += b << 7
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 7
-
- b = uint64(buf[i])
- i++
- x += b << 14
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 14
-
- b = uint64(buf[i])
- i++
- x += b << 21
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 21
-
- b = uint64(buf[i])
- i++
- x += b << 28
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 28
-
- b = uint64(buf[i])
- i++
- x += b << 35
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 35
-
- b = uint64(buf[i])
- i++
- x += b << 42
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 42
-
- b = uint64(buf[i])
- i++
- x += b << 49
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 49
-
- b = uint64(buf[i])
- i++
- x += b << 56
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 56
-
- b = uint64(buf[i])
- i++
- x += b << 63
- if b&0x80 == 0 {
- goto done
- }
-
- return 0, errOverflow
-
-done:
- p.index = i
- return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-// Unmarshal implementations should not clear the receiver.
-// Any unmarshaled data should be merged into the receiver.
-// Callers of Unmarshal that do not want to retain existing data
-// should Reset the receiver before calling Unmarshal.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// newUnmarshaler is the interface representing objects that can
-// unmarshal themselves. The semantics are identical to Unmarshaler.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newUnmarshaler interface {
- XXX_Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- if u, ok := pb.(newUnmarshaler); ok {
- return u.XXX_Unmarshal(buf)
- }
- if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto has unfortunately been inconsistent
-		// about whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-// StartGroup tag is already consumed. This function consumes
-// EndGroup tag.
-func (p *Buffer) DecodeGroup(pb Message) error {
- b := p.buf[p.index:]
- x, y := findEndGroup(b)
- if x < 0 {
- return io.ErrUnexpectedEOF
- }
- err := Unmarshal(b[:x], pb)
- p.index += y
- return err
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(newUnmarshaler); ok {
- err := u.XXX_Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
- if u, ok := pb.(Unmarshaler); ok {
-		// NOTE: The history of proto has unfortunately been inconsistent
-		// about whether Unmarshaler should or should not implicitly clear itself.
- // Some implementations do, most do not.
- // Thus, calling this here may or may not do what people want.
- //
- // See https://github.com/golang/protobuf/issues/424
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- // Slow workaround for messages that aren't Unmarshalers.
- // This includes some hand-coded .pb.go files and
- // bootstrap protos.
- // TODO: fix all of those and then add Unmarshal to
- // the Message interface. Then:
- // The cast above and code below can be deleted.
- // The old unmarshaler can be deleted.
- // Clients can call Unmarshal directly (can already do that, actually).
- var info InternalMessageInfo
- err := info.Unmarshal(pb, p.buf[p.index:])
- p.index = len(p.buf)
- return err
-}
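
For illustration only (this sketch is not in the repository): the decode.go deleted above implements the standard protobuf varint wire format. Below is a self-contained round trip using the same algorithm as the removed DecodeVarint; encodeVarint here is a hypothetical helper written only for the example.

    package main

    import "fmt"

    // encodeVarint emits 7 bits per byte, least-significant group first,
    // with the high bit set on every byte except the last (hypothetical
    // helper for this example).
    func encodeVarint(x uint64) []byte {
        var buf []byte
        for x >= 0x80 {
            buf = append(buf, byte(x)|0x80)
            x >>= 7
        }
        return append(buf, byte(x))
    }

    // decodeVarint is the same loop as the package-level DecodeVarint in the
    // removed file: accumulate 7 bits at a time until a byte without the
    // continuation bit is seen.
    func decodeVarint(buf []byte) (x uint64, n int) {
        for shift := uint(0); shift < 64; shift += 7 {
            if n >= len(buf) {
                return 0, 0
            }
            b := uint64(buf[n])
            n++
            x |= (b & 0x7F) << shift
            if b&0x80 == 0 {
                return x, n
            }
        }
        return 0, 0 // more than 64 bits: overflow
    }

    func main() {
        enc := encodeVarint(300)
        fmt.Printf("% x\n", enc) // prints: ac 02
        v, n := decodeVarint(enc)
        fmt.Println(v, n) // prints: 300 2
    }
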
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
deleted file mode 100644
index 35b882c..0000000
--- a/vendor/github.com/gogo/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2018 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import "errors"
-
-// Deprecated: do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: do not use.
-func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go
deleted file mode 100644
index fe1bd7d..0000000
--- a/vendor/github.com/gogo/protobuf/proto/discard.go
+++ /dev/null
@@ -1,350 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2017 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-type generatedDiscarder interface {
- XXX_DiscardUnknown()
-}
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-//
-// For proto2 messages, the unknown fields of message extensions are only
-// discarded from messages that have been accessed via GetExtension.
-func DiscardUnknown(m Message) {
- if m, ok := m.(generatedDiscarder); ok {
- m.XXX_DiscardUnknown()
- return
- }
- // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
- // but the master branch has no implementation for InternalMessageInfo,
- // so it would be more work to replicate that approach.
- discardLegacy(m)
-}
-
-// DiscardUnknown recursively discards all unknown fields.
-func (a *InternalMessageInfo) DiscardUnknown(m Message) {
- di := atomicLoadDiscardInfo(&a.discard)
- if di == nil {
- di = getDiscardInfo(reflect.TypeOf(m).Elem())
- atomicStoreDiscardInfo(&a.discard, di)
- }
- di.discard(toPointer(&m))
-}
-
-type discardInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []discardFieldInfo
- unrecognized field
-}
-
-type discardFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
- discard func(src pointer)
-}
-
-var (
- discardInfoMap = map[reflect.Type]*discardInfo{}
- discardInfoLock sync.Mutex
-)
-
-func getDiscardInfo(t reflect.Type) *discardInfo {
- discardInfoLock.Lock()
- defer discardInfoLock.Unlock()
- di := discardInfoMap[t]
- if di == nil {
- di = &discardInfo{typ: t}
- discardInfoMap[t] = di
- }
- return di
-}
-
-func (di *discardInfo) discard(src pointer) {
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&di.initialized) == 0 {
- di.computeDiscardInfo()
- }
-
- for _, fi := range di.fields {
- sfp := src.offset(fi.field)
- fi.discard(sfp)
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
- // Ignore lock since DiscardUnknown is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- DiscardUnknown(m)
- }
- }
- }
-
- if di.unrecognized.IsValid() {
- *src.offset(di.unrecognized).toBytes() = nil
- }
-}
-
-func (di *discardInfo) computeDiscardInfo() {
- di.lock.Lock()
- defer di.lock.Unlock()
- if di.initialized != 0 {
- return
- }
- t := di.typ
- n := t.NumField()
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- dfi := discardFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
- case isSlice: // E.g., []*pb.T
- discardInfo := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sps := src.getPointerSlice()
- for _, sp := range sps {
- if !sp.isNil() {
- discardInfo.discard(sp)
- }
- }
- }
- default: // E.g., *pb.T
- discardInfo := getDiscardInfo(tf)
- dfi.discard = func(src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- discardInfo.discard(sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
- default: // E.g., map[K]V
- if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
- dfi.discard = func(src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- DiscardUnknown(val.Interface().(Message))
- }
- }
- } else {
- dfi.discard = func(pointer) {} // Noop
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
- default: // E.g., interface{}
- // TODO: Make this faster?
- dfi.discard = func(src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- DiscardUnknown(sv.Interface().(Message))
- }
- }
- }
- }
- default:
- continue
- }
- di.fields = append(di.fields, dfi)
- }
-
- di.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- di.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&di.initialized, 1)
-}
-
-func discardLegacy(m Message) {
- v := reflect.ValueOf(m)
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return
- }
- t := v.Type()
-
- for i := 0; i < v.NumField(); i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- vf := v.Field(i)
- tf := f.Type
-
- // Unwrap tf to get its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
- }
-
- switch tf.Kind() {
- case reflect.Struct:
- switch {
- case !isPointer:
- panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
- case isSlice: // E.g., []*pb.T
- for j := 0; j < vf.Len(); j++ {
- discardLegacy(vf.Index(j).Interface().(Message))
- }
- default: // E.g., *pb.T
- discardLegacy(vf.Interface().(Message))
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
- default: // E.g., map[K]V
- tv := vf.Type().Elem()
- if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
- for _, key := range vf.MapKeys() {
- val := vf.MapIndex(key)
- discardLegacy(val.Interface().(Message))
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
- default: // E.g., test_proto.isCommunique_Union interface
- if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
- vf = vf.Elem() // E.g., *test_proto.Communique_Msg
- if !vf.IsNil() {
- vf = vf.Elem() // E.g., test_proto.Communique_Msg
- vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
- if vf.Kind() == reflect.Ptr {
- discardLegacy(vf.Interface().(Message))
- }
- }
- }
- }
- }
- }
-
- if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
- if vf.Type() != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- vf.Set(reflect.ValueOf([]byte(nil)))
- }
-
- // For proto2 messages, only discard unknown fields in message extensions
- // that have been accessed via GetExtension.
- if em, err := extendable(m); err == nil {
- // Ignore lock since discardLegacy is not concurrency safe.
- emm, _ := em.extensionsRead()
- for _, mx := range emm {
- if m, ok := mx.value.(Message); ok {
- discardLegacy(m)
- }
- }
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go
deleted file mode 100644
index 93464c9..0000000
--- a/vendor/github.com/gogo/protobuf/proto/duration.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// This file implements conversions between google.protobuf.Duration
-// and time.Duration.
-
-import (
- "errors"
- "fmt"
- "time"
-)
-
-const (
- // Range of a Duration in seconds, as specified in
- // google/protobuf/duration.proto. This is about 10,000 years in seconds.
- maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
- minSeconds = -maxSeconds
-)
-
-// validateDuration determines whether the Duration is valid according to the
-// definition in google/protobuf/duration.proto. A valid Duration
-// may still be too large to fit into a time.Duration (the range of Duration
-// is about 10,000 years, and the range of time.Duration is about 290 years).
-func validateDuration(d *duration) error {
- if d == nil {
- return errors.New("duration: nil Duration")
- }
- if d.Seconds < minSeconds || d.Seconds > maxSeconds {
- return fmt.Errorf("duration: %#v: seconds out of range", d)
- }
- if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
- return fmt.Errorf("duration: %#v: nanos out of range", d)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
- return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
- }
- return nil
-}
-
-// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
-// returns an error if the Duration is invalid or is too large to be
-// represented in a time.Duration.
-func durationFromProto(p *duration) (time.Duration, error) {
- if err := validateDuration(p); err != nil {
- return 0, err
- }
- d := time.Duration(p.Seconds) * time.Second
- if int64(d/time.Second) != p.Seconds {
- return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
- }
- if p.Nanos != 0 {
- d += time.Duration(p.Nanos)
- if (d < 0) != (p.Nanos < 0) {
- return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
- }
- }
- return d, nil
-}
-
-// DurationProto converts a time.Duration to a Duration.
-func durationProto(d time.Duration) *duration {
- nanos := d.Nanoseconds()
- secs := nanos / 1e9
- nanos -= secs * 1e9
- return &duration{
- Seconds: secs,
- Nanos: int32(nanos),
- }
-}
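
For illustration only (this sketch is not in the repository): the deleted duration.go converts a google.protobuf.Duration (seconds plus nanos) into a time.Duration. Below is a simplified standalone sketch of that conversion with a subset of the validation (sign agreement and seconds overflow); toDuration is a hypothetical helper, not the library's API.

    package main

    import (
        "fmt"
        "time"
    )

    // toDuration converts a (seconds, nanos) pair, the shape of
    // google.protobuf.Duration, into a time.Duration. It keeps two of the
    // checks from the removed durationFromProto: the components must agree
    // in sign, and the seconds component must not overflow time.Duration.
    func toDuration(seconds int64, nanos int32) (time.Duration, error) {
        if (seconds < 0 && nanos > 0) || (seconds > 0 && nanos < 0) {
            return 0, fmt.Errorf("seconds and nanos have different signs")
        }
        d := time.Duration(seconds) * time.Second
        if int64(d/time.Second) != seconds {
            return 0, fmt.Errorf("%d seconds out of range for time.Duration", seconds)
        }
        return d + time.Duration(nanos), nil
    }

    func main() {
        d, err := toDuration(90, 500000000)
        fmt.Println(d, err) // prints: 1m30.5s <nil>
    }
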
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
deleted file mode 100644
index e748e17..0000000
--- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
- "time"
-)
-
-var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
-
-type duration struct {
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *duration) Reset() { *m = duration{} }
-func (*duration) ProtoMessage() {}
-func (*duration) String() string { return "duration<string>" }
-
-func init() {
- RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
deleted file mode 100644
index 9581ccd..0000000
--- a/vendor/github.com/gogo/protobuf/proto/encode.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "errors"
- "reflect"
-)
-
-var (
- // errRepeatedHasNil is the error returned if Marshal is called with
- // a struct with a repeated field containing a nil element.
- errRepeatedHasNil = errors.New("proto: repeated field has nil element")
-
- // errOneofHasNil is the error returned if Marshal is called with
- // a struct with a oneof field containing a nil element.
- errOneofHasNil = errors.New("proto: oneof field has nil value")
-
- // ErrNil is the error returned if Marshal is called with nil.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // ErrTooLarge is the error returned if Marshal is called with a
- // message that encodes to >2GB.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- switch {
- case x < 1<<7:
- return 1
- case x < 1<<14:
- return 2
- case x < 1<<21:
- return 3
- case x < 1<<28:
- return 4
- case x < 1<<35:
- return 5
- case x < 1<<42:
- return 6
- case x < 1<<49:
- return 7
- case x < 1<<56:
- return 8
- case x < 1<<63:
- return 9
- }
- return 10
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- siz := Size(pb)
- sizVar := SizeVarint(uint64(siz))
- p.grow(siz + sizVar)
- p.EncodeVarint(uint64(siz))
- return p.Marshal(pb)
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
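
For illustration only (this sketch is not in the repository): EncodeZigzag64 and DecodeZigzag64 above rely on zigzag encoding so that integers of small magnitude, negative or positive, encode as short varints. A standalone sketch of the 64-bit mapping and its inverse:

    package main

    import "fmt"

    // zigzag and unzigzag are the 64-bit mappings behind the removed
    // EncodeZigzag64/DecodeZigzag64: 0, -1, 1, -2, ... map to 0, 1, 2, 3, ...
    // so small magnitudes become small unsigned values and thus short varints.
    func zigzag(v int64) uint64   { return uint64((v << 1) ^ (v >> 63)) }
    func unzigzag(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

    func main() {
        for _, v := range []int64{0, -1, 1, -2, 2, -64} {
            z := zigzag(v)
            fmt.Printf("%4d -> %3d -> %4d\n", v, z, unzigzag(z))
        }
    }
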
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
deleted file mode 100644
index 0f5fb17..0000000
--- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-func NewRequiredNotSetError(field string) *RequiredNotSetError {
- return &RequiredNotSetError{field}
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
deleted file mode 100644
index d4db5a1..0000000
--- a/vendor/github.com/gogo/protobuf/proto/equal.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer comparison.
-
-package proto
-
-import (
- "bytes"
- "log"
- "reflect"
- "strings"
-)
-
-/*
-Equal returns true iff protocol buffers a and b are equal.
-The arguments must both be pointers to protocol buffer structs.
-
-Equality is defined in this way:
- - Two messages are equal iff they are the same type,
- corresponding fields are equal, unknown field sets
- are equal, and extensions sets are equal.
- - Two set scalar fields are equal iff their values are equal.
- If the fields are of a floating-point type, remember that
- NaN != x for all x, including NaN. If the message is defined
- in a proto3 .proto file, fields are not "set"; specifically,
- zero length proto3 "bytes" fields are equal (nil == {}).
- - Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal. Note a "bytes" field,
- although represented by []byte, is not a repeated field and the
- rule for the scalar fields described above applies.
- - Two unset fields are equal.
- - Two unknown field sets are equal if their current
- encoded state is equal.
- - Two extension sets are equal iff they have corresponding
- elements that are pairwise equal.
- - Two map fields are equal iff their lengths are the same,
- and they contain the same set of elements. Zero-length map
- fields are equal.
-  - Every other combination of things is not equal.
-
-The return value is undefined if a and b are not protocol buffers.
-*/
-func Equal(a, b Message) bool {
- if a == nil || b == nil {
- return a == b
- }
- v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
- if v1.Type() != v2.Type() {
- return false
- }
- if v1.Kind() == reflect.Ptr {
- if v1.IsNil() {
- return v2.IsNil()
- }
- if v2.IsNil() {
- return false
- }
- v1, v2 = v1.Elem(), v2.Elem()
- }
- if v1.Kind() != reflect.Struct {
- return false
- }
- return equalStruct(v1, v2)
-}
-
-// v1 and v2 are known to have the same type.
-func equalStruct(v1, v2 reflect.Value) bool {
- sprop := GetProperties(v1.Type())
- for i := 0; i < v1.NumField(); i++ {
- f := v1.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- f1, f2 := v1.Field(i), v2.Field(i)
- if f.Type.Kind() == reflect.Ptr {
- if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
- // both unset
- continue
- } else if n1 != n2 {
- // set/unset mismatch
- return false
- }
- f1, f2 = f1.Elem(), f2.Elem()
- }
- if !equalAny(f1, f2, sprop.Prop[i]) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_InternalExtensions")
- if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
- return false
- }
- }
-
- if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
- em2 := v2.FieldByName("XXX_extensions")
- if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
- return false
- }
- }
-
- uf := v1.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return true
- }
-
- u1 := uf.Bytes()
- u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- return bytes.Equal(u1, u2)
-}
-
-// v1 and v2 are known to have the same type.
-// prop may be nil.
-func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
- if v1.Type() == protoMessageType {
- m1, _ := v1.Interface().(Message)
- m2, _ := v2.Interface().(Message)
- return Equal(m1, m2)
- }
- switch v1.Kind() {
- case reflect.Bool:
- return v1.Bool() == v2.Bool()
- case reflect.Float32, reflect.Float64:
- return v1.Float() == v2.Float()
- case reflect.Int32, reflect.Int64:
- return v1.Int() == v2.Int()
- case reflect.Interface:
- // Probably a oneof field; compare the inner values.
- n1, n2 := v1.IsNil(), v2.IsNil()
- if n1 || n2 {
- return n1 == n2
- }
- e1, e2 := v1.Elem(), v2.Elem()
- if e1.Type() != e2.Type() {
- return false
- }
- return equalAny(e1, e2, nil)
- case reflect.Map:
- if v1.Len() != v2.Len() {
- return false
- }
- for _, key := range v1.MapKeys() {
- val2 := v2.MapIndex(key)
- if !val2.IsValid() {
- // This key was not found in the second map.
- return false
- }
- if !equalAny(v1.MapIndex(key), val2, nil) {
- return false
- }
- }
- return true
- case reflect.Ptr:
- // Maps may have nil values in them, so check for nil.
- if v1.IsNil() && v2.IsNil() {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return equalAny(v1.Elem(), v2.Elem(), prop)
- case reflect.Slice:
- if v1.Type().Elem().Kind() == reflect.Uint8 {
- // short circuit: []byte
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value.
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
- return true
- }
- if v1.IsNil() != v2.IsNil() {
- return false
- }
- return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
- }
-
- if v1.Len() != v2.Len() {
- return false
- }
- for i := 0; i < v1.Len(); i++ {
- if !equalAny(v1.Index(i), v2.Index(i), prop) {
- return false
- }
- }
- return true
- case reflect.String:
- return v1.Interface().(string) == v2.Interface().(string)
- case reflect.Struct:
- return equalStruct(v1, v2)
- case reflect.Uint32, reflect.Uint64:
- return v1.Uint() == v2.Uint()
- }
-
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to compare %v", v1)
- return false
-}
-
-// base is the struct type that the extensions are based on.
-// x1 and x2 are InternalExtensions.
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
- em1, _ := x1.extensionsRead()
- em2, _ := x2.extensionsRead()
- return equalExtMap(base, em1, em2)
-}
-
-func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
- if len(em1) != len(em2) {
- return false
- }
-
- for extNum, e1 := range em1 {
- e2, ok := em2[extNum]
- if !ok {
- return false
- }
-
- m1, m2 := e1.value, e2.value
-
- if m1 == nil && m2 == nil {
- // Both have only encoded form.
- if bytes.Equal(e1.enc, e2.enc) {
- continue
- }
- // The bytes are different, but the extensions might still be
- // equal. We need to decode them to compare.
- }
-
- if m1 != nil && m2 != nil {
- // Both are unencoded.
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- continue
- }
-
- // At least one is encoded. To do a semantically correct comparison
- // we need to unmarshal them first.
- var desc *ExtensionDesc
- if m := extensionMaps[base]; m != nil {
- desc = m[extNum]
- }
- if desc == nil {
- // If both have only encoded form and the bytes are the same,
- // it is handled above. We get here when the bytes are different.
- // We don't know how to decode it, so just compare them as byte
- // slices.
- log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- return false
- }
- var err error
- if m1 == nil {
- m1, err = decodeExtension(e1.enc, desc)
- }
- if m2 == nil && err == nil {
- m2, err = decodeExtension(e2.enc, desc)
- }
- if err != nil {
- // The encoded form is invalid.
- log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
- return false
- }
- if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
- return false
- }
- }
-
- return true
-}
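
For illustration only (this sketch is not in the repository): the Equal documentation above states that in proto3 a zero-length bytes field is considered unset, so nil and empty values compare equal. A tiny standalone sketch of that rule in isolation:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        var a []byte  // nil
        b := []byte{} // empty but non-nil

        // bytes.Equal treats a nil slice and an empty slice as equal, which
        // matches the proto3 rule quoted above: a zero-length bytes field is
        // the zero value, so nil == {} for equality purposes.
        fmt.Println(bytes.Equal(a, b)) // prints: true
    }
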
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
deleted file mode 100644
index 341c6f5..0000000
--- a/vendor/github.com/gogo/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,605 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Types and routines for supporting protocol buffer extensions.
- */
-
-import (
- "errors"
- "fmt"
- "io"
- "reflect"
- "strconv"
- "sync"
-)
-
-// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-// ExtensionRange represents a range of message extensions for a protocol buffer.
-// Used in code generated by the protocol compiler.
-type ExtensionRange struct {
- Start, End int32 // both inclusive
-}
-
-// extendableProto is an interface implemented by any protocol buffer generated by the current
-// proto compiler that may be extended.
-type extendableProto interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- extensionsWrite() map[int32]Extension
- extensionsRead() (map[int32]Extension, sync.Locker)
-}
-
-// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
-// version of the proto compiler that may be extended.
-type extendableProtoV1 interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- ExtensionMap() map[int32]Extension
-}
-
-// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
-type extensionAdapter struct {
- extendableProtoV1
-}
-
-func (e extensionAdapter) extensionsWrite() map[int32]Extension {
- return e.ExtensionMap()
-}
-
-func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- return e.ExtensionMap(), notLocker{}
-}
-
-// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
-type notLocker struct{}
-
-func (n notLocker) Lock() {}
-func (n notLocker) Unlock() {}
-
-// extendable returns the extendableProto interface for the given generated proto message.
-// If the proto message has the old extension format, it returns a wrapper that implements
-// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, error) {
- switch p := p.(type) {
- case extendableProto:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return p, nil
- case extendableProtoV1:
- if isNilPtr(p) {
- return nil, fmt.Errorf("proto: nil %T is not extendable", p)
- }
- return extensionAdapter{p}, nil
- case extensionsBytes:
- return slowExtensionAdapter{p}, nil
- }
- // Don't allocate a specific error containing %T:
- // this is the hot path for Clone and MarshalText.
- return nil, errNotExtendable
-}
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-func isNilPtr(x interface{}) bool {
- v := reflect.ValueOf(x)
- return v.Kind() == reflect.Ptr && v.IsNil()
-}
-
-// XXX_InternalExtensions is an internal representation of proto extensions.
-//
-// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
-// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
-//
-// The methods of XXX_InternalExtensions are not concurrency safe in general,
-// but calls to logically read-only methods such as has and get may be executed concurrently.
-type XXX_InternalExtensions struct {
- // The struct must be indirect so that if a user inadvertently copies a
- // generated message and its embedded XXX_InternalExtensions, they
- // avoid the mayhem of a copied mutex.
- //
- // The mutex serializes all logically read-only operations to p.extensionMap.
- // It is up to the client to ensure that write operations to p.extensionMap are
- // mutually exclusive with other accesses.
- p *struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }
-}
-
-// extensionsWrite returns the extension map, creating it on first use.
-func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
- if e.p == nil {
- e.p = new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- })
- e.p.extensionMap = make(map[int32]Extension)
- }
- return e.p.extensionMap
-}
-
-// extensionsRead returns the extensions map for read-only use. It may be nil.
-// The caller must hold the returned mutex's lock when accessing Elements within the map.
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
- if e.p == nil {
- return nil, nil
- }
- return e.p.extensionMap, &e.p.mu
-}
-
-// ExtensionDesc represents an extension specification.
-// Used in generated code from the protocol compiler.
-type ExtensionDesc struct {
- ExtendedType Message // nil pointer to the type that is being extended
- ExtensionType interface{} // nil pointer to the extension type
- Field int32 // field number
- Name string // fully-qualified name of extension, for text formatting
- Tag string // protobuf tag style
- Filename string // name of the file in which the extension is defined
-}
-
-func (ed *ExtensionDesc) repeated() bool {
- t := reflect.TypeOf(ed.ExtensionType)
- return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
-}
-
-// Extension represents an extension in a message.
-type Extension struct {
- // When an extension is stored in a message using SetExtension
- // only desc and value are set. When the message is marshaled
- // enc will be set to the encoded form of the message.
- //
- // When a message is unmarshaled and contains extensions, each
- // extension will have only enc set. When such an extension is
- // accessed using GetExtension (or GetExtensions) desc and value
- // will be set.
- desc *ExtensionDesc
- value interface{}
- enc []byte
-}
-
-// SetRawExtension is for testing only.
-func SetRawExtension(base Message, id int32, b []byte) {
- if ebase, ok := base.(extensionsBytes); ok {
- clearExtension(base, id)
- ext := ebase.GetExtensions()
- *ext = append(*ext, b...)
- return
- }
- epb, err := extendable(base)
- if err != nil {
- return
- }
- extmap := epb.extensionsWrite()
- extmap[id] = Extension{enc: b}
-}
-
-// isExtensionField returns true iff the given field number is in an extension range.
-func isExtensionField(pb extendableProto, field int32) bool {
- for _, er := range pb.ExtensionRangeArray() {
- if er.Start <= field && field <= er.End {
- return true
- }
- }
- return false
-}
-
-// checkExtensionTypes checks that the given extension is valid for pb.
-func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
- var pbi interface{} = pb
- // Check the extended type.
- if ea, ok := pbi.(extensionAdapter); ok {
- pbi = ea.extendableProtoV1
- }
- if ea, ok := pbi.(slowExtensionAdapter); ok {
- pbi = ea.extensionsBytes
- }
- if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
- }
- // Check the range.
- if !isExtensionField(pb, extension.Field) {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return nil
-}
-
-// extPropKey is sufficient to uniquely identify an extension.
-type extPropKey struct {
- base reflect.Type
- field int32
-}
-
-var extProp = struct {
- sync.RWMutex
- m map[extPropKey]*Properties
-}{
- m: make(map[extPropKey]*Properties),
-}
-
-func extensionProperties(ed *ExtensionDesc) *Properties {
- key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
-
- extProp.RLock()
- if prop, ok := extProp.m[key]; ok {
- extProp.RUnlock()
- return prop
- }
- extProp.RUnlock()
-
- extProp.Lock()
- defer extProp.Unlock()
- // Check again.
- if prop, ok := extProp.m[key]; ok {
- return prop
- }
-
- prop := new(Properties)
- prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
- extProp.m[key] = prop
- return prop
-}
-
-// HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb Message, extension *ExtensionDesc) bool {
- if epb, doki := pb.(extensionsBytes); doki {
- ext := epb.GetExtensions()
- buf := *ext
- o := 0
- for o < len(buf) {
- tag, n := DecodeVarint(buf[o:])
- fieldNum := int32(tag >> 3)
- if int32(fieldNum) == extension.Field {
- return true
- }
- wireType := int(tag & 0x7)
- o += n
- l, err := size(buf[o:], wireType)
- if err != nil {
- return false
- }
- o += l
- }
- return false
- }
- // TODO: Check types, field numbers, etc.?
- epb, err := extendable(pb)
- if err != nil {
- return false
- }
- extmap, mu := epb.extensionsRead()
- if extmap == nil {
- return false
- }
- mu.Lock()
- _, ok := extmap[extension.Field]
- mu.Unlock()
- return ok
-}
-
-// ClearExtension removes the given extension from pb.
-func ClearExtension(pb Message, extension *ExtensionDesc) {
- clearExtension(pb, extension.Field)
-}
-
-func clearExtension(pb Message, fieldNum int32) {
- if epb, ok := pb.(extensionsBytes); ok {
- offset := 0
- for offset != -1 {
- offset = deleteExtension(epb, fieldNum, offset)
- }
- return
- }
- epb, err := extendable(pb)
- if err != nil {
- return
- }
- // TODO: Check types, field numbers, etc.?
- extmap := epb.extensionsWrite()
- delete(extmap, fieldNum)
-}
-
-// GetExtension retrieves a proto2 extended field from pb.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes of the field extension.
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- if epb, doki := pb.(extensionsBytes); doki {
- ext := epb.GetExtensions()
- return decodeExtensionFromBytes(extension, *ext)
- }
-
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
-
- if extension.ExtendedType != nil {
- // can only check type if this is a complete descriptor
- if cerr := checkExtensionTypes(epb, extension); cerr != nil {
- return nil, cerr
- }
- }
-
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return defaultExtensionValue(extension)
- }
- mu.Lock()
- defer mu.Unlock()
- e, ok := emap[extension.Field]
- if !ok {
- // defaultExtensionValue returns the default value or
- // ErrMissingExtension if there is no default.
- return defaultExtensionValue(extension)
- }
-
- if e.value != nil {
- // Already decoded. Check the descriptor, though.
- if e.desc != extension {
- // This shouldn't happen. If it does, it means that
- // GetExtension was called twice with two different
- // descriptors with the same field number.
- return nil, errors.New("proto: descriptor conflict")
- }
- return e.value, nil
- }
-
- if extension.ExtensionType == nil {
- // incomplete descriptor
- return e.enc, nil
- }
-
- v, err := decodeExtension(e.enc, extension)
- if err != nil {
- return nil, err
- }
-
- // Remember the decoded version and drop the encoded version.
- // That way it is safe to mutate what we return.
- e.value = v
- e.desc = extension
- e.enc = nil
- emap[extension.Field] = e
- return e.value, nil
-}
-
-// defaultExtensionValue returns the default value for extension.
-// If no default for an extension is defined ErrMissingExtension is returned.
-func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
- if extension.ExtensionType == nil {
- // incomplete descriptor, so no default
- return nil, ErrMissingExtension
- }
-
- t := reflect.TypeOf(extension.ExtensionType)
- props := extensionProperties(extension)
-
- sf, _, err := fieldDefault(t, props)
- if err != nil {
- return nil, err
- }
-
- if sf == nil || sf.value == nil {
- // There is no default value.
- return nil, ErrMissingExtension
- }
-
- if t.Kind() != reflect.Ptr {
- // We do not need to return a Ptr, we can directly return sf.value.
- return sf.value, nil
- }
-
- // We need to return an interface{} that is a pointer to sf.value.
- value := reflect.New(t).Elem()
- value.Set(reflect.New(value.Type().Elem()))
- if sf.kind == reflect.Int32 {
- // We may have an int32 or an enum, but the underlying data is int32.
- // Since we can't set an int32 into a non int32 reflect.value directly
- // set it as a int32.
- value.Elem().SetInt(int64(sf.value.(int32)))
- } else {
- value.Elem().Set(reflect.ValueOf(sf.value))
- }
- return value.Interface(), nil
-}
-
-// decodeExtension decodes an extension encoded in b.
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- t := reflect.TypeOf(extension.ExtensionType)
- unmarshal := typeUnmarshaler(t, extension.Tag)
-
- // t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate space to store the pointer/slice.
- value := reflect.New(t).Elem()
-
- var err error
- for {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- wire := int(x) & 7
-
- b, err = unmarshal(b, valToPointer(value.Addr()), wire)
- if err != nil {
- return nil, err
- }
-
- if len(b) == 0 {
- break
- }
- }
- return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
- if err != nil {
- return
- }
- }
- return
-}
-
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, err := extendable(pb)
- if err != nil {
- return nil, err
- }
- registeredExtensions := RegisteredExtensions(pb)
-
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return nil, nil
- }
- mu.Lock()
- defer mu.Unlock()
- extensions := make([]*ExtensionDesc, 0, len(emap))
- for extid, e := range emap {
- desc := e.desc
- if desc == nil {
- desc = registeredExtensions[extid]
- if desc == nil {
- desc = &ExtensionDesc{Field: extid}
- }
- }
-
- extensions = append(extensions, desc)
- }
- return extensions, nil
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- if epb, ok := pb.(extensionsBytes); ok {
- ClearExtension(pb, extension)
- newb, err := encodeExtension(extension, value)
- if err != nil {
- return err
- }
- bb := epb.GetExtensions()
- *bb = append(*bb, newb...)
- return nil
- }
- epb, err := extendable(pb)
- if err != nil {
- return err
- }
- if err := checkExtensionTypes(epb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
- }
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
- return nil
-}
-
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
- if epb, doki := pb.(extensionsBytes); doki {
- ext := epb.GetExtensions()
- *ext = []byte{}
- return
- }
- epb, err := extendable(pb)
- if err != nil {
- return
- }
- m := epb.extensionsWrite()
- for k := range m {
- delete(m, k)
- }
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
- }
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
- }
- m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
deleted file mode 100644
index 6f1ae12..0000000
--- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strings"
- "sync"
-)
-
-type extensionsBytes interface {
- Message
- ExtensionRangeArray() []ExtensionRange
- GetExtensions() *[]byte
-}
-
-type slowExtensionAdapter struct {
- extensionsBytes
-}
-
-func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
- panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
-}
-
-func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
- b := s.GetExtensions()
- m, err := BytesToExtensionsMap(*b)
- if err != nil {
- panic(err)
- }
- return m, notLocker{}
-}
-
-func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
- if reflect.ValueOf(pb).IsNil() {
- return ifnotset
- }
- value, err := GetExtension(pb, extension)
- if err != nil {
- return ifnotset
- }
- if value == nil {
- return ifnotset
- }
- if value.(*bool) == nil {
- return ifnotset
- }
- return *(value.(*bool))
-}
-
-func (this *Extension) Equal(that *Extension) bool {
- if err := this.Encode(); err != nil {
- return false
- }
- if err := that.Encode(); err != nil {
- return false
- }
- return bytes.Equal(this.enc, that.enc)
-}
-
-func (this *Extension) Compare(that *Extension) int {
- if err := this.Encode(); err != nil {
- return 1
- }
- if err := that.Encode(); err != nil {
- return -1
- }
- return bytes.Compare(this.enc, that.enc)
-}
-
-func SizeOfInternalExtension(m extendableProto) (n int) {
- info := getMarshalInfo(reflect.TypeOf(m))
- return info.sizeV1Extensions(m.extensionsWrite())
-}
-
-type sortableMapElem struct {
- field int32
- ext Extension
-}
-
-func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
- s := make(sortableExtensions, 0, len(m))
- for k, v := range m {
- s = append(s, &sortableMapElem{field: k, ext: v})
- }
- return s
-}
-
-type sortableExtensions []*sortableMapElem
-
-func (this sortableExtensions) Len() int { return len(this) }
-
-func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
-
-func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
-
-func (this sortableExtensions) String() string {
- sort.Sort(this)
- ss := make([]string, len(this))
- for i := range this {
- ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
- }
- return "map[" + strings.Join(ss, ",") + "]"
-}
-
-func StringFromInternalExtension(m extendableProto) string {
- return StringFromExtensionsMap(m.extensionsWrite())
-}
-
-func StringFromExtensionsMap(m map[int32]Extension) string {
- return newSortableExtensionsFromMap(m).String()
-}
-
-func StringFromExtensionsBytes(ext []byte) string {
- m, err := BytesToExtensionsMap(ext)
- if err != nil {
- panic(err)
- }
- return StringFromExtensionsMap(m)
-}
-
-func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
- return EncodeExtensionMap(m.extensionsWrite(), data)
-}
-
-func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
- return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
-}
-
-func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
- o := 0
- for _, e := range m {
- if err := e.Encode(); err != nil {
- return 0, err
- }
- n := copy(data[o:], e.enc)
- if n != len(e.enc) {
- return 0, io.ErrShortBuffer
- }
- o += n
- }
- return o, nil
-}
-
-func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
- o := 0
- end := len(data)
- for _, e := range m {
- if err := e.Encode(); err != nil {
- return 0, err
- }
- n := copy(data[end-len(e.enc):], e.enc)
- if n != len(e.enc) {
- return 0, io.ErrShortBuffer
- }
- end -= n
- o += n
- }
- return o, nil
-}
-
-func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
- e := m[id]
- if err := e.Encode(); err != nil {
- return nil, err
- }
- return e.enc, nil
-}
-
-func size(buf []byte, wire int) (int, error) {
- switch wire {
- case WireVarint:
- _, n := DecodeVarint(buf)
- return n, nil
- case WireFixed64:
- return 8, nil
- case WireBytes:
- v, n := DecodeVarint(buf)
- return int(v) + n, nil
- case WireFixed32:
- return 4, nil
- case WireStartGroup:
- offset := 0
- for {
- u, n := DecodeVarint(buf[offset:])
- fwire := int(u & 0x7)
- offset += n
- if fwire == WireEndGroup {
- return offset, nil
- }
- s, err := size(buf[offset:], wire)
- if err != nil {
- return 0, err
- }
- offset += s
- }
- }
- return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
-}
-
-func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
- m := make(map[int32]Extension)
- i := 0
- for i < len(buf) {
- tag, n := DecodeVarint(buf[i:])
- if n <= 0 {
- return nil, fmt.Errorf("unable to decode varint")
- }
- fieldNum := int32(tag >> 3)
- wireType := int(tag & 0x7)
- l, err := size(buf[i+n:], wireType)
- if err != nil {
- return nil, err
- }
- end := i + int(l) + n
- m[int32(fieldNum)] = Extension{enc: buf[i:end]}
- i = end
- }
- return m, nil
-}
-
-func NewExtension(e []byte) Extension {
- ee := Extension{enc: make([]byte, len(e))}
- copy(ee.enc, e)
- return ee
-}
-
-func AppendExtension(e Message, tag int32, buf []byte) {
- if ee, eok := e.(extensionsBytes); eok {
- ext := ee.GetExtensions()
- *ext = append(*ext, buf...)
- return
- }
- if ee, eok := e.(extendableProto); eok {
- m := ee.extensionsWrite()
- ext := m[int32(tag)] // may be missing
- ext.enc = append(ext.enc, buf...)
- m[int32(tag)] = ext
- }
-}
-
-func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
- u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
- ei := u.getExtElemInfo(extension)
- v := value
- p := toAddrPointer(&v, ei.isptr)
- siz := ei.sizer(p, SizeVarint(ei.wiretag))
- buf := make([]byte, 0, siz)
- return ei.marshaler(buf, p, ei.wiretag, false)
-}
-
-func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
- o := 0
- for o < len(buf) {
- tag, n := DecodeVarint((buf)[o:])
- fieldNum := int32(tag >> 3)
- wireType := int(tag & 0x7)
- if o+n > len(buf) {
- return nil, fmt.Errorf("unable to decode extension")
- }
- l, err := size((buf)[o+n:], wireType)
- if err != nil {
- return nil, err
- }
- if int32(fieldNum) == extension.Field {
- if o+n+l > len(buf) {
- return nil, fmt.Errorf("unable to decode extension")
- }
- v, err := decodeExtension((buf)[o:o+n+l], extension)
- if err != nil {
- return nil, err
- }
- return v, nil
- }
- o += n + l
- }
- return defaultExtensionValue(extension)
-}
-
-func (this *Extension) Encode() error {
- if this.enc == nil {
- var err error
- this.enc, err = encodeExtension(this.desc, this.value)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (this Extension) GoString() string {
- if err := this.Encode(); err != nil {
- return fmt.Sprintf("error encoding extension: %v", err)
- }
- return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
-}
-
-func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
- typ := reflect.TypeOf(pb).Elem()
- ext, ok := extensionMaps[typ]
- if !ok {
- return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
- }
- desc, ok := ext[fieldNum]
- if !ok {
- return errors.New("proto: bad extension number; not in declared ranges")
- }
- return SetExtension(pb, desc, value)
-}
-
-func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
- typ := reflect.TypeOf(pb).Elem()
- ext, ok := extensionMaps[typ]
- if !ok {
- return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
- }
- desc, ok := ext[fieldNum]
- if !ok {
- return nil, fmt.Errorf("unregistered field number %d", fieldNum)
- }
- return GetExtension(pb, desc)
-}
-
-func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
- x := &XXX_InternalExtensions{
- p: new(struct {
- mu sync.Mutex
- extensionMap map[int32]Extension
- }),
- }
- x.p.extensionMap = m
- return *x
-}
-
-func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
- pb := extendable.(extendableProto)
- return pb.extensionsWrite()
-}
-
-func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
- ext := pb.GetExtensions()
- for offset < len(*ext) {
- tag, n1 := DecodeVarint((*ext)[offset:])
- fieldNum := int32(tag >> 3)
- wireType := int(tag & 0x7)
- n2, err := size((*ext)[offset+n1:], wireType)
- if err != nil {
- panic(err)
- }
- newOffset := offset + n1 + n2
- if fieldNum == theFieldNum {
- *ext = append((*ext)[:offset], (*ext)[newOffset:]...)
- return offset
- }
- offset = newOffset
- }
- return -1
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
deleted file mode 100644
index 80db1c1..0000000
--- a/vendor/github.com/gogo/protobuf/proto/lib.go
+++ /dev/null
@@ -1,973 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and an Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/gogo/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/gogo/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
-// Marshal reports this when a required field is not initialized.
-// Unmarshal reports this when a required field is missing from the wire data.
-type RequiredNotSetError struct{ field string }
-
-func (e *RequiredNotSetError) Error() string {
- if e.field == "" {
- return fmt.Sprintf("proto: required field not set")
- }
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-type invalidUTF8Error struct{ field string }
-
-func (e *invalidUTF8Error) Error() string {
- if e.field == "" {
- return "proto: invalid UTF-8 detected"
- }
- return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
-}
-func (e *invalidUTF8Error) InvalidUTF8() bool {
- return true
-}
-
-// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
-// This error should not be exposed to the external API as such errors should
-// be recreated with the field information.
-var errInvalidUTF8 = &invalidUTF8Error{}
-
-// isNonFatal reports whether the error is either a RequiredNotSet error
-// or an InvalidUTF8 error.
-func isNonFatal(err error) bool {
- if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
- return true
- }
- if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
- return true
- }
- return false
-}
-
-type nonFatal struct{ E error }
-
-// Merge merges err into nf and reports whether the merge was successful,
-// i.e. whether err was nil or non-fatal. It returns false for any fatal error.
-func (nf *nonFatal) Merge(err error) (ok bool) {
- if err == nil {
- return true // not an error
- }
- if !isNonFatal(err) {
- return false // fatal error
- }
- if nf.E == nil {
- nf.E = err // store first instance of non-fatal error
- }
- return true
-}
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-// SetDeterministic sets whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexicographical order. This is an implementation detail and
-// subject to change.
-func (p *Buffer) SetDeterministic(deterministic bool) {
- p.deterministic = deterministic
-}
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- sindex := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = sindex
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or T or []*T or []T
- switch f.Kind() {
- case reflect.Struct:
- setDefaults(f, recur, zeros)
-
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.Kind() == reflect.Ptr && e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to its defaultMessage,
- // which records the scalar fields and their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Struct:
- nestedMessage = true // non-nullable
-
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr, reflect.Struct:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// mapKeys returns a sort.Interface to be used for sorting the map keys.
-// Map fields may have key types of non-float scalars, strings and enums.
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{vs: vs}
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- case reflect.Bool:
- s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
- case reflect.String:
- s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
- default:
- panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-const (
- // GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- GoGoProtoPackageIsVersion3 = true
-
- // GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- GoGoProtoPackageIsVersion2 = true
-
- // GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- GoGoProtoPackageIsVersion1 = true
-)
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
- marshal *marshalInfo
- unmarshal *unmarshalInfo
- merge *mergeInfo
- discard *discardInfo
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
deleted file mode 100644
index b3aa391..0000000
--- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "encoding/json"
- "strconv"
-)
-
-type Sizer interface {
- Size() int
-}
-
-type ProtoSizer interface {
- ProtoSize() int
-}
-
-func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
- s, ok := m[value]
- if !ok {
- s = strconv.Itoa(int(value))
- }
- return json.Marshal(s)
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
deleted file mode 100644
index f48a756..0000000
--- a/vendor/github.com/gogo/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "errors"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- return ms.find(pb) != nil
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func unmarshalMessageSet(buf []byte, exts interface{}) error {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m = exts.extensionsWrite()
- case map[int32]Extension:
- m = exts
- default:
- return errors.New("proto: not an extension map")
- }
-
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index b6cad90..0000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
- "sync"
-)
-
-const unsafeAllowed = false
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// zeroField is a noop when calling pointer.offset.
-var zeroField = field([]int{})
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// The pointer type is for the table-driven decoder.
-// The implementation here uses a reflect.Value of pointer type to
-// create a generic pointer. In pointer_unsafe.go we use unsafe
-// instead of reflect to implement the same (but faster) interface.
-type pointer struct {
- v reflect.Value
-}
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- return pointer{v: reflect.ValueOf(*i)}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
- v := reflect.ValueOf(*i)
- u := reflect.New(v.Type())
- u.Elem().Set(v)
- return pointer{v: u}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
-}
-
-func (p pointer) isNil() bool {
- return p.v.IsNil()
-}
-
-// grow updates the slice s in place to make it one element longer.
-// s must be addressable.
-// Returns the (addressable) new element.
-func grow(s reflect.Value) reflect.Value {
- n, m := s.Len(), s.Cap()
- if n < m {
- s.SetLen(n + 1)
- } else {
- s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
- }
- return s.Index(n)
-}
-
-func (p pointer) toInt64() *int64 {
- return p.v.Interface().(*int64)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return p.v.Interface().(**int64)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return p.v.Interface().(*[]int64)
-}
-
-var int32ptr = reflect.TypeOf((*int32)(nil))
-
-func (p pointer) toInt32() *int32 {
- return p.v.Convert(int32ptr).Interface().(*int32)
-}
-
-// The toInt32Ptr/Slice methods don't work because of enums.
-// Instead, we must use set/get methods for the int32ptr/slice case.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return p.v.Interface().(**int32)
-}
- func (p pointer) toInt32Slice() *[]int32 {
- return p.v.Interface().(*[]int32)
-}
-*/
-func (p pointer) getInt32Ptr() *int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().(*int32)
- }
- // an enum
- return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
-}
-func (p pointer) setInt32Ptr(v int32) {
- // Allocate value in a *int32. Possibly convert that to a *enum.
- // Then assign it to a **int32 or **enum.
- // Note: we can convert *int32 to *enum, but we can't convert
- // **int32 to **enum!
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
-}
-
-// getInt32Slice copies []int32 from p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getInt32Slice() []int32 {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- return p.v.Elem().Interface().([]int32)
- }
- // an enum
- // Allocate a []int32, then assign []enum's values into it.
- // Note: we can't convert []enum to []int32.
- slice := p.v.Elem()
- s := make([]int32, slice.Len())
- for i := 0; i < slice.Len(); i++ {
- s[i] = int32(slice.Index(i).Int())
- }
- return s
-}
-
-// setInt32Slice copies []int32 into p as a new slice.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setInt32Slice(v []int32) {
- if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
- // raw int32 type
- p.v.Elem().Set(reflect.ValueOf(v))
- return
- }
- // an enum
- // Allocate a []enum, then assign []int32's values into it.
- // Note: we can't convert []enum to []int32.
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
- for i, x := range v {
- slice.Index(i).SetInt(int64(x))
- }
- p.v.Elem().Set(slice)
-}
-func (p pointer) appendInt32Slice(v int32) {
- grow(p.v.Elem()).SetInt(int64(v))
-}
-
-func (p pointer) toUint64() *uint64 {
- return p.v.Interface().(*uint64)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return p.v.Interface().(**uint64)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return p.v.Interface().(*[]uint64)
-}
-func (p pointer) toUint32() *uint32 {
- return p.v.Interface().(*uint32)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return p.v.Interface().(**uint32)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return p.v.Interface().(*[]uint32)
-}
-func (p pointer) toBool() *bool {
- return p.v.Interface().(*bool)
-}
-func (p pointer) toBoolPtr() **bool {
- return p.v.Interface().(**bool)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return p.v.Interface().(*[]bool)
-}
-func (p pointer) toFloat64() *float64 {
- return p.v.Interface().(*float64)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return p.v.Interface().(**float64)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return p.v.Interface().(*[]float64)
-}
-func (p pointer) toFloat32() *float32 {
- return p.v.Interface().(*float32)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return p.v.Interface().(**float32)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return p.v.Interface().(*[]float32)
-}
-func (p pointer) toString() *string {
- return p.v.Interface().(*string)
-}
-func (p pointer) toStringPtr() **string {
- return p.v.Interface().(**string)
-}
-func (p pointer) toStringSlice() *[]string {
- return p.v.Interface().(*[]string)
-}
-func (p pointer) toBytes() *[]byte {
- return p.v.Interface().(*[]byte)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return p.v.Interface().(*[][]byte)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return p.v.Interface().(*XXX_InternalExtensions)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return p.v.Interface().(*map[int32]Extension)
-}
-func (p pointer) getPointer() pointer {
- return pointer{v: p.v.Elem()}
-}
-func (p pointer) setPointer(q pointer) {
- p.v.Elem().Set(q.v)
-}
-func (p pointer) appendPointer(q pointer) {
- grow(p.v.Elem()).Set(q.v)
-}
-
-// getPointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) getPointerSlice() []pointer {
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// setPointerSlice copies []pointer into p as a new []*T.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) setPointerSlice(v []pointer) {
- if v == nil {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
- return
- }
- s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
- for _, p := range v {
- s = reflect.Append(s, p.v)
- }
- p.v.Elem().Set(s)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- if p.v.Elem().IsNil() {
- return pointer{v: p.v.Elem()}
- }
- return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
-}
-
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- // TODO: check that p.v.Type().Elem() == t?
- return p.v
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- return *p
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomicLock.Lock()
- defer atomicLock.Unlock()
- *p = v
-}
-
-var atomicLock sync.Mutex
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
deleted file mode 100644
index 7ffd3c2..0000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build purego appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "reflect"
-)
-
-// TODO: untested, so probably incorrect.
-
-func (p pointer) getRef() pointer {
- return pointer{v: p.v.Addr()}
-}
-
-func (p pointer) appendRef(v pointer, typ reflect.Type) {
- slice := p.getSlice(typ)
- elem := v.asPointerTo(typ).Elem()
- newSlice := reflect.Append(slice, elem)
- slice.Set(newSlice)
-}
-
-func (p pointer) getSlice(typ reflect.Type) reflect.Value {
- sliceTyp := reflect.SliceOf(typ)
- slice := p.asPointerTo(sliceTyp)
- slice = slice.Elem()
- return slice
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index d55a335..0000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "sync/atomic"
- "unsafe"
-)
-
-const unsafeAllowed = true
-
-// A field identifies a field in a struct, accessible from a pointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// zeroField is a noop when calling pointer.offset.
-const zeroField = field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != invalidField
-}
-
-// The pointer type below is for the new table-driven encoder/decoder.
-// The implementation here uses unsafe.Pointer to create a generic pointer.
-// In pointer_reflect.go we use reflect instead of unsafe to implement
-// the same (but slower) interface.
-type pointer struct {
- p unsafe.Pointer
-}
-
-// size of pointer
-var ptrSize = unsafe.Sizeof(uintptr(0))
-
-// toPointer converts an interface of pointer type to a pointer
-// that points to the same target.
-func toPointer(i *Message) pointer {
- // Super-tricky - read pointer out of data word of interface value.
- // Saves ~25ns over the equivalent:
- // return valToPointer(reflect.ValueOf(*i))
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// toAddrPointer converts an interface to a pointer that points to
-// the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
- // Super-tricky - read or get the address of data word of interface value.
- if isptr {
- // The interface is of pointer type, thus it is a direct interface.
- // The data word is the pointer data itself. We take its address.
- return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
- }
- // The interface is not of pointer type. The data word is the pointer
- // to the data.
- return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
-}
-
-// valToPointer converts v to a pointer. v must be of pointer type.
-func valToPointer(v reflect.Value) pointer {
- return pointer{p: unsafe.Pointer(v.Pointer())}
-}
-
-// offset converts from a pointer to a structure to a pointer to
-// one of its fields.
-func (p pointer) offset(f field) pointer {
- // For safety, we should panic if !f.IsValid, however calling panic causes
- // this to no longer be inlineable, which is a serious performance cost.
- /*
- if !f.IsValid() {
- panic("invalid field")
- }
- */
- return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
-}
-
-func (p pointer) isNil() bool {
- return p.p == nil
-}
-
-func (p pointer) toInt64() *int64 {
- return (*int64)(p.p)
-}
-func (p pointer) toInt64Ptr() **int64 {
- return (**int64)(p.p)
-}
-func (p pointer) toInt64Slice() *[]int64 {
- return (*[]int64)(p.p)
-}
-func (p pointer) toInt32() *int32 {
- return (*int32)(p.p)
-}
-
-// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
-/*
- func (p pointer) toInt32Ptr() **int32 {
- return (**int32)(p.p)
- }
- func (p pointer) toInt32Slice() *[]int32 {
- return (*[]int32)(p.p)
- }
-*/
-func (p pointer) getInt32Ptr() *int32 {
- return *(**int32)(p.p)
-}
-func (p pointer) setInt32Ptr(v int32) {
- *(**int32)(p.p) = &v
-}
-
-// getInt32Slice loads a []int32 from p.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getInt32Slice() []int32 {
- return *(*[]int32)(p.p)
-}
-
-// setInt32Slice stores a []int32 to p.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setInt32Slice(v []int32) {
- *(*[]int32)(p.p) = v
-}
-
-// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
-func (p pointer) appendInt32Slice(v int32) {
- s := (*[]int32)(p.p)
- *s = append(*s, v)
-}
-
-func (p pointer) toUint64() *uint64 {
- return (*uint64)(p.p)
-}
-func (p pointer) toUint64Ptr() **uint64 {
- return (**uint64)(p.p)
-}
-func (p pointer) toUint64Slice() *[]uint64 {
- return (*[]uint64)(p.p)
-}
-func (p pointer) toUint32() *uint32 {
- return (*uint32)(p.p)
-}
-func (p pointer) toUint32Ptr() **uint32 {
- return (**uint32)(p.p)
-}
-func (p pointer) toUint32Slice() *[]uint32 {
- return (*[]uint32)(p.p)
-}
-func (p pointer) toBool() *bool {
- return (*bool)(p.p)
-}
-func (p pointer) toBoolPtr() **bool {
- return (**bool)(p.p)
-}
-func (p pointer) toBoolSlice() *[]bool {
- return (*[]bool)(p.p)
-}
-func (p pointer) toFloat64() *float64 {
- return (*float64)(p.p)
-}
-func (p pointer) toFloat64Ptr() **float64 {
- return (**float64)(p.p)
-}
-func (p pointer) toFloat64Slice() *[]float64 {
- return (*[]float64)(p.p)
-}
-func (p pointer) toFloat32() *float32 {
- return (*float32)(p.p)
-}
-func (p pointer) toFloat32Ptr() **float32 {
- return (**float32)(p.p)
-}
-func (p pointer) toFloat32Slice() *[]float32 {
- return (*[]float32)(p.p)
-}
-func (p pointer) toString() *string {
- return (*string)(p.p)
-}
-func (p pointer) toStringPtr() **string {
- return (**string)(p.p)
-}
-func (p pointer) toStringSlice() *[]string {
- return (*[]string)(p.p)
-}
-func (p pointer) toBytes() *[]byte {
- return (*[]byte)(p.p)
-}
-func (p pointer) toBytesSlice() *[][]byte {
- return (*[][]byte)(p.p)
-}
-func (p pointer) toExtensions() *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(p.p)
-}
-func (p pointer) toOldExtensions() *map[int32]Extension {
- return (*map[int32]Extension)(p.p)
-}
-
-// getPointerSlice loads []*T from p as a []pointer.
-// The value returned is aliased with the original slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) getPointerSlice() []pointer {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We load it as []pointer.
- return *(*[]pointer)(p.p)
-}
-
-// setPointerSlice stores []pointer into p as a []*T.
-// The value set is aliased with the input slice.
-// This behavior differs from the implementation in pointer_reflect.go.
-func (p pointer) setPointerSlice(v []pointer) {
- // Super-tricky - p should point to a []*T where T is a
- // message type. We store it as []pointer.
- *(*[]pointer)(p.p) = v
-}
-
-// getPointer loads the pointer at p and returns it.
-func (p pointer) getPointer() pointer {
- return pointer{p: *(*unsafe.Pointer)(p.p)}
-}
-
-// setPointer stores the pointer q at p.
-func (p pointer) setPointer(q pointer) {
- *(*unsafe.Pointer)(p.p) = q.p
-}
-
-// append q to the slice pointed to by p.
-func (p pointer) appendPointer(q pointer) {
- s := (*[]unsafe.Pointer)(p.p)
- *s = append(*s, q.p)
-}
-
-// getInterfacePointer returns a pointer that points to the
-// interface data of the interface pointed by p.
-func (p pointer) getInterfacePointer() pointer {
- // Super-tricky - read pointer out of data word of interface value.
- return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
-}
-
-// asPointerTo returns a reflect.Value that is a pointer to an
-// object of type t stored at p.
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
- return reflect.NewAt(t, p.p)
-}
-
-func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
- return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
- return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
- return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
-func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
- return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
-}
-func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
- atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
deleted file mode 100644
index aca8eed..0000000
--- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !purego,!appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "unsafe"
-)
-
-func (p pointer) getRef() pointer {
- return pointer{p: (unsafe.Pointer)(&p.p)}
-}
-
-func (p pointer) appendRef(v pointer, typ reflect.Type) {
- slice := p.getSlice(typ)
- elem := v.asPointerTo(typ).Elem()
- newSlice := reflect.Append(slice, elem)
- slice.Set(newSlice)
-}
-
-func (p pointer) getSlice(typ reflect.Type) reflect.Value {
- sliceTyp := reflect.SliceOf(typ)
- slice := p.asPointerTo(sliceTyp)
- slice = slice.Elem()
- return slice
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
deleted file mode 100644
index 28da147..0000000
--- a/vendor/github.com/gogo/protobuf/proto/properties.go
+++ /dev/null
@@ -1,610 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
-)
-
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
-)
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
-type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
- OneofTypes map[string]*OneofProperties
-}
-
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
-type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
- WireType int
- Tag int
- Required bool
- Optional bool
- Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
- CustomType string
- CastType string
- StdTime bool
- StdDuration bool
- WktPointer bool
-
- stype reflect.Type // set for struct types only
- ctype reflect.Type // set for custom types only
- sprop *StructProperties // set for struct types only
-
- mtype reflect.Type // set for map types only
- MapKeyProp *Properties // set for map types only
- MapValProp *Properties // set for map types only
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += ","
- s += strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
- s += ",json=" + p.JSONName
- }
- if p.proto3 {
- s += ",proto3"
- }
- if p.oneof {
- s += ",oneof"
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- log.Printf("proto: tag has too few fields: %q", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- case "fixed32":
- p.WireType = WireFixed32
- case "fixed64":
- p.WireType = WireFixed64
- case "zigzag32":
- p.WireType = WireVarint
- case "zigzag64":
- p.WireType = WireVarint
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- log.Printf("proto: tag has unknown wire type: %q", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
-outer:
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
- p.Optional = true
- case f == "rep":
- p.Repeated = true
- case f == "packed":
- p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
- p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break outer
- }
- case strings.HasPrefix(f, "embedded="):
- p.OrigName = strings.Split(f, "=")[1]
- case strings.HasPrefix(f, "customtype="):
- p.CustomType = strings.Split(f, "=")[1]
- case strings.HasPrefix(f, "casttype="):
- p.CastType = strings.Split(f, "=")[1]
- case f == "stdtime":
- p.StdTime = true
- case f == "stdduration":
- p.StdDuration = true
- case f == "wktptr":
- p.WktPointer = true
- }
- }
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// setFieldProps initializes the field properties for submessages and maps.
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- isMap := typ.Kind() == reflect.Map
- if len(p.CustomType) > 0 && !isMap {
- p.ctype = typ
- p.setTag(lockGetProp)
- return
- }
- if p.StdTime && !isMap {
- p.setTag(lockGetProp)
- return
- }
- if p.StdDuration && !isMap {
- p.setTag(lockGetProp)
- return
- }
- if p.WktPointer && !isMap {
- p.setTag(lockGetProp)
- return
- }
- switch t1 := typ; t1.Kind() {
- case reflect.Struct:
- p.stype = typ
- case reflect.Ptr:
- if t1.Elem().Kind() == reflect.Struct {
- p.stype = t1.Elem()
- }
- case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- case reflect.Struct:
- p.stype = t3
- }
- case reflect.Struct:
- p.stype = t2
- }
-
- case reflect.Map:
-
- p.mtype = t1
- p.MapKeyProp = &Properties{}
- p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.MapValProp = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
-
- p.MapValProp.CustomType = p.CustomType
- p.MapValProp.StdDuration = p.StdDuration
- p.MapValProp.StdTime = p.StdTime
- p.MapValProp.WktPointer = p.WktPointer
- p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
- p.setTag(lockGetProp)
-}
-
-func (p *Properties) setTag(lockGetProp bool) {
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
- }
- }
-}
-
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-)
-
-// Init populates the properties from a protocol buffer struct tag.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
- p.Name = name
- p.OrigName = name
- if tag == "" {
- return
- }
- p.Parse(tag)
- p.setFieldProps(typ, f, lockGetProp)
-}
-
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
-func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- return sprop
- }
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
-}
-
-type (
- oneofFuncsIface interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- oneofWrappersIface interface {
- XXX_OneofWrappers() []interface{}
- }
-)
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- return prop
- }
-
- prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
-
- isOneofMessage := false
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
-
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- isOneofMessage = true
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
- }
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
- }
- }
-
- // Re-order prop.order.
- sort.Sort(prop)
-
- if isOneofMessage {
- var oots []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oots = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oots = m.XXX_OneofWrappers()
- }
- if len(oots) > 0 {
- // Interpret oneof metadata.
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
- Prop: new(Properties),
- }
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
- }
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
- }
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
- }
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
- }
- prop.reqCount = reqCount
-
- return prop
-}
-
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-var enumStringMaps = make(map[string]map[int32]string)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
- if _, ok := enumStringMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumStringMaps[typeName] = unusedNameMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
- protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypedNils[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
- // Generated code always calls RegisterType with nil x.
- // This check is just for extra safety.
- protoTypedNils[name] = x
- } else {
- protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
- }
- revProtoTypes[t] = name
-}
-
-// RegisterMapType is called from generated code and maps from the fully qualified
-// proto name to the native map type of the proto map definition.
-func RegisterMapType(x interface{}, name string) {
- if reflect.TypeOf(x).Kind() != reflect.Map {
- panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
- }
- if _, ok := protoMapTypes[name]; ok {
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoMapTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
- type xname interface {
- XXX_MessageName() string
- }
- if m, ok := x.(xname); ok {
- return m.XXX_MessageName()
- }
- return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-// The type is not guaranteed to implement proto.Message if the name refers to a
-// map entry.
-func MessageType(name string) reflect.Type {
- if t, ok := protoTypedNils[name]; ok {
- return reflect.TypeOf(t)
- }
- return protoMapTypes[name]
-}
-
-// A registry of all linked proto files.
-var (
- protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
- protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
deleted file mode 100644
index 40ea3dd..0000000
--- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
-)
-
-var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
-var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
deleted file mode 100644
index 5a5fd93..0000000
--- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "io"
-)
-
-func Skip(data []byte) (n int, err error) {
- l := len(data)
- index := 0
- for index < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[index]
- index++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- index++
- if data[index-1] < 0x80 {
- break
- }
- }
- return index, nil
- case 1:
- index += 8
- return index, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[index]
- index++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- index += length
- return index, nil
- case 3:
- for {
- var innerWire uint64
- var start int = index
- for shift := uint(0); ; shift += 7 {
- if index >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[index]
- index++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := Skip(data[start:])
- if err != nil {
- return 0, err
- }
- index = start + next
- }
- return index, nil
- case 4:
- return index, nil
- case 5:
- index += 4
- return index, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
deleted file mode 100644
index f8babde..0000000
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal.go
+++ /dev/null
@@ -1,3009 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// a sizer takes a pointer to a field and the size of its tag, computes the size of
-// the encoded data.
-type sizer func(pointer, int) int
-
-// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
-// marshals the field to the end of the slice, returns the slice and error (if any).
-type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
-
-// marshalInfo is the information used for marshaling a message.
-type marshalInfo struct {
- typ reflect.Type
- fields []*marshalFieldInfo
- unrecognized field // offset of XXX_unrecognized
- extensions field // offset of XXX_InternalExtensions
- v1extensions field // offset of XXX_extensions
- sizecache field // offset of XXX_sizecache
- initialized int32 // 0 -- only typ is set, 1 -- fully initialized
- messageset bool // uses message set wire format
- hasmarshaler bool // has custom marshaler
- sync.RWMutex // protect extElems map, also for initialization
- extElems map[int32]*marshalElemInfo // info of extension elements
-
- hassizer bool // has custom sizer
- hasprotosizer bool // has custom protosizer
-
- bytesExtensions field // offset of XXX_extensions where the field type is []byte
-}
-
-// marshalFieldInfo is the information used for marshaling a field of a message.
-type marshalFieldInfo struct {
- field field
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isPointer bool
- required bool // field is required
- name string // name of the field, for error reporting
- oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
-}
-
-// marshalElemInfo is the information used for marshaling an extension or oneof element.
-type marshalElemInfo struct {
- wiretag uint64 // tag in wire format
- tagsize int // size of tag in wire format
- sizer sizer
- marshaler marshaler
- isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
-}
-
-var (
- marshalInfoMap = map[reflect.Type]*marshalInfo{}
- marshalInfoLock sync.Mutex
-
- uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
-)
-
-// getMarshalInfo returns the information to marshal a given type of message.
-// The info it returns may not necessarily initialized.
-// t is the type of the message (NOT the pointer to it).
-func getMarshalInfo(t reflect.Type) *marshalInfo {
- marshalInfoLock.Lock()
- u, ok := marshalInfoMap[t]
- if !ok {
- u = &marshalInfo{typ: t}
- marshalInfoMap[t] = u
- }
- marshalInfoLock.Unlock()
- return u
-}
-
-// Size is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It computes the size of encoded data of msg.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Size(msg Message) int {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want crash in this case.
- return 0
- }
- return u.size(ptr)
-}
-
-// Marshal is the entry point from generated code,
-// and should be ONLY called by generated code.
-// It marshals msg to the end of b.
-// a is a pointer to a place to store cached marshal info.
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
- u := getMessageMarshalInfo(msg, a)
- ptr := toPointer(&msg)
- if ptr.isNil() {
- // We get here if msg is a typed nil ((*SomeMessage)(nil)),
- // so it satisfies the interface, and msg == nil wouldn't
- // catch it. We don't want crash in this case.
- return b, ErrNil
- }
- return u.marshal(b, ptr, deterministic)
-}
-
-func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
- // u := a.marshal, but atomically.
- // We use an atomic here to ensure memory consistency.
- u := atomicLoadMarshalInfo(&a.marshal)
- if u == nil {
- // Get marshal information from type of message.
- t := reflect.ValueOf(msg).Type()
- if t.Kind() != reflect.Ptr {
- panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
- }
- u = getMarshalInfo(t.Elem())
- // Store it in the cache for later users.
- // a.marshal = u, but atomically.
- atomicStoreMarshalInfo(&a.marshal, u)
- }
- return u
-}
-
-// size is the main function to compute the size of the encoded data of a message.
-// ptr is the pointer to the message.
-func (u *marshalInfo) size(ptr pointer) int {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- // Uses the message's Size method if available
- if u.hassizer {
- s := ptr.asPointerTo(u.typ).Interface().(Sizer)
- return s.Size()
- }
- // Uses the message's ProtoSize method if available
- if u.hasprotosizer {
- s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
- return s.ProtoSize()
- }
-
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b, _ := m.Marshal()
- return len(b)
- }
-
- n := 0
- for _, f := range u.fields {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- n += f.sizer(ptr.offset(f.field), f.tagsize)
- }
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- n += u.sizeMessageSet(e)
- } else {
- n += u.sizeExtensions(e)
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- n += u.sizeV1Extensions(m)
- }
- if u.bytesExtensions.IsValid() {
- s := *ptr.offset(u.bytesExtensions).toBytes()
- n += len(s)
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- n += len(s)
- }
-
- // cache the result for use in marshal
- if u.sizecache.IsValid() {
- atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
- }
- return n
-}
-
- // cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
- // it falls back to computing the size.
-func (u *marshalInfo) cachedsize(ptr pointer) int {
- if u.sizecache.IsValid() {
- return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
- }
- return u.size(ptr)
-}
-
-// marshal is the main function to marshal a message. It takes a byte slice and appends
- // the encoded data to the end of the slice and returns the slice and an error (if any).
-// ptr is the pointer to the message.
-// If deterministic is true, map is marshaled in deterministic order.
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeMarshalInfo()
- }
-
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if u.hasmarshaler {
- m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
- b1, err := m.Marshal()
- b = append(b, b1...)
- return b, err
- }
-
- var err, errLater error
- // The old marshaler encodes extensions at the beginning.
- if u.extensions.IsValid() {
- e := ptr.offset(u.extensions).toExtensions()
- if u.messageset {
- b, err = u.appendMessageSet(b, e, deterministic)
- } else {
- b, err = u.appendExtensions(b, e, deterministic)
- }
- if err != nil {
- return b, err
- }
- }
- if u.v1extensions.IsValid() {
- m := *ptr.offset(u.v1extensions).toOldExtensions()
- b, err = u.appendV1Extensions(b, m, deterministic)
- if err != nil {
- return b, err
- }
- }
- if u.bytesExtensions.IsValid() {
- s := *ptr.offset(u.bytesExtensions).toBytes()
- b = append(b, s...)
- }
- for _, f := range u.fields {
- if f.required {
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // Required field is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name}
- }
- continue
- }
- }
- if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
- // nil pointer always marshals to nothing
- continue
- }
- b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
- if err != nil {
- if err1, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errLater == nil {
- errLater = &RequiredNotSetError{f.name + "." + err1.field}
- }
- continue
- }
- if err == errRepeatedHasNil {
- err = errors.New("proto: repeated field " + f.name + " has nil element")
- }
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return b, err
- }
- }
- if u.unrecognized.IsValid() {
- s := *ptr.offset(u.unrecognized).toBytes()
- b = append(b, s...)
- }
- return b, errLater
-}
-
-// computeMarshalInfo initializes the marshal info.
-func (u *marshalInfo) computeMarshalInfo() {
- u.Lock()
- defer u.Unlock()
- if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
- return
- }
-
- t := u.typ
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.v1extensions = invalidField
- u.bytesExtensions = invalidField
- u.sizecache = invalidField
- isOneofMessage := false
-
- if reflect.PtrTo(t).Implements(sizerType) {
- u.hassizer = true
- }
- if reflect.PtrTo(t).Implements(protosizerType) {
- u.hasprotosizer = true
- }
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- if reflect.PtrTo(t).Implements(marshalerType) {
- u.hasmarshaler = true
- atomic.StoreInt32(&u.initialized, 1)
- return
- }
-
- n := t.NumField()
-
- // deal with XXX fields first
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Tag.Get("protobuf_oneof") != "" {
- isOneofMessage = true
- }
- if !strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- switch f.Name {
- case "XXX_sizecache":
- u.sizecache = toField(&f)
- case "XXX_unrecognized":
- u.unrecognized = toField(&f)
- case "XXX_InternalExtensions":
- u.extensions = toField(&f)
- u.messageset = f.Tag.Get("protobuf_messageset") == "1"
- case "XXX_extensions":
- if f.Type.Kind() == reflect.Map {
- u.v1extensions = toField(&f)
- } else {
- u.bytesExtensions = toField(&f)
- }
- case "XXX_NoUnkeyedLiteral":
- // nothing to do
- default:
- panic("unknown XXX field: " + f.Name)
- }
- n--
- }
-
- // get oneof implementers
- var oneofImplementers []interface{}
- // gogo: isOneofMessage is needed for embedded oneof messages that lack a marshaler and unmarshaler
- if isOneofMessage {
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
- }
-
- // normal fields
- fields := make([]marshalFieldInfo, n) // batch allocation
- u.fields = make([]*marshalFieldInfo, 0, n)
- for i, j := 0, 0; i < t.NumField(); i++ {
- f := t.Field(i)
-
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- field := &fields[j]
- j++
- field.name = f.Name
- u.fields = append(u.fields, field)
- if f.Tag.Get("protobuf_oneof") != "" {
- field.computeOneofFieldInfo(&f, oneofImplementers)
- continue
- }
- if f.Tag.Get("protobuf") == "" {
- // field has no tag (not in generated message), ignore it
- u.fields = u.fields[:len(u.fields)-1]
- j--
- continue
- }
- field.computeMarshalFieldInfo(&f)
- }
-
- // fields are marshaled in tag order on the wire.
- sort.Sort(byTag(u.fields))
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// helper for sorting fields by tag
-type byTag []*marshalFieldInfo
-
-func (a byTag) Len() int { return len(a) }
-func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
-
-// getExtElemInfo returns the information to marshal an extension element.
-// The info it returns is initialized.
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
- // get from cache first
- u.RLock()
- e, ok := u.extElems[desc.Field]
- u.RUnlock()
- if ok {
- return e
- }
-
- t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
- tags := strings.Split(desc.Tag, ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizr, marshalr := typeMarshaler(t, tags, false, false)
- e = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizr,
- marshaler: marshalr,
- isptr: t.Kind() == reflect.Ptr,
- }
-
- // update cache
- u.Lock()
- if u.extElems == nil {
- u.extElems = make(map[int32]*marshalElemInfo)
- }
- u.extElems[desc.Field] = e
- u.Unlock()
- return e
-}
-
-// computeMarshalFieldInfo fills up the information to marshal a field.
-func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
- // parse protobuf tag of the field.
- // the tag has the format "bytes,49,opt,name=foo,def=hello!"
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- if tags[0] == "" {
- return
- }
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- if tags[2] == "req" {
- fi.required = true
- }
- fi.setTag(f, tag, wt)
- fi.setMarshaler(f, tags)
-}
-
-func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
- fi.field = toField(f)
- fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
- fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
-
- ityp := f.Type // interface type
- for _, o := range oneofImplementers {
- t := reflect.TypeOf(o)
- if !t.Implements(ityp) {
- continue
- }
- sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
- tags := strings.Split(sf.Tag.Get("protobuf"), ",")
- tag, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("tag is not an integer")
- }
- wt := wiretype(tags[0])
- sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
- fi.oneofElems[t.Elem()] = &marshalElemInfo{
- wiretag: uint64(tag)<<3 | wt,
- tagsize: SizeVarint(uint64(tag) << 3),
- sizer: sizr,
- marshaler: marshalr,
- }
- }
-}
-
-// wiretype returns the wire encoding of the type.
-func wiretype(encoding string) uint64 {
- switch encoding {
- case "fixed32":
- return WireFixed32
- case "fixed64":
- return WireFixed64
- case "varint", "zigzag32", "zigzag64":
- return WireVarint
- case "bytes":
- return WireBytes
- case "group":
- return WireStartGroup
- }
- panic("unknown wire type " + encoding)
-}
-
-// setTag fills up the tag (in wire format) and its size in the info of a field.
-func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
- fi.field = toField(f)
- fi.wiretag = uint64(tag)<<3 | wt
- fi.tagsize = SizeVarint(uint64(tag) << 3)
-}
-
-// setMarshaler fills up the sizer and marshaler in the info of a field.
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
- switch f.Type.Kind() {
- case reflect.Map:
- // map field
- fi.isPointer = true
- fi.sizer, fi.marshaler = makeMapMarshaler(f)
- return
- case reflect.Ptr, reflect.Slice:
- fi.isPointer = true
- }
- fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
-}
-
-// typeMarshaler returns the sizer and marshaler of a given field.
-// t is the type of the field.
-// tags is the generated "protobuf" tag of the field.
-// If nozero is true, zero value is not marshaled to the wire.
-// If oneof is true, it is a oneof field.
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
- encoding := tags[0]
-
- pointer := false
- slice := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- packed := false
- proto3 := false
- ctype := false
- isTime := false
- isDuration := false
- isWktPointer := false
- validateUTF8 := true
- for i := 2; i < len(tags); i++ {
- if tags[i] == "packed" {
- packed = true
- }
- if tags[i] == "proto3" {
- proto3 = true
- }
- if strings.HasPrefix(tags[i], "customtype=") {
- ctype = true
- }
- if tags[i] == "stdtime" {
- isTime = true
- }
- if tags[i] == "stdduration" {
- isDuration = true
- }
- if tags[i] == "wktptr" {
- isWktPointer = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
- if !proto3 && !pointer && !slice {
- nozero = false
- }
-
- if ctype {
- if reflect.PtrTo(t).Implements(customType) {
- if slice {
- return makeMessageRefSliceMarshaler(getMarshalInfo(t))
- }
- if pointer {
- return makeCustomPtrMarshaler(getMarshalInfo(t))
- }
- return makeCustomMarshaler(getMarshalInfo(t))
- } else {
- panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
- }
- }
-
- if isTime {
- if pointer {
- if slice {
- return makeTimePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeTimePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeTimeSliceMarshaler(getMarshalInfo(t))
- }
- return makeTimeMarshaler(getMarshalInfo(t))
- }
-
- if isDuration {
- if pointer {
- if slice {
- return makeDurationPtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeDurationPtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeDurationSliceMarshaler(getMarshalInfo(t))
- }
- return makeDurationMarshaler(getMarshalInfo(t))
- }
-
- if isWktPointer {
- switch t.Kind() {
- case reflect.Float64:
- if pointer {
- if slice {
- return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdDoubleValueMarshaler(getMarshalInfo(t))
- case reflect.Float32:
- if pointer {
- if slice {
- return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdFloatValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdFloatValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdFloatValueMarshaler(getMarshalInfo(t))
- case reflect.Int64:
- if pointer {
- if slice {
- return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt64ValueMarshaler(getMarshalInfo(t))
- case reflect.Uint64:
- if pointer {
- if slice {
- return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt64ValueMarshaler(getMarshalInfo(t))
- case reflect.Int32:
- if pointer {
- if slice {
- return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdInt32ValueMarshaler(getMarshalInfo(t))
- case reflect.Uint32:
- if pointer {
- if slice {
- return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdUInt32ValueMarshaler(getMarshalInfo(t))
- case reflect.Bool:
- if pointer {
- if slice {
- return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBoolValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdBoolValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBoolValueMarshaler(getMarshalInfo(t))
- case reflect.String:
- if pointer {
- if slice {
- return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdStringValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdStringValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdStringValueMarshaler(getMarshalInfo(t))
- case uint8SliceType:
- if pointer {
- if slice {
- return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBytesValuePtrMarshaler(getMarshalInfo(t))
- }
- if slice {
- return makeStdBytesValueSliceMarshaler(getMarshalInfo(t))
- }
- return makeStdBytesValueMarshaler(getMarshalInfo(t))
- default:
- panic(fmt.Sprintf("unknown wktpointer type %#v", t))
- }
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return sizeBoolPtr, appendBoolPtr
- }
- if slice {
- if packed {
- return sizeBoolPackedSlice, appendBoolPackedSlice
- }
- return sizeBoolSlice, appendBoolSlice
- }
- if nozero {
- return sizeBoolValueNoZero, appendBoolValueNoZero
- }
- return sizeBoolValue, appendBoolValue
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixed32Ptr, appendFixed32Ptr
- }
- if slice {
- if packed {
- return sizeFixed32PackedSlice, appendFixed32PackedSlice
- }
- return sizeFixed32Slice, appendFixed32Slice
- }
- if nozero {
- return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
- }
- return sizeFixed32Value, appendFixed32Value
- case "varint":
- if pointer {
- return sizeVarint32Ptr, appendVarint32Ptr
- }
- if slice {
- if packed {
- return sizeVarint32PackedSlice, appendVarint32PackedSlice
- }
- return sizeVarint32Slice, appendVarint32Slice
- }
- if nozero {
- return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
- }
- return sizeVarint32Value, appendVarint32Value
- }
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return sizeFixedS32Ptr, appendFixedS32Ptr
- }
- if slice {
- if packed {
- return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
- }
- return sizeFixedS32Slice, appendFixedS32Slice
- }
- if nozero {
- return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
- }
- return sizeFixedS32Value, appendFixedS32Value
- case "varint":
- if pointer {
- return sizeVarintS32Ptr, appendVarintS32Ptr
- }
- if slice {
- if packed {
- return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
- }
- return sizeVarintS32Slice, appendVarintS32Slice
- }
- if nozero {
- return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
- }
- return sizeVarintS32Value, appendVarintS32Value
- case "zigzag32":
- if pointer {
- return sizeZigzag32Ptr, appendZigzag32Ptr
- }
- if slice {
- if packed {
- return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
- }
- return sizeZigzag32Slice, appendZigzag32Slice
- }
- if nozero {
- return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
- }
- return sizeZigzag32Value, appendZigzag32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixed64Ptr, appendFixed64Ptr
- }
- if slice {
- if packed {
- return sizeFixed64PackedSlice, appendFixed64PackedSlice
- }
- return sizeFixed64Slice, appendFixed64Slice
- }
- if nozero {
- return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
- }
- return sizeFixed64Value, appendFixed64Value
- case "varint":
- if pointer {
- return sizeVarint64Ptr, appendVarint64Ptr
- }
- if slice {
- if packed {
- return sizeVarint64PackedSlice, appendVarint64PackedSlice
- }
- return sizeVarint64Slice, appendVarint64Slice
- }
- if nozero {
- return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
- }
- return sizeVarint64Value, appendVarint64Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return sizeFixedS64Ptr, appendFixedS64Ptr
- }
- if slice {
- if packed {
- return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
- }
- return sizeFixedS64Slice, appendFixedS64Slice
- }
- if nozero {
- return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
- }
- return sizeFixedS64Value, appendFixedS64Value
- case "varint":
- if pointer {
- return sizeVarintS64Ptr, appendVarintS64Ptr
- }
- if slice {
- if packed {
- return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
- }
- return sizeVarintS64Slice, appendVarintS64Slice
- }
- if nozero {
- return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
- }
- return sizeVarintS64Value, appendVarintS64Value
- case "zigzag64":
- if pointer {
- return sizeZigzag64Ptr, appendZigzag64Ptr
- }
- if slice {
- if packed {
- return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
- }
- return sizeZigzag64Slice, appendZigzag64Slice
- }
- if nozero {
- return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
- }
- return sizeZigzag64Value, appendZigzag64Value
- }
- case reflect.Float32:
- if pointer {
- return sizeFloat32Ptr, appendFloat32Ptr
- }
- if slice {
- if packed {
- return sizeFloat32PackedSlice, appendFloat32PackedSlice
- }
- return sizeFloat32Slice, appendFloat32Slice
- }
- if nozero {
- return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
- }
- return sizeFloat32Value, appendFloat32Value
- case reflect.Float64:
- if pointer {
- return sizeFloat64Ptr, appendFloat64Ptr
- }
- if slice {
- if packed {
- return sizeFloat64PackedSlice, appendFloat64PackedSlice
- }
- return sizeFloat64Slice, appendFloat64Slice
- }
- if nozero {
- return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
- }
- return sizeFloat64Value, appendFloat64Value
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return sizeStringPtr, appendUTF8StringPtr
- }
- if slice {
- return sizeStringSlice, appendUTF8StringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendUTF8StringValueNoZero
- }
- return sizeStringValue, appendUTF8StringValue
- }
- if pointer {
- return sizeStringPtr, appendStringPtr
- }
- if slice {
- return sizeStringSlice, appendStringSlice
- }
- if nozero {
- return sizeStringValueNoZero, appendStringValueNoZero
- }
- return sizeStringValue, appendStringValue
- case reflect.Slice:
- if slice {
- return sizeBytesSlice, appendBytesSlice
- }
- if oneof {
- // A oneof bytes field may also have the "proto3" tag.
- // We want to marshal it as a oneof field. Do this
- // check before the proto3 check.
- return sizeBytesOneof, appendBytesOneof
- }
- if proto3 {
- return sizeBytes3, appendBytes3
- }
- return sizeBytes, appendBytes
- case reflect.Struct:
- switch encoding {
- case "group":
- if slice {
- return makeGroupSliceMarshaler(getMarshalInfo(t))
- }
- return makeGroupMarshaler(getMarshalInfo(t))
- case "bytes":
- if pointer {
- if slice {
- return makeMessageSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageMarshaler(getMarshalInfo(t))
- } else {
- if slice {
- return makeMessageRefSliceMarshaler(getMarshalInfo(t))
- }
- return makeMessageRefMarshaler(getMarshalInfo(t))
- }
- }
- }
- panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
-}
-
-// Below are functions to size/marshal a specific type of a field.
-// They are stored in the field's info, and called by function pointers.
-// They have type sizer or marshaler.
-
-func sizeFixed32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixed32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixedS32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFixedS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFloat32Value(_ pointer, tagsize int) int {
- return 4 + tagsize
-}
-func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return 0
- }
- return 4 + tagsize
-}
-func sizeFloat32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- return (4 + tagsize) * len(s)
-}
-func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return 0
- }
- return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
-}
-func sizeFixed64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixed64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFixedS64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFixedS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeFloat64Value(_ pointer, tagsize int) int {
- return 8 + tagsize
-}
-func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return 0
- }
- return 8 + tagsize
-}
-func sizeFloat64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- return (8 + tagsize) * len(s)
-}
-func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return 0
- }
- return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
-}
-func sizeVarint32Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarint32Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarint32Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarint64Value(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toUint64()
- if v == 0 {
- return 0
- }
- return SizeVarint(v) + tagsize
-}
-func sizeVarint64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(*p) + tagsize
-}
-func sizeVarint64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(v) + tagsize
- }
- return n
-}
-func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeVarintS64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v)) + tagsize
-}
-func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- return SizeVarint(uint64(*p)) + tagsize
-}
-func sizeVarintS64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v)) + tagsize
- }
- return n
-}
-func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag32Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt32()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
- p := ptr.getInt32Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
-}
-func sizeZigzag32Slice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
- }
- return n
-}
-func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeZigzag64Value(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toInt64()
- if v == 0 {
- return 0
- }
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return 0
- }
- v := *p
- return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
-}
-func sizeZigzag64Slice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
- }
- return n
-}
-func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return 0
- }
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- return n + SizeVarint(uint64(n)) + tagsize
-}
-func sizeBoolValue(_ pointer, tagsize int) int {
- return 1 + tagsize
-}
-func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toBool()
- if !v {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolPtr(ptr pointer, tagsize int) int {
- p := *ptr.toBoolPtr()
- if p == nil {
- return 0
- }
- return 1 + tagsize
-}
-func sizeBoolSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- return (1 + tagsize) * len(s)
-}
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return 0
- }
- return len(s) + SizeVarint(uint64(len(s))) + tagsize
-}
-func sizeStringValue(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringValueNoZero(ptr pointer, tagsize int) int {
- v := *ptr.toString()
- if v == "" {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringPtr(ptr pointer, tagsize int) int {
- p := *ptr.toStringPtr()
- if p == nil {
- return 0
- }
- v := *p
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeStringSlice(ptr pointer, tagsize int) int {
- s := *ptr.toStringSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-func sizeBytes(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if v == nil {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytes3(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return 0
- }
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesOneof(ptr pointer, tagsize int) int {
- v := *ptr.toBytes()
- return len(v) + SizeVarint(uint64(len(v))) + tagsize
-}
-func sizeBytesSlice(ptr pointer, tagsize int) int {
- s := *ptr.toBytesSlice()
- n := 0
- for _, v := range s {
- n += len(v) + SizeVarint(uint64(len(v))) + tagsize
- }
- return n
-}
-
-// appendFixed32 appends an encoded fixed32 to b.
-func appendFixed32(b []byte, v uint32) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24))
- return b
-}
-
-// appendFixed64 appends an encoded fixed64 to b.
-func appendFixed64(b []byte, v uint64) []byte {
- b = append(b,
- byte(v),
- byte(v>>8),
- byte(v>>16),
- byte(v>>24),
- byte(v>>32),
- byte(v>>40),
- byte(v>>48),
- byte(v>>56))
- return b
-}
-
-// appendVarint appends an encoded varint to b.
-func appendVarint(b []byte, v uint64) []byte {
- // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
- // have a non-leaf inliner.
- switch {
- case v < 1<<7:
- b = append(b, byte(v))
- case v < 1<<14:
- b = append(b,
- byte(v&0x7f|0x80),
- byte(v>>7))
- case v < 1<<21:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte(v>>14))
- case v < 1<<28:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte(v>>21))
- case v < 1<<35:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte(v>>28))
- case v < 1<<42:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte(v>>35))
- case v < 1<<49:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte(v>>42))
- case v < 1<<56:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte(v>>49))
- case v < 1<<63:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte(v>>56))
- default:
- b = append(b,
- byte(v&0x7f|0x80),
- byte((v>>7)&0x7f|0x80),
- byte((v>>14)&0x7f|0x80),
- byte((v>>21)&0x7f|0x80),
- byte((v>>28)&0x7f|0x80),
- byte((v>>35)&0x7f|0x80),
- byte((v>>42)&0x7f|0x80),
- byte((v>>49)&0x7f|0x80),
- byte((v>>56)&0x7f|0x80),
- 1)
- }
- return b
-}
-
-func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, *p)
- return b, nil
-}
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, v)
- }
- return b, nil
-}
-func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- return b, nil
-}
-func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(*p))
- return b, nil
-}
-func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, uint32(v))
- }
- return b, nil
-}
-func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float32bits(*ptr.toFloat32())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, v)
- return b, nil
-}
-func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(*p))
- return b, nil
-}
-func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(4*len(s)))
- for _, v := range s {
- b = appendFixed32(b, math.Float32bits(v))
- }
- return b, nil
-}
-func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, *p)
- return b, nil
-}
-func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, v)
- }
- return b, nil
-}
-func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- return b, nil
-}
-func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(*p))
- return b, nil
-}
-func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, uint64(v))
- }
- return b, nil
-}
-func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := math.Float64bits(*ptr.toFloat64())
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, v)
- return b, nil
-}
-func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toFloat64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(*p))
- return b, nil
-}
-func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toFloat64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(8*len(s)))
- for _, v := range s {
- b = appendFixed64(b, math.Float64bits(v))
- }
- return b, nil
-}
-func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toUint64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- return b, nil
-}
-func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toUint64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, *p)
- return b, nil
-}
-func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toUint64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(v)
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, v)
- }
- return b, nil
-}
-func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- return b, nil
-}
-func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(*p))
- return b, nil
-}
-func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v))
- }
- return b, nil
-}
-func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt32()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := ptr.getInt32Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- return b, nil
-}
-func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := ptr.getInt32Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
- }
- return b, nil
-}
-func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toInt64()
- if v == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toInt64Ptr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- v := *p
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- return b, nil
-}
-func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toInt64Slice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- // compute size
- n := 0
- for _, v := range s {
- n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
- }
- b = appendVarint(b, uint64(n))
- for _, v := range s {
- b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
- }
- return b, nil
-}
-func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBool()
- if !v {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = append(b, 1)
- return b, nil
-}
-
-func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toBoolPtr()
- if p == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- if *p {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- return b, nil
-}
-func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBoolSlice()
- if len(s) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag&^7|WireBytes)
- b = appendVarint(b, uint64(len(s)))
- for _, v := range s {
- if v {
- b = append(b, 1)
- } else {
- b = append(b, 0)
- }
- }
- return b, nil
-}
-func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toStringSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- v := *ptr.toString()
- if v == "" {
- return b, nil
- }
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- p := *ptr.toStringPtr()
- if p == nil {
- return b, nil
- }
- v := *p
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- var invalidUTF8 bool
- s := *ptr.toStringSlice()
- for _, v := range s {
- if !utf8.ValidString(v) {
- invalidUTF8 = true
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- if invalidUTF8 {
- return b, errInvalidUTF8
- }
- return b, nil
-}
-func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if v == nil {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- if len(v) == 0 {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- v := *ptr.toBytes()
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- return b, nil
-}
-func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
- s := *ptr.toBytesSlice()
- for _, v := range s {
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(v)))
- b = append(b, v...)
- }
- return b, nil
-}
-
-// makeGroupMarshaler returns the sizer and marshaler for a group.
-// u is the marshal info of the underlying message.
-func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- return u.size(p) + 2*tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- var err error
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, p, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- return b, err
- }
-}
-
-// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
-// u is the marshal info of the underlying message.
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- n += u.size(v) + 2*tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag) // start group
- b, err = u.marshal(b, v, deterministic)
- b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMessageMarshaler returns the sizer and marshaler for a message field.
-// u is the marshal info of the message.
-func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.size(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- p := ptr.getPointer()
- if p.isNil() {
- return b, nil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(p)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, p, deterministic)
- }
-}
-
-// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
-// u is the marshal info of the message.
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getPointerSlice()
- n := 0
- for _, v := range s {
- if v.isNil() {
- continue
- }
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getPointerSlice()
- var err error
- var nerr nonFatal
- for _, v := range s {
- if v.isNil() {
- return b, errRepeatedHasNil
- }
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if !nerr.Merge(err) {
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
-// makeMapMarshaler returns the sizer and marshaler for a map field.
-// f is the pointer to the reflect data structure of the field.
-func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
- // figure out key and value type
- t := f.Type
- keyType := t.Key()
- valType := t.Elem()
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- stdOptions := false
- for _, t := range tags {
- if strings.HasPrefix(t, "customtype=") {
- valTags = append(valTags, t)
- }
- if t == "stdtime" {
- valTags = append(valTags, t)
- stdOptions = true
- }
- if t == "stdduration" {
- valTags = append(valTags, t)
- stdOptions = true
- }
- if t == "wktptr" {
- valTags = append(valTags, t)
- }
- }
- keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
- valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
- keyWireTag := 1<<3 | wiretype(keyTags[0])
- valWireTag := 2<<3 | wiretype(valTags[0])
-
- // We create an interface to get the addresses of the map key and value.
- // If value is pointer-typed, the interface is a direct interface, and the
- // idata itself is the value. Otherwise, the idata is the pointer to the
- // value.
- // Key cannot be pointer-typed.
- valIsPtr := valType.Kind() == reflect.Ptr
-
- // If value is a message with nested maps, calling
- // valSizer in marshal may be quadratic. We should use
- // the cached version in marshal (but not in size).
- // If value is not message type, we don't have size cache,
- // but it cannot be nested either. Just use valSizer.
- valCachedSizer := valSizer
- if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct {
- u := getMarshalInfo(valType.Elem())
- valCachedSizer = func(ptr pointer, tagsize int) int {
- // Same as message sizer, but use cache.
- p := ptr.getPointer()
- if p.isNil() {
- return 0
- }
- siz := u.cachedsize(p)
- return siz + SizeVarint(uint64(siz)) + tagsize
- }
- }
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(t).Elem() // the map
- n := 0
- for _, k := range m.MapKeys() {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
- siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(t).Elem() // the map
- var err error
- keys := m.MapKeys()
- if len(keys) > 1 && deterministic {
- sort.Sort(mapKeys(keys))
- }
-
- var nerr nonFatal
- for _, k := range keys {
- ki := k.Interface()
- vi := m.MapIndex(k).Interface()
- kaddr := toAddrPointer(&ki, false) // pointer to key
- vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
- b = appendVarint(b, tag)
- siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
- b = appendVarint(b, uint64(siz))
- b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
- if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
- return b, err
- }
- }
- return b, nerr.E
- }
-}
-
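The deterministic branch above exists because Go randomizes map iteration order; sorting the keys is what makes repeated marshals byte-for-byte identical. A self-contained sketch of that key-sorting step on a plain map, without the reflection machinery:

    package main

    import (
        "fmt"
        "sort"
    )

    // sortedKeys returns the map's keys in ascending order so iteration is
    // stable across runs, mirroring the sort.Sort(mapKeys(keys)) step above.
    func sortedKeys(m map[int32]string) []int32 {
        keys := make([]int32, 0, len(m))
        for k := range m {
            keys = append(keys, k)
        }
        sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
        return keys
    }

    func main() {
        m := map[int32]string{3: "c", 1: "a", 2: "b"}
        for _, k := range sortedKeys(m) {
            fmt.Println(k, m[k]) // always 1 a, 2 b, 3 c
        }
    }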
-// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
-// fi is the marshal info of the field.
-// f is the pointer to the reflect data structure of the field.
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
- // A oneof field is an interface. We need to get the actual data type on the fly.
- t := f.Type
- return func(ptr pointer, _ int) int {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return 0
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- e := fi.oneofElems[telem]
- return e.sizer(p, e.tagsize)
- },
- func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
- p := ptr.getInterfacePointer()
- if p.isNil() {
- return b, nil
- }
- v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
- telem := v.Type()
- if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
- return b, errOneofHasNil
- }
- e := fi.oneofElems[telem]
- return e.marshaler(b, p, e.wiretag, deterministic)
- }
-}
-
-// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
-func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- n += ei.sizer(p, ei.tagsize)
- }
- mu.Unlock()
- return n
-}
-
-// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- // Not sure this is required, but the old code does it.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// message set format is:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-
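Concretely, one MessageSet item in the format sketched above is a group (field 1) wrapping a varint type_id (field 2) and a length-delimited message (field 3). A standalone sketch with small hand-picked values so every varint fits in one byte, not taken from the vendored code:

    package main

    import "fmt"

    func main() {
        inner := []byte{0x08, 0x01} // some already-encoded extension message
        item := []byte{1<<3 | 3}    // field 1, wire type 3: start group
        item = append(item, 2<<3|0, 100)              // field 2, varint type_id = 100
        item = append(item, 3<<3|2, byte(len(inner))) // field 3, length-delimited message
        item = append(item, inner...)
        item = append(item, 1<<3|4) // field 1, wire type 4: end group
        fmt.Printf("% x\n", item)   // 0b 10 64 1a 02 08 01 0c
    }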
-// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
-// in message set format (above).
-func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
- m, mu := ext.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
-
- n := 0
- for id, e := range m {
- n += 2 // start group, end group. tag = 1 (size=1)
- n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- siz := len(msgWithLen)
- n += siz + 1 // message, tag = 3 (size=1)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- n += ei.sizer(p, 1) // message, tag = 3 (size=1)
- }
- mu.Unlock()
- return n
-}
-
-// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
-// to the end of byte slice b.
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
- m, mu := ext.extensionsRead()
- if m == nil {
- return b, nil
- }
- mu.Lock()
- defer mu.Unlock()
-
- var err error
- var nerr nonFatal
-
- // Fast-path for common cases: zero or one extensions.
- // Don't bother sorting the keys.
- if len(m) <= 1 {
- for id, e := range m {
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- b = append(b, 1<<3|WireEndGroup)
- }
- return b, nerr.E
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, id := range keys {
- e := m[int32(id)]
- b = append(b, 1<<3|WireStartGroup)
- b = append(b, 2<<3|WireVarint)
- b = appendVarint(b, uint64(id))
-
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
- b = append(b, 3<<3|WireBytes)
- b = append(b, msgWithLen...)
- b = append(b, 1<<3|WireEndGroup)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
- b = append(b, 1<<3|WireEndGroup)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
-func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
- if m == nil {
- return 0
- }
-
- n := 0
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- n += ei.sizer(p, ei.tagsize)
- }
- return n
-}
-
-// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
-func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
- if m == nil {
- return b, nil
- }
-
- // Sort the keys to provide a deterministic encoding.
- keys := make([]int, 0, len(m))
- for k := range m {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- var err error
- var nerr nonFatal
- for _, k := range keys {
- e := m[int32(k)]
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- b = append(b, e.enc...)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- ei := u.getExtElemInfo(e.desc)
- v := e.value
- p := toAddrPointer(&v, ei.isptr)
- b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
- if !nerr.Merge(err) {
- return b, err
- }
- }
- return b, nerr.E
-}
-
-// newMarshaler is the interface representing objects that can marshal themselves.
-//
-// This exists to support protoc-gen-go generated messages.
-// The proto package will stop type-asserting to this interface in the future.
-//
-// DO NOT DEPEND ON THIS.
-type newMarshaler interface {
- XXX_Size() int
- XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
-}
-
-// Size returns the encoded size of a protocol buffer message.
-// This is the main entry point.
-func Size(pb Message) int {
- if m, ok := pb.(newMarshaler); ok {
- return m.XXX_Size()
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- b, _ := m.Marshal()
- return len(b)
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return 0
- }
- var info InternalMessageInfo
- return info.Size(pb)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, returning the data.
-// This is the main entry point.
-func Marshal(pb Message) ([]byte, error) {
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- b := make([]byte, 0, siz)
- return m.XXX_Marshal(b, false)
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- return m.Marshal()
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return nil, ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- b := make([]byte, 0, siz)
- return info.Marshal(b, pb, false)
-}
-
-// Marshal takes a protocol buffer message
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-// This is an alternative entry point. It is not necessary to use
-// a Buffer for most applications.
-func (p *Buffer) Marshal(pb Message) error {
- var err error
- if p.deterministic {
- if _, ok := pb.(Marshaler); ok {
- return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb)
- }
- }
- if m, ok := pb.(newMarshaler); ok {
- siz := m.XXX_Size()
- p.grow(siz) // make sure buf has enough capacity
- pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz]
- pp, err = m.XXX_Marshal(pp, p.deterministic)
- p.buf = append(p.buf, pp...)
- return err
- }
- if m, ok := pb.(Marshaler); ok {
- // If the message can marshal itself, let it do it, for compatibility.
- // NOTE: This is not efficient.
- var b []byte
- b, err = m.Marshal()
- p.buf = append(p.buf, b...)
- return err
- }
- // in case somehow we didn't generate the wrapper
- if pb == nil {
- return ErrNil
- }
- var info InternalMessageInfo
- siz := info.Size(pb)
- p.grow(siz) // make sure buf has enough capacity
- p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
- return err
-}
-
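The three-index slice in Buffer.Marshal above is the subtle step: after grow(siz), p.buf[len:len:len+siz] hands XXX_Marshal a zero-length window whose capacity is capped at the reserved siz bytes, so well-behaved appends land in the already-grown array and anything larger is pushed onto a fresh allocation rather than the shared one. A standalone illustration of that slice expression, independent of the package:

    package main

    import "fmt"

    func main() {
        buf := make([]byte, 2, 16)
        buf[0], buf[1] = 'a', 'b'

        pp := buf[len(buf) : len(buf) : len(buf)+4] // len 0, cap 4, same backing array
        pp = append(pp, 'x', 'y')                   // stays inside the reserved region
        fmt.Println(len(pp), cap(pp))               // 2 4

        pp = append(pp, '1', '2', '3') // exceeds cap 4: a new array is allocated
        fmt.Println(cap(pp) > 4)       // true; buf's array is no longer shared
    }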
-// grow grows the buffer's capacity, if necessary, to guarantee space for
-// another n bytes. After grow(n), at least n bytes can be written to the
-// buffer without another allocation.
-func (p *Buffer) grow(n int) {
- need := len(p.buf) + n
- if need <= cap(p.buf) {
- return
- }
- newCap := len(p.buf) * 2
- if newCap < need {
- newCap = need
- }
- p.buf = append(make([]byte, 0, newCap), p.buf...)
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
deleted file mode 100644
index 997f57c..0000000
--- a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
+++ /dev/null
@@ -1,388 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
- "time"
-)
-
-// makeMessageRefMarshaler differs a bit from makeMessageMarshaler
-// It marshals a message T instead of a *T
-func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- siz := u.size(ptr)
- return siz + SizeVarint(uint64(siz)) + tagsize
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- b = appendVarint(b, wiretag)
- siz := u.cachedsize(ptr)
- b = appendVarint(b, uint64(siz))
- return u.marshal(b, ptr, deterministic)
- }
-}
-
-// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler
-// It marshals a slice of messages []T instead of []*T
-func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- e := elem.Interface()
- v := toAddrPointer(&e, false)
- siz := u.size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- var err, errreq error
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- e := elem.Interface()
- v := toAddrPointer(&e, false)
- b = appendVarint(b, wiretag)
- siz := u.size(v)
- b = appendVarint(b, uint64(siz))
- b, err = u.marshal(b, v, deterministic)
-
- if err != nil {
- if _, ok := err.(*RequiredNotSetError); ok {
- // Required field in submessage is not set.
- // We record the error but keep going, to give a complete marshaling.
- if errreq == nil {
- errreq = err
- }
- continue
- }
- if err == ErrNil {
- err = errRepeatedHasNil
- }
- return b, err
- }
- }
-
- return b, errreq
- }
-}
-
-func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
- siz := m.Size()
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
- siz := m.Size()
- buf, err := m.Marshal()
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- m := ptr.asPointerTo(u.typ).Interface().(custom)
- siz := m.Size()
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- m := ptr.asPointerTo(u.typ).Interface().(custom)
- siz := m.Size()
- buf, err := m.Marshal()
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return nil, err
- }
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return nil, err
- }
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(time.Time)
- ts, err := timestampProto(t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(time.Time)
- ts, err := timestampProto(t)
- if err != nil {
- return nil, err
- }
- siz := Size(ts)
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return 0
- }
- siz := Size(ts)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*time.Time)
- ts, err := timestampProto(*t)
- if err != nil {
- return nil, err
- }
- siz := Size(ts)
- buf, err := Marshal(ts)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
- dur := durationProto(*d)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
- dur := durationProto(*d)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(time.Duration)
- dur := durationProto(d)
- siz := Size(dur)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(time.Duration)
- dur := durationProto(d)
- siz := Size(dur)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- d := elem.Interface().(*time.Duration)
- dur := durationProto(*d)
- siz := Size(dur)
- buf, err := Marshal(dur)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
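All of the time/duration marshalers above funnel through timestampProto/durationProto before encoding; that is, the Go value is first split into the seconds/nanos pair of the well-known Timestamp and Duration messages. A sketch of the duration split on its own (not the vendored conversion code; per the well-known Duration type, both fields carry the same sign):

    package main

    import (
        "fmt"
        "time"
    )

    // toSecondsNanos splits d into whole seconds and leftover nanoseconds,
    // the two fields of the well-known Duration message.
    func toSecondsNanos(d time.Duration) (secs int64, nanos int32) {
        secs = int64(d / time.Second)
        nanos = int32(d % time.Second)
        return secs, nanos
    }

    func main() {
        fmt.Println(toSecondsNanos(90*time.Second + 250*time.Millisecond)) // 90 250000000
        fmt.Println(toSecondsNanos(-1500 * time.Millisecond))              // -1 -500000000
    }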
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
deleted file mode 100644
index 60dcf70..0000000
--- a/vendor/github.com/gogo/protobuf/proto/table_merge.go
+++ /dev/null
@@ -1,676 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
- "sync/atomic"
-)
-
-// Merge merges the src message into dst.
-// This assumes that dst and src are of the same type and are non-nil.
-func (a *InternalMessageInfo) Merge(dst, src Message) {
- mi := atomicLoadMergeInfo(&a.merge)
- if mi == nil {
- mi = getMergeInfo(reflect.TypeOf(dst).Elem())
- atomicStoreMergeInfo(&a.merge, mi)
- }
- mi.merge(toPointer(&dst), toPointer(&src))
-}
-
-type mergeInfo struct {
- typ reflect.Type
-
- initialized int32 // 0: only typ is valid, 1: everything is valid
- lock sync.Mutex
-
- fields []mergeFieldInfo
- unrecognized field // Offset of XXX_unrecognized
-}
-
-type mergeFieldInfo struct {
- field field // Offset of field, guaranteed to be valid
-
- // isPointer reports whether the value in the field is a pointer.
- // This is true for the following situations:
- // * Pointer to struct
- // * Pointer to basic type (proto2 only)
- // * Slice (first value in slice header is a pointer)
- // * String (first value in string header is a pointer)
- isPointer bool
-
- // basicWidth reports the width of the field assuming that it is directly
- // embedded in the struct (as is the case for basic types in proto3).
- // The possible values are:
- // 0: invalid
- // 1: bool
- // 4: int32, uint32, float32
- // 8: int64, uint64, float64
- basicWidth int
-
- // Where dst and src are pointers to the types being merged.
- merge func(dst, src pointer)
-}
-
-var (
- mergeInfoMap = map[reflect.Type]*mergeInfo{}
- mergeInfoLock sync.Mutex
-)
-
-func getMergeInfo(t reflect.Type) *mergeInfo {
- mergeInfoLock.Lock()
- defer mergeInfoLock.Unlock()
- mi := mergeInfoMap[t]
- if mi == nil {
- mi = &mergeInfo{typ: t}
- mergeInfoMap[t] = mi
- }
- return mi
-}
-
-// merge merges src into dst assuming they are both of type *mi.typ.
-func (mi *mergeInfo) merge(dst, src pointer) {
- if dst.isNil() {
- panic("proto: nil destination")
- }
- if src.isNil() {
- return // Nothing to do.
- }
-
- if atomic.LoadInt32(&mi.initialized) == 0 {
- mi.computeMergeInfo()
- }
-
- for _, fi := range mi.fields {
- sfp := src.offset(fi.field)
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
- continue
- }
- if fi.basicWidth > 0 {
- switch {
- case fi.basicWidth == 1 && !*sfp.toBool():
- continue
- case fi.basicWidth == 4 && *sfp.toUint32() == 0:
- continue
- case fi.basicWidth == 8 && *sfp.toUint64() == 0:
- continue
- }
- }
- }
-
- dfp := dst.offset(fi.field)
- fi.merge(dfp, sfp)
- }
-
- // TODO: Make this faster?
- out := dst.asPointerTo(mi.typ).Elem()
- in := src.asPointerTo(mi.typ).Elem()
- if emIn, err := extendable(in.Addr().Interface()); err == nil {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- if mi.unrecognized.IsValid() {
- if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
- *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
- }
- }
-}
-
-func (mi *mergeInfo) computeMergeInfo() {
- mi.lock.Lock()
- defer mi.lock.Unlock()
- if mi.initialized != 0 {
- return
- }
- t := mi.typ
- n := t.NumField()
-
- props := GetProperties(t)
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
-
- mfi := mergeFieldInfo{field: toField(&f)}
- tf := f.Type
-
- // As an optimization, we can avoid the merge function call cost
- // if we know for sure that the source will have no effect
- // by checking if it is the zero value.
- if unsafeAllowed {
- switch tf.Kind() {
- case reflect.Ptr, reflect.Slice, reflect.String:
- // As a special case, we assume slices and strings are pointers
- // since we know that the first field in the SliceHeader or
- // StringHeader is a data pointer.
- mfi.isPointer = true
- case reflect.Bool:
- mfi.basicWidth = 1
- case reflect.Int32, reflect.Uint32, reflect.Float32:
- mfi.basicWidth = 4
- case reflect.Int64, reflect.Uint64, reflect.Float64:
- mfi.basicWidth = 8
- }
- }
-
- // Unwrap tf to get at its most basic type.
- var isPointer, isSlice bool
- if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
- isSlice = true
- tf = tf.Elem()
- }
- if tf.Kind() == reflect.Ptr {
- isPointer = true
- tf = tf.Elem()
- }
- if isPointer && isSlice && tf.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + tf.Name())
- }
-
- switch tf.Kind() {
- case reflect.Int32:
- switch {
- case isSlice: // E.g., []int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
- /*
- sfsp := src.toInt32Slice()
- if *sfsp != nil {
- dfsp := dst.toInt32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- */
- sfs := src.getInt32Slice()
- if sfs != nil {
- dfs := dst.getInt32Slice()
- dfs = append(dfs, sfs...)
- if dfs == nil {
- dfs = []int32{}
- }
- dst.setInt32Slice(dfs)
- }
- }
- case isPointer: // E.g., *int32
- mfi.merge = func(dst, src pointer) {
- // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
- /*
- sfpp := src.toInt32Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt32Ptr()
- if *dfpp == nil {
- *dfpp = Int32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- */
- sfp := src.getInt32Ptr()
- if sfp != nil {
- dfp := dst.getInt32Ptr()
- if dfp == nil {
- dst.setInt32Ptr(*sfp)
- } else {
- *dfp = *sfp
- }
- }
- }
- default: // E.g., int32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt32(); v != 0 {
- *dst.toInt32() = v
- }
- }
- }
- case reflect.Int64:
- switch {
- case isSlice: // E.g., []int64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toInt64Slice()
- if *sfsp != nil {
- dfsp := dst.toInt64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []int64{}
- }
- }
- }
- case isPointer: // E.g., *int64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toInt64Ptr()
- if *sfpp != nil {
- dfpp := dst.toInt64Ptr()
- if *dfpp == nil {
- *dfpp = Int64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., int64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toInt64(); v != 0 {
- *dst.toInt64() = v
- }
- }
- }
- case reflect.Uint32:
- switch {
- case isSlice: // E.g., []uint32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint32Slice()
- if *sfsp != nil {
- dfsp := dst.toUint32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint32{}
- }
- }
- }
- case isPointer: // E.g., *uint32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint32Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint32Ptr()
- if *dfpp == nil {
- *dfpp = Uint32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint32(); v != 0 {
- *dst.toUint32() = v
- }
- }
- }
- case reflect.Uint64:
- switch {
- case isSlice: // E.g., []uint64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toUint64Slice()
- if *sfsp != nil {
- dfsp := dst.toUint64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []uint64{}
- }
- }
- }
- case isPointer: // E.g., *uint64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toUint64Ptr()
- if *sfpp != nil {
- dfpp := dst.toUint64Ptr()
- if *dfpp == nil {
- *dfpp = Uint64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., uint64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toUint64(); v != 0 {
- *dst.toUint64() = v
- }
- }
- }
- case reflect.Float32:
- switch {
- case isSlice: // E.g., []float32
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat32Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat32Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float32{}
- }
- }
- }
- case isPointer: // E.g., *float32
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat32Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat32Ptr()
- if *dfpp == nil {
- *dfpp = Float32(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float32
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat32(); v != 0 {
- *dst.toFloat32() = v
- }
- }
- }
- case reflect.Float64:
- switch {
- case isSlice: // E.g., []float64
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toFloat64Slice()
- if *sfsp != nil {
- dfsp := dst.toFloat64Slice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []float64{}
- }
- }
- }
- case isPointer: // E.g., *float64
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toFloat64Ptr()
- if *sfpp != nil {
- dfpp := dst.toFloat64Ptr()
- if *dfpp == nil {
- *dfpp = Float64(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., float64
- mfi.merge = func(dst, src pointer) {
- if v := *src.toFloat64(); v != 0 {
- *dst.toFloat64() = v
- }
- }
- }
- case reflect.Bool:
- switch {
- case isSlice: // E.g., []bool
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toBoolSlice()
- if *sfsp != nil {
- dfsp := dst.toBoolSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []bool{}
- }
- }
- }
- case isPointer: // E.g., *bool
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toBoolPtr()
- if *sfpp != nil {
- dfpp := dst.toBoolPtr()
- if *dfpp == nil {
- *dfpp = Bool(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., bool
- mfi.merge = func(dst, src pointer) {
- if v := *src.toBool(); v {
- *dst.toBool() = v
- }
- }
- }
- case reflect.String:
- switch {
- case isSlice: // E.g., []string
- mfi.merge = func(dst, src pointer) {
- sfsp := src.toStringSlice()
- if *sfsp != nil {
- dfsp := dst.toStringSlice()
- *dfsp = append(*dfsp, *sfsp...)
- if *dfsp == nil {
- *dfsp = []string{}
- }
- }
- }
- case isPointer: // E.g., *string
- mfi.merge = func(dst, src pointer) {
- sfpp := src.toStringPtr()
- if *sfpp != nil {
- dfpp := dst.toStringPtr()
- if *dfpp == nil {
- *dfpp = String(**sfpp)
- } else {
- **dfpp = **sfpp
- }
- }
- }
- default: // E.g., string
- mfi.merge = func(dst, src pointer) {
- if v := *src.toString(); v != "" {
- *dst.toString() = v
- }
- }
- }
- case reflect.Slice:
- isProto3 := props.Prop[i].proto3
- switch {
- case isPointer:
- panic("bad pointer in byte slice case in " + tf.Name())
- case tf.Elem().Kind() != reflect.Uint8:
- panic("bad element kind in byte slice case in " + tf.Name())
- case isSlice: // E.g., [][]byte
- mfi.merge = func(dst, src pointer) {
- sbsp := src.toBytesSlice()
- if *sbsp != nil {
- dbsp := dst.toBytesSlice()
- for _, sb := range *sbsp {
- if sb == nil {
- *dbsp = append(*dbsp, nil)
- } else {
- *dbsp = append(*dbsp, append([]byte{}, sb...))
- }
- }
- if *dbsp == nil {
- *dbsp = [][]byte{}
- }
- }
- }
- default: // E.g., []byte
- mfi.merge = func(dst, src pointer) {
- sbp := src.toBytes()
- if *sbp != nil {
- dbp := dst.toBytes()
- if !isProto3 || len(*sbp) > 0 {
- *dbp = append([]byte{}, *sbp...)
- }
- }
- }
- }
- case reflect.Struct:
- switch {
- case isSlice && !isPointer: // E.g. []pb.T
- mergeInfo := getMergeInfo(tf)
- zero := reflect.Zero(tf)
- mfi.merge = func(dst, src pointer) {
- // TODO: Make this faster?
- dstsp := dst.asPointerTo(f.Type)
- dsts := dstsp.Elem()
- srcs := src.asPointerTo(f.Type).Elem()
- for i := 0; i < srcs.Len(); i++ {
- dsts = reflect.Append(dsts, zero)
- srcElement := srcs.Index(i).Addr()
- dstElement := dsts.Index(dsts.Len() - 1).Addr()
- mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
- }
- if dsts.IsNil() {
- dsts = reflect.MakeSlice(f.Type, 0, 0)
- }
- dstsp.Elem().Set(dsts)
- }
- case !isPointer:
- mergeInfo := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- mergeInfo.merge(dst, src)
- }
- case isSlice: // E.g., []*pb.T
- mergeInfo := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sps := src.getPointerSlice()
- if sps != nil {
- dps := dst.getPointerSlice()
- for _, sp := range sps {
- var dp pointer
- if !sp.isNil() {
- dp = valToPointer(reflect.New(tf))
- mergeInfo.merge(dp, sp)
- }
- dps = append(dps, dp)
- }
- if dps == nil {
- dps = []pointer{}
- }
- dst.setPointerSlice(dps)
- }
- }
- default: // E.g., *pb.T
- mergeInfo := getMergeInfo(tf)
- mfi.merge = func(dst, src pointer) {
- sp := src.getPointer()
- if !sp.isNil() {
- dp := dst.getPointer()
- if dp.isNil() {
- dp = valToPointer(reflect.New(tf))
- dst.setPointer(dp)
- }
- mergeInfo.merge(dp, sp)
- }
- }
- }
- case reflect.Map:
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in map case in " + tf.Name())
- default: // E.g., map[K]V
- mfi.merge = func(dst, src pointer) {
- sm := src.asPointerTo(tf).Elem()
- if sm.Len() == 0 {
- return
- }
- dm := dst.asPointerTo(tf).Elem()
- if dm.IsNil() {
- dm.Set(reflect.MakeMap(tf))
- }
-
- switch tf.Elem().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(Clone(val.Interface().(Message)))
- dm.SetMapIndex(key, val)
- }
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- dm.SetMapIndex(key, val)
- }
- default: // Basic type (e.g., string)
- for _, key := range sm.MapKeys() {
- val := sm.MapIndex(key)
- dm.SetMapIndex(key, val)
- }
- }
- }
- }
- case reflect.Interface:
- // Must be oneof field.
- switch {
- case isPointer || isSlice:
- panic("bad pointer or slice in interface case in " + tf.Name())
- default: // E.g., interface{}
- // TODO: Make this faster?
- mfi.merge = func(dst, src pointer) {
- su := src.asPointerTo(tf).Elem()
- if !su.IsNil() {
- du := dst.asPointerTo(tf).Elem()
- typ := su.Elem().Type()
- if du.IsNil() || du.Elem().Type() != typ {
- du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
- }
- sv := su.Elem().Elem().Field(0)
- if sv.Kind() == reflect.Ptr && sv.IsNil() {
- return
- }
- dv := du.Elem().Elem().Field(0)
- if dv.Kind() == reflect.Ptr && dv.IsNil() {
- dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
- }
- switch sv.Type().Kind() {
- case reflect.Ptr: // Proto struct (e.g., *T)
- Merge(dv.Interface().(Message), sv.Interface().(Message))
- case reflect.Slice: // E.g. Bytes type (e.g., []byte)
- dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
- default: // Basic type (e.g., string)
- dv.Set(sv)
- }
- }
- }
- }
- default:
- panic(fmt.Sprintf("merger not found for type:%s", tf))
- }
- mi.fields = append(mi.fields, mfi)
- }
-
- mi.unrecognized = invalidField
- if f, ok := t.FieldByName("XXX_unrecognized"); ok {
- if f.Type != reflect.TypeOf([]byte{}) {
- panic("expected XXX_unrecognized to be of type []byte")
- }
- mi.unrecognized = toField(&f)
- }
-
- atomic.StoreInt32(&mi.initialized, 1)
-}
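Restated without reflection, the per-kind rules computeMergeInfo installs come down to: non-zero scalars from src overwrite dst, slices are appended, and nested message pointers are allocated on demand and merged recursively. A hand-written sketch of the same behavior on an ordinary struct (hypothetical types, not the vendored code):

    package main

    import "fmt"

    type Inner struct{ N int64 }

    type Outer struct {
        Name  string
        Count int64
        Tags  []string
        Inner *Inner
    }

    // merge applies the scalar/slice/message rules described above by hand.
    func merge(dst, src *Outer) {
        if src.Name != "" {
            dst.Name = src.Name
        }
        if src.Count != 0 {
            dst.Count = src.Count
        }
        dst.Tags = append(dst.Tags, src.Tags...)
        if src.Inner != nil {
            if dst.Inner == nil {
                dst.Inner = &Inner{}
            }
            if src.Inner.N != 0 {
                dst.Inner.N = src.Inner.N
            }
        }
    }

    func main() {
        dst := &Outer{Name: "a", Tags: []string{"x"}}
        src := &Outer{Count: 3, Tags: []string{"y"}, Inner: &Inner{N: 7}}
        merge(dst, src)
        fmt.Printf("%+v %+v\n", *dst, *dst.Inner) // Name kept, Count set, Tags [x y], Inner {N:7}
    }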
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
deleted file mode 100644
index 9372293..0000000
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
+++ /dev/null
@@ -1,2249 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "unicode/utf8"
-)
-
-// Unmarshal is the entry point from the generated .pb.go files.
-// This function is not intended to be used by non-generated code.
-// This function is not subject to any compatibility guarantee.
-// msg contains a pointer to a protocol buffer struct.
-// b is the data to be unmarshaled into the protocol buffer.
-// a is a pointer to a place to store cached unmarshal information.
-func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
- // Load the unmarshal information for this message type.
- // The atomic load ensures memory consistency.
- u := atomicLoadUnmarshalInfo(&a.unmarshal)
- if u == nil {
- // Slow path: find unmarshal info for msg, update a with it.
- u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
- atomicStoreUnmarshalInfo(&a.unmarshal, u)
- }
- // Then do the unmarshaling.
- err := u.unmarshal(toPointer(&msg), b)
- return err
-}
-
-type unmarshalInfo struct {
- typ reflect.Type // type of the protobuf struct
-
- // 0 = only typ field is initialized
- // 1 = completely initialized
- initialized int32
- lock sync.Mutex // prevents double initialization
- dense []unmarshalFieldInfo // fields indexed by tag #
- sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
- reqFields []string // names of required fields
- reqMask uint64 // 1<<len(reqFields)-1
- unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
- extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
- oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
- extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
- isMessageSet bool // if true, implies extensions field is valid
-
- bytesExtensions field // offset of XXX_extensions with type []byte
-}
-
-// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
-// It decodes the field, stores it at f, and returns the unused bytes.
-// w is the wire encoding.
-// b is the data after the tag and wire encoding have been read.
-type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
-
-type unmarshalFieldInfo struct {
- // location of the field in the proto message structure.
- field field
-
- // function to unmarshal the data for the field.
- unmarshal unmarshaler
-
- // if a required field, contains a single set bit at this field's index in the required field list.
- reqMask uint64
-
- name string // name of the field, for error reporting
-}
-
-var (
- unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
- unmarshalInfoLock sync.Mutex
-)
-
-// getUnmarshalInfo returns the data structure which can be
-// subsequently used to unmarshal a message of the given type.
-// t is the type of the message (note: not pointer to message).
-func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
- // It would be correct to return a new unmarshalInfo
- // unconditionally. We would end up allocating one
- // per occurrence of that type as a message or submessage.
- // We use a cache here just to reduce memory usage.
- unmarshalInfoLock.Lock()
- defer unmarshalInfoLock.Unlock()
- u := unmarshalInfoMap[t]
- if u == nil {
- u = &unmarshalInfo{typ: t}
- // Note: we just set the type here. The rest of the fields
- // will be initialized on first use.
- unmarshalInfoMap[t] = u
- }
- return u
-}
-
-// unmarshal does the main work of unmarshaling a message.
-// u provides type information used to unmarshal the message.
-// m is a pointer to a protocol buffer message.
-// b is a byte stream to unmarshal into m.
-// This is the top routine used when recursively unmarshaling submessages.
-func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
- if atomic.LoadInt32(&u.initialized) == 0 {
- u.computeUnmarshalInfo()
- }
- if u.isMessageSet {
- return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
- }
- var reqMask uint64 // bitmask of required fields we've seen.
- var errLater error
- for len(b) > 0 {
- // Read tag and wire type.
- // Special case 1 and 2 byte varints.
- var x uint64
- if b[0] < 128 {
- x = uint64(b[0])
- b = b[1:]
- } else if len(b) >= 2 && b[1] < 128 {
- x = uint64(b[0]&0x7f) + uint64(b[1])<<7
- b = b[2:]
- } else {
- var n int
- x, n = decodeVarint(b)
- if n == 0 {
- return io.ErrUnexpectedEOF
- }
- b = b[n:]
- }
- tag := x >> 3
- wire := int(x) & 7
-
- // Dispatch on the tag to one of the unmarshal* functions below.
- var f unmarshalFieldInfo
- if tag < uint64(len(u.dense)) {
- f = u.dense[tag]
- } else {
- f = u.sparse[tag]
- }
- if fn := f.unmarshal; fn != nil {
- var err error
- b, err = fn(b, m.offset(f.field), wire)
- if err == nil {
- reqMask |= f.reqMask
- continue
- }
- if r, ok := err.(*RequiredNotSetError); ok {
- // Remember this error, but keep parsing. We need to produce
- // a full parse even if a required field is missing.
- if errLater == nil {
- errLater = r
- }
- reqMask |= f.reqMask
- continue
- }
- if err != errInternalBadWireType {
- if err == errInvalidUTF8 {
- if errLater == nil {
- fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
- errLater = &invalidUTF8Error{fullName}
- }
- continue
- }
- return err
- }
- // Fragments with bad wire type are treated as unknown fields.
- }
-
- // Unknown tag.
- if !u.unrecognized.IsValid() {
- // Don't keep unrecognized data; just skip it.
- var err error
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- continue
- }
- // Keep unrecognized data around.
- // maybe in extensions, maybe in the unrecognized field.
- z := m.offset(u.unrecognized).toBytes()
- var emap map[int32]Extension
- var e Extension
- for _, r := range u.extensionRanges {
- if uint64(r.Start) <= tag && tag <= uint64(r.End) {
- if u.extensions.IsValid() {
- mp := m.offset(u.extensions).toExtensions()
- emap = mp.extensionsWrite()
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.oldExtensions.IsValid() {
- p := m.offset(u.oldExtensions).toOldExtensions()
- emap = *p
- if emap == nil {
- emap = map[int32]Extension{}
- *p = emap
- }
- e = emap[int32(tag)]
- z = &e.enc
- break
- }
- if u.bytesExtensions.IsValid() {
- z = m.offset(u.bytesExtensions).toBytes()
- break
- }
- panic("no extensions field available")
- }
- }
- // Use wire type to skip data.
- var err error
- b0 := b
- b, err = skipField(b, wire)
- if err != nil {
- return err
- }
- *z = encodeVarint(*z, tag<<3|uint64(wire))
- *z = append(*z, b0[:len(b0)-len(b)]...)
-
- if emap != nil {
- emap[int32(tag)] = e
- }
- }
- if reqMask != u.reqMask && errLater == nil {
- // A required field of this message is missing.
- for _, n := range u.reqFields {
- if reqMask&1 == 0 {
- errLater = &RequiredNotSetError{n}
- }
- reqMask >>= 1
- }
- }
- return errLater
-}
-
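The tag-reading loop above special-cases one- and two-byte varints before falling back to decodeVarint, since field keys are almost always that small. A standalone decoder with the same shape (a sketch, not the vendored decodeVarint; it does not guard against inputs longer than ten bytes):

    package main

    import "fmt"

    // readVarint decodes a base-128 varint from b and reports how many
    // bytes it consumed, with inline fast paths for 1- and 2-byte values.
    func readVarint(b []byte) (v uint64, n int) {
        if len(b) > 0 && b[0] < 0x80 {
            return uint64(b[0]), 1
        }
        if len(b) > 1 && b[1] < 0x80 {
            return uint64(b[0]&0x7f) | uint64(b[1])<<7, 2
        }
        for i, c := range b {
            v |= uint64(c&0x7f) << (7 * uint(i))
            if c < 0x80 {
                return v, i + 1
            }
        }
        return 0, 0 // truncated input
    }

    func main() {
        fmt.Println(readVarint([]byte{0x08}))             // 8 1
        fmt.Println(readVarint([]byte{0xac, 0x02}))       // 300 2
        fmt.Println(readVarint([]byte{0x80, 0x80, 0x01})) // 16384 3
    }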
-// computeUnmarshalInfo fills in u with information for use
-// in unmarshaling protocol buffers of type u.typ.
-func (u *unmarshalInfo) computeUnmarshalInfo() {
- u.lock.Lock()
- defer u.lock.Unlock()
- if u.initialized != 0 {
- return
- }
- t := u.typ
- n := t.NumField()
-
- // Set up the "not found" value for the unrecognized byte buffer.
- // This is the default for proto3.
- u.unrecognized = invalidField
- u.extensions = invalidField
- u.oldExtensions = invalidField
- u.bytesExtensions = invalidField
-
- // List of the generated type and offset for each oneof field.
- type oneofField struct {
- ityp reflect.Type // interface type of oneof field
- field field // offset in containing message
- }
- var oneofFields []oneofField
-
- for i := 0; i < n; i++ {
- f := t.Field(i)
- if f.Name == "XXX_unrecognized" {
- // The byte slice used to hold unrecognized input is special.
- if f.Type != reflect.TypeOf(([]byte)(nil)) {
- panic("bad type for XXX_unrecognized field: " + f.Type.Name())
- }
- u.unrecognized = toField(&f)
- continue
- }
- if f.Name == "XXX_InternalExtensions" {
- // Ditto here.
- if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
- panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
- }
- u.extensions = toField(&f)
- if f.Tag.Get("protobuf_messageset") == "1" {
- u.isMessageSet = true
- }
- continue
- }
- if f.Name == "XXX_extensions" {
- // An older form of the extensions field.
- if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) {
- u.oldExtensions = toField(&f)
- continue
- } else if f.Type == reflect.TypeOf(([]byte)(nil)) {
- u.bytesExtensions = toField(&f)
- continue
- }
- panic("bad type for XXX_extensions field: " + f.Type.Name())
- }
- if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
- continue
- }
-
- oneof := f.Tag.Get("protobuf_oneof")
- if oneof != "" {
- oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
- // The rest of oneof processing happens below.
- continue
- }
-
- tags := f.Tag.Get("protobuf")
- tagArray := strings.Split(tags, ",")
- if len(tagArray) < 2 {
- panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
- }
- tag, err := strconv.Atoi(tagArray[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tagArray[1])
- }
-
- name := ""
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- }
-
- // Extract unmarshaling function from the field (its type and tags).
- unmarshal := fieldUnmarshaler(&f)
-
- // Required field?
- var reqMask uint64
- if tagArray[2] == "req" {
- bit := len(u.reqFields)
- u.reqFields = append(u.reqFields, name)
- reqMask = uint64(1) << uint(bit)
- // TODO: if we have more than 64 required fields, we end up
- // not verifying that all required fields are present.
- // Fix this, perhaps using a count of required fields?
- }
-
- // Store the info in the correct slot in the message.
- u.setTag(tag, toField(&f), unmarshal, reqMask, name)
- }
-
- // Find any types associated with oneof fields.
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
- if len(oneofFields) > 0 {
- var oneofImplementers []interface{}
- switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
- case oneofFuncsIface:
- _, _, _, oneofImplementers = m.XXX_OneofFuncs()
- case oneofWrappersIface:
- oneofImplementers = m.XXX_OneofWrappers()
- }
- for _, v := range oneofImplementers {
- tptr := reflect.TypeOf(v) // *Msg_X
- typ := tptr.Elem() // Msg_X
-
- f := typ.Field(0) // oneof implementers have one field
- baseUnmarshal := fieldUnmarshaler(&f)
- tags := strings.Split(f.Tag.Get("protobuf"), ",")
- fieldNum, err := strconv.Atoi(tags[1])
- if err != nil {
- panic("protobuf tag field not an integer: " + tags[1])
- }
- var name string
- for _, tag := range tags {
- if strings.HasPrefix(tag, "name=") {
- name = strings.TrimPrefix(tag, "name=")
- break
- }
- }
-
- // Find the oneof field that this struct implements.
- // Might take O(n^2) to process all of the oneofs, but who cares.
- for _, of := range oneofFields {
- if tptr.Implements(of.ityp) {
- // We have found the corresponding interface for this struct.
- // That lets us know where this struct should be stored
- // when we encounter it during unmarshaling.
- unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
- u.setTag(fieldNum, of.field, unmarshal, 0, name)
- }
- }
-
- }
- }
-
- // Get extension ranges, if any.
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
- if fn.IsValid() {
- if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
- panic("a message with extensions, but no extensions field in " + t.Name())
- }
- u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
- }
-
- // Explicitly disallow tag 0. This will ensure we flag an error
- // when decoding a buffer of all zeros. Without this code, we
- // would decode and skip an all-zero buffer of even length.
- // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
- u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
- return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
- }, 0, "")
-
- // Set mask for required field check.
- u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
-
- atomic.StoreInt32(&u.initialized, 1)
-}
-
-// setTag stores the unmarshal information for the given tag.
-// tag = tag # for field
-// field/unmarshal = unmarshal info for that field.
-// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
-// name = short name of the field.
-func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
- i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
- n := u.typ.NumField()
- if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
- for len(u.dense) <= tag {
- u.dense = append(u.dense, unmarshalFieldInfo{})
- }
- u.dense[tag] = i
- return
- }
- if u.sparse == nil {
- u.sparse = map[uint64]unmarshalFieldInfo{}
- }
- u.sparse[uint64(tag)] = i
-}
-
-// fieldUnmarshaler returns an unmarshaler for the given field.
-func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
- if f.Type.Kind() == reflect.Map {
- return makeUnmarshalMap(f)
- }
- return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
-}
-
-// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
- tagArray := strings.Split(tags, ",")
- encoding := tagArray[0]
- name := "unknown"
- ctype := false
- isTime := false
- isDuration := false
- isWktPointer := false
- proto3 := false
- validateUTF8 := true
- for _, tag := range tagArray[3:] {
- if strings.HasPrefix(tag, "name=") {
- name = tag[5:]
- }
- if tag == "proto3" {
- proto3 = true
- }
- if strings.HasPrefix(tag, "customtype=") {
- ctype = true
- }
- if tag == "stdtime" {
- isTime = true
- }
- if tag == "stdduration" {
- isDuration = true
- }
- if tag == "wktptr" {
- isWktPointer = true
- }
- }
- validateUTF8 = validateUTF8 && proto3
-
- // Figure out packaging (pointer, slice, or both)
- slice := false
- pointer := false
- if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
- slice = true
- t = t.Elem()
- }
- if t.Kind() == reflect.Ptr {
- pointer = true
- t = t.Elem()
- }
-
- if ctype {
- if reflect.PtrTo(t).Implements(customType) {
- if slice {
- return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name)
- }
- if pointer {
- return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalCustom(getUnmarshalInfo(t), name)
- } else {
- panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
- }
- }
-
- if isTime {
- if pointer {
- if slice {
- return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalTimePtr(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalTime(getUnmarshalInfo(t), name)
- }
-
- if isDuration {
- if pointer {
- if slice {
- return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalDuration(getUnmarshalInfo(t), name)
- }
-
- if isWktPointer {
- switch t.Kind() {
- case reflect.Float64:
- if pointer {
- if slice {
- return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Float32:
- if pointer {
- if slice {
- return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Int64:
- if pointer {
- if slice {
- return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Uint64:
- if pointer {
- if slice {
- return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Int32:
- if pointer {
- if slice {
- return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Uint32:
- if pointer {
- if slice {
- return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.Bool:
- if pointer {
- if slice {
- return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name)
- case reflect.String:
- if pointer {
- if slice {
- return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name)
- case uint8SliceType:
- if pointer {
- if slice {
- return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name)
- }
- if slice {
- return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name)
- }
- return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name)
- default:
- panic(fmt.Sprintf("unknown wktpointer type %#v", t))
- }
- }
-
- // We'll never have both pointer and slice for basic types.
- if pointer && slice && t.Kind() != reflect.Struct {
- panic("both pointer and slice for basic type in " + t.Name())
- }
-
- switch t.Kind() {
- case reflect.Bool:
- if pointer {
- return unmarshalBoolPtr
- }
- if slice {
- return unmarshalBoolSlice
- }
- return unmarshalBoolValue
- case reflect.Int32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixedS32Ptr
- }
- if slice {
- return unmarshalFixedS32Slice
- }
- return unmarshalFixedS32Value
- case "varint":
- // this could be int32 or enum
- if pointer {
- return unmarshalInt32Ptr
- }
- if slice {
- return unmarshalInt32Slice
- }
- return unmarshalInt32Value
- case "zigzag32":
- if pointer {
- return unmarshalSint32Ptr
- }
- if slice {
- return unmarshalSint32Slice
- }
- return unmarshalSint32Value
- }
- case reflect.Int64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixedS64Ptr
- }
- if slice {
- return unmarshalFixedS64Slice
- }
- return unmarshalFixedS64Value
- case "varint":
- if pointer {
- return unmarshalInt64Ptr
- }
- if slice {
- return unmarshalInt64Slice
- }
- return unmarshalInt64Value
- case "zigzag64":
- if pointer {
- return unmarshalSint64Ptr
- }
- if slice {
- return unmarshalSint64Slice
- }
- return unmarshalSint64Value
- }
- case reflect.Uint32:
- switch encoding {
- case "fixed32":
- if pointer {
- return unmarshalFixed32Ptr
- }
- if slice {
- return unmarshalFixed32Slice
- }
- return unmarshalFixed32Value
- case "varint":
- if pointer {
- return unmarshalUint32Ptr
- }
- if slice {
- return unmarshalUint32Slice
- }
- return unmarshalUint32Value
- }
- case reflect.Uint64:
- switch encoding {
- case "fixed64":
- if pointer {
- return unmarshalFixed64Ptr
- }
- if slice {
- return unmarshalFixed64Slice
- }
- return unmarshalFixed64Value
- case "varint":
- if pointer {
- return unmarshalUint64Ptr
- }
- if slice {
- return unmarshalUint64Slice
- }
- return unmarshalUint64Value
- }
- case reflect.Float32:
- if pointer {
- return unmarshalFloat32Ptr
- }
- if slice {
- return unmarshalFloat32Slice
- }
- return unmarshalFloat32Value
- case reflect.Float64:
- if pointer {
- return unmarshalFloat64Ptr
- }
- if slice {
- return unmarshalFloat64Slice
- }
- return unmarshalFloat64Value
- case reflect.Map:
- panic("map type in typeUnmarshaler in " + t.Name())
- case reflect.Slice:
- if pointer {
- panic("bad pointer in slice case in " + t.Name())
- }
- if slice {
- return unmarshalBytesSlice
- }
- return unmarshalBytesValue
- case reflect.String:
- if validateUTF8 {
- if pointer {
- return unmarshalUTF8StringPtr
- }
- if slice {
- return unmarshalUTF8StringSlice
- }
- return unmarshalUTF8StringValue
- }
- if pointer {
- return unmarshalStringPtr
- }
- if slice {
- return unmarshalStringSlice
- }
- return unmarshalStringValue
- case reflect.Struct:
- // message or group field
- if !pointer {
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessage(getUnmarshalInfo(t), name)
- }
- }
- switch encoding {
- case "bytes":
- if slice {
- return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
- case "group":
- if slice {
- return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
- }
- return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
- }
- }
- panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
-}
-
-// Below are all the unmarshalers for individual fields of various types.
-
-func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x)
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64() = v
- return b, nil
-}
-
-func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- *f.toInt64Ptr() = &v
- return b, nil
-}
-
-func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int64(x>>1) ^ int64(x)<<63>>63
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64() = v
- return b, nil
-}
-
-func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- *f.toUint64Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint64(x)
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x)
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- *f.toInt32() = v
- return b, nil
-}
-
-func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.setInt32Ptr(v)
- return b, nil
-}
-
-func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := int32(x>>1) ^ int32(x)<<31>>31
- f.appendInt32Slice(v)
- return b, nil
-}
-
-func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32() = v
- return b, nil
-}
-
-func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- *f.toUint32Ptr() = &v
- return b, nil
-}
-
-func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- v := uint32(x)
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b, nil
-}
-
-func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64() = v
- return b[8:], nil
-}
-
-func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- *f.toUint64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- s := f.toUint64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64() = v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- *f.toInt64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
- s := f.toInt64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32() = v
- return b[4:], nil
-}
-
-func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- *f.toUint32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
- s := f.toUint32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- *f.toInt32() = v
- return b[4:], nil
-}
-
-func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.setInt32Ptr(v)
- return b[4:], nil
-}
-
-func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
- f.appendInt32Slice(v)
- return b[4:], nil
-}
-
-func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- // Note: any length varint is allowed, even though any sane
- // encoder will use one byte.
- // See https://github.com/golang/protobuf/issues/76
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- // TODO: check if x>1? Tests seem to indicate no.
- v := x != 0
- *f.toBool() = v
- return b[n:], nil
-}
-
-func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- *f.toBoolPtr() = &v
- return b[n:], nil
-}
-
-func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- x, n = decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- b = b[n:]
- }
- return res, nil
- }
- if w != WireVarint {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := x != 0
- s := f.toBoolSlice()
- *s = append(*s, v)
- return b[n:], nil
-}
-
-func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64() = v
- return b[8:], nil
-}
-
-func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- *f.toFloat64Ptr() = &v
- return b[8:], nil
-}
-
-func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- b = b[8:]
- }
- return res, nil
- }
- if w != WireFixed64 {
- return b, errInternalBadWireType
- }
- if len(b) < 8 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
- s := f.toFloat64Slice()
- *s = append(*s, v)
- return b[8:], nil
-}
-
-func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32() = v
- return b[4:], nil
-}
-
-func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- *f.toFloat32Ptr() = &v
- return b[4:], nil
-}
-
-func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
- if w == WireBytes { // packed
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- res := b[x:]
- b = b[:x]
- for len(b) > 0 {
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- b = b[4:]
- }
- return res, nil
- }
- if w != WireFixed32 {
- return b, errInternalBadWireType
- }
- if len(b) < 4 {
- return nil, io.ErrUnexpectedEOF
- }
- v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
- s := f.toFloat32Slice()
- *s = append(*s, v)
- return b[4:], nil
-}
-
-func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- return b[x:], nil
-}
-
-func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- return b[x:], nil
-}
-
-func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toString() = v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- *f.toStringPtr() = &v
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := string(b[:x])
- s := f.toStringSlice()
- *s = append(*s, v)
- if !utf8.ValidString(v) {
- return b[x:], errInvalidUTF8
- }
- return b[x:], nil
-}
-
-var emptyBuf [0]byte
-
-func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // The use of append here is a trick which avoids the zeroing
- // that would be required if we used a make/copy pair.
- // We append to emptyBuf instead of nil because we want
- // a non-nil result even when the length is 0.
- v := append(emptyBuf[:], b[:x]...)
- *f.toBytes() = v
- return b[x:], nil
-}
-
-func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := append(emptyBuf[:], b[:x]...)
- s := f.toBytesSlice()
- *s = append(*s, v)
- return b[x:], nil
-}
-
-func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return b, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[y:], err
- }
-}
-
-func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireStartGroup {
- return b, errInternalBadWireType
- }
- x, y := findEndGroup(b)
- if x < 0 {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendPointer(v)
- return b[y:], err
- }
-}
-
-func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
- t := f.Type
- kt := t.Key()
- vt := t.Elem()
- tagArray := strings.Split(f.Tag.Get("protobuf"), ",")
- valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
- for _, t := range tagArray {
- if strings.HasPrefix(t, "customtype=") {
- valTags = append(valTags, t)
- }
- if t == "stdtime" {
- valTags = append(valTags, t)
- }
- if t == "stdduration" {
- valTags = append(valTags, t)
- }
- if t == "wktptr" {
- valTags = append(valTags, t)
- }
- }
- unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
- unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ","))
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // The map entry is a submessage. Figure out how big it is.
- if w != WireBytes {
- return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- r := b[x:] // unused data to return
- b = b[:x] // data for map entry
-
- // Note: we could use #keys * #values ~= 200 functions
- // to do map decoding without reflection. Probably not worth it.
- // Maps will be somewhat slow. Oh well.
-
- // Read key and value from data.
- var nerr nonFatal
- k := reflect.New(kt)
- v := reflect.New(vt)
- for len(b) > 0 {
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- wire := int(x) & 7
- b = b[n:]
-
- var err error
- switch x >> 3 {
- case 1:
- b, err = unmarshalKey(b, valToPointer(k), wire)
- case 2:
- b, err = unmarshalVal(b, valToPointer(v), wire)
- default:
- err = errInternalBadWireType // skip unknown tag
- }
-
- if nerr.Merge(err) {
- continue
- }
- if err != errInternalBadWireType {
- return nil, err
- }
-
- // Skip past unknown fields.
- b, err = skipField(b, wire)
- if err != nil {
- return nil, err
- }
- }
-
- // Get map, allocate if needed.
- m := f.asPointerTo(t).Elem() // an addressable map[K]T
- if m.IsNil() {
- m.Set(reflect.MakeMap(t))
- }
-
- // Insert into map.
- m.SetMapIndex(k.Elem(), v.Elem())
-
- return r, nerr.E
- }
-}
-
-// makeUnmarshalOneof makes an unmarshaler for oneof fields.
-// for:
-// message Msg {
-// oneof F {
-// int64 X = 1;
-// float64 Y = 2;
-// }
-// }
-// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
-// ityp is the interface type of the oneof field (e.g. isMsg_F).
-// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
-// Note that this function will be called once for each case in the oneof.
-func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
- sf := typ.Field(0)
- field0 := toField(&sf)
- return func(b []byte, f pointer, w int) ([]byte, error) {
- // Allocate holder for value.
- v := reflect.New(typ)
-
- // Unmarshal data into holder.
- // We unmarshal into the first field of the holder object.
- var err error
- var nerr nonFatal
- b, err = unmarshal(b, valToPointer(v).offset(field0), w)
- if !nerr.Merge(err) {
- return nil, err
- }
-
- // Write pointer to holder into target field.
- f.asPointerTo(ityp).Elem().Set(v)
-
- return b, nerr.E
- }
-}
-
-// Error used by decode internally.
-var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
-
-// skipField skips past a field of type wire and returns the remaining bytes.
-func skipField(b []byte, wire int) ([]byte, error) {
- switch wire {
- case WireVarint:
- _, k := decodeVarint(b)
- if k == 0 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[k:]
- case WireFixed32:
- if len(b) < 4 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[4:]
- case WireFixed64:
- if len(b) < 8 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[8:]
- case WireBytes:
- m, k := decodeVarint(b)
- if k == 0 || uint64(len(b)-k) < m {
- return b, io.ErrUnexpectedEOF
- }
- b = b[uint64(k)+m:]
- case WireStartGroup:
- _, i := findEndGroup(b)
- if i == -1 {
- return b, io.ErrUnexpectedEOF
- }
- b = b[i:]
- default:
- return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
- }
- return b, nil
-}
-
-// findEndGroup finds the index of the next EndGroup tag.
-// Groups may be nested, so the "next" EndGroup tag is the first
-// unpaired EndGroup.
-// findEndGroup returns the indexes of the start and end of the EndGroup tag.
-// Returns (-1,-1) if it can't find one.
-func findEndGroup(b []byte) (int, int) {
- depth := 1
- i := 0
- for {
- x, n := decodeVarint(b[i:])
- if n == 0 {
- return -1, -1
- }
- j := i
- i += n
- switch x & 7 {
- case WireVarint:
- _, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- case WireFixed32:
- if len(b)-4 < i {
- return -1, -1
- }
- i += 4
- case WireFixed64:
- if len(b)-8 < i {
- return -1, -1
- }
- i += 8
- case WireBytes:
- m, k := decodeVarint(b[i:])
- if k == 0 {
- return -1, -1
- }
- i += k
- if uint64(len(b)-i) < m {
- return -1, -1
- }
- i += int(m)
- case WireStartGroup:
- depth++
- case WireEndGroup:
- depth--
- if depth == 0 {
- return j, i
- }
- default:
- return -1, -1
- }
- }
-}
-
-// encodeVarint appends a varint-encoded integer to b and returns the result.
-func encodeVarint(b []byte, x uint64) []byte {
- for x >= 1<<7 {
- b = append(b, byte(x&0x7f|0x80))
- x >>= 7
- }
- return append(b, byte(x))
-}
-
-// decodeVarint reads a varint-encoded integer from b.
-// Returns the decoded integer and the number of bytes read.
-// If there is an error, it returns 0,0.
-func decodeVarint(b []byte) (uint64, int) {
- var x, y uint64
- if len(b) == 0 {
- goto bad
- }
- x = uint64(b[0])
- if x < 0x80 {
- return x, 1
- }
- x -= 0x80
-
- if len(b) <= 1 {
- goto bad
- }
- y = uint64(b[1])
- x += y << 7
- if y < 0x80 {
- return x, 2
- }
- x -= 0x80 << 7
-
- if len(b) <= 2 {
- goto bad
- }
- y = uint64(b[2])
- x += y << 14
- if y < 0x80 {
- return x, 3
- }
- x -= 0x80 << 14
-
- if len(b) <= 3 {
- goto bad
- }
- y = uint64(b[3])
- x += y << 21
- if y < 0x80 {
- return x, 4
- }
- x -= 0x80 << 21
-
- if len(b) <= 4 {
- goto bad
- }
- y = uint64(b[4])
- x += y << 28
- if y < 0x80 {
- return x, 5
- }
- x -= 0x80 << 28
-
- if len(b) <= 5 {
- goto bad
- }
- y = uint64(b[5])
- x += y << 35
- if y < 0x80 {
- return x, 6
- }
- x -= 0x80 << 35
-
- if len(b) <= 6 {
- goto bad
- }
- y = uint64(b[6])
- x += y << 42
- if y < 0x80 {
- return x, 7
- }
- x -= 0x80 << 42
-
- if len(b) <= 7 {
- goto bad
- }
- y = uint64(b[7])
- x += y << 49
- if y < 0x80 {
- return x, 8
- }
- x -= 0x80 << 49
-
- if len(b) <= 8 {
- goto bad
- }
- y = uint64(b[8])
- x += y << 56
- if y < 0x80 {
- return x, 9
- }
- x -= 0x80 << 56
-
- if len(b) <= 9 {
- goto bad
- }
- y = uint64(b[9])
- x += y << 63
- if y < 2 {
- return x, 10
- }
-
-bad:
- return 0, 0
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
deleted file mode 100644
index 00d6c7a..0000000
--- a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
+++ /dev/null
@@ -1,385 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "io"
- "reflect"
-)
-
-func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- // First read the message field to see if something is there.
- // The semantics of multiple submessages are weird. Instead of
- // the last one winning (as it is for all other fields), multiple
- // submessages are merged.
- v := f // gogo: changed from v := f.getPointer()
- if v.isNil() {
- v = valToPointer(reflect.New(sub.typ))
- f.setPointer(v)
- }
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- return b[x:], err
- }
-}
-
-func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- v := valToPointer(reflect.New(sub.typ))
- err := sub.unmarshal(v, b[:x])
- if err != nil {
- if r, ok := err.(*RequiredNotSetError); ok {
- r.field = name + "." + r.field
- } else {
- return nil, err
- }
- }
- f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
- return b[x:], err
- }
-}
-
-func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.New(sub.typ))
- m := s.Interface().(custom)
- if err := m.Unmarshal(b[:x]); err != nil {
- return nil, err
- }
- return b[x:], nil
- }
-}
-
-func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := reflect.New(sub.typ)
- c := m.Interface().(custom)
- if err := c.Unmarshal(b[:x]); err != nil {
- return nil, err
- }
- v := valToPointer(m)
- f.appendRef(v, sub.typ)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-
- m := f.asPointerTo(sub.typ).Interface().(custom)
- if err := m.Unmarshal(b[:x]); err != nil {
- return nil, err
- }
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(t))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&t))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&t))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
-		m := &timestamp{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- t, err := timestampFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(t))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&d))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(d))
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&d))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &duration{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- d, err := durationFromProto(m)
- if err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(d))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
deleted file mode 100644
index 87416af..0000000
--- a/vendor/github.com/gogo/protobuf/proto/text.go
+++ /dev/null
@@ -1,930 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Print("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if name == "XXX_NoUnkeyedLiteral" {
- continue
- }
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("<nil>\n")); err != nil {
- return err
- }
- continue
- }
- if len(props.Enum) > 0 {
- if err := tm.writeEnum(w, v, props); err != nil {
- return err
- }
- } else if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.MapValProp); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
-
- if len(props.Enum) > 0 {
- if err := tm.writeEnum(w, fv, props); err != nil {
- return err
- }
- } else if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv
- if pv.CanAddr() {
- pv = sv.Addr()
- } else {
- pv = reflect.New(sv.Type())
- pv.Elem().Set(sv)
- }
- if _, err := extendable(pv.Interface()); err == nil {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- if props != nil {
- if len(props.CustomType) > 0 {
- custom, ok := v.Interface().(Marshaler)
- if ok {
- data, err := custom.Marshal()
- if err != nil {
- return err
- }
- if err := writeString(w, string(data)); err != nil {
- return err
- }
- return nil
- }
- } else if len(props.CastType) > 0 {
- if _, ok := v.Interface().(interface {
- String() string
- }); ok {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- _, err := fmt.Fprintf(w, "%d", v.Interface())
- return err
- }
- }
- } else if props.StdTime {
- t, ok := v.Interface().(time.Time)
- if !ok {
- return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
- }
- tproto, err := timestampProto(t)
- if err != nil {
- return err
- }
- propsCopy := *props // Make a copy so that this is goroutine-safe
- propsCopy.StdTime = false
- err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
- return err
- } else if props.StdDuration {
- d, ok := v.Interface().(time.Duration)
- if !ok {
- return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface())
- }
- dproto := durationProto(d)
- propsCopy := *props // Make a copy so that this is goroutine-safe
- propsCopy.StdDuration = false
- err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
- return err
- }
- }
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Bytes())); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if v.CanAddr() {
- // Calling v.Interface on a struct causes the reflect package to
- // copy the entire struct. This is racy with the new Marshaler
- // since we atomically update the XXX_sizecache.
- //
- // Thus, we retrieve a pointer to the struct if possible to avoid
- // a race since v.Interface on the pointer doesn't copy the struct.
- //
- // If v is not addressable, then we are not worried about a race
- // since it implies that the binary Marshaler cannot possibly be
- // mutating this value.
- v = v.Addr()
- }
- if v.Type().Implements(textMarshalerType) {
- text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, ferr := fmt.Fprintf(w, "/* %v */\n", err)
- return ferr
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, werr := w.Write(endBraceNewline); werr != nil {
- return werr
- }
- continue
- }
- if _, ferr := fmt.Fprint(w, tag); ferr != nil {
- return ferr
- }
- if wire != WireStartGroup {
- if err = w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err = w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- e := pv.Interface().(Message)
-
- var m map[int32]Extension
- var mu sync.Locker
- if em, ok := e.(extensionsBytes); ok {
- eb := em.GetExtensions()
- var err error
- m, err = BytesToExtensionsMap(*eb)
- if err != nil {
- return err
- }
- mu = notLocker{}
- } else if _, ok := e.(extendableProto); ok {
- ep, _ := extendable(e)
- m, mu = ep.extensionsRead()
- if m == nil {
- return nil
- }
- }
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
-
- mu.Lock()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- mu.Unlock()
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(e, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte("<nil>"))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
deleted file mode 100644
index 1d6c6aa..0000000
--- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "fmt"
- "reflect"
-)
-
-func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
- m, ok := enumStringMaps[props.Enum]
- if !ok {
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- }
- key := int32(0)
- if v.Kind() == reflect.Ptr {
- key = int32(v.Elem().Int())
- } else {
- key = int32(v.Int())
- }
- s, ok := m[key]
- if !ok {
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- }
- _, err := fmt.Fprint(w, s)
- return err
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
deleted file mode 100644
index f85c0cc..0000000
--- a/vendor/github.com/gogo/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,1018 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(rune(i)), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- if fieldSet["type_url"] {
- return p.errorf(anyRepeatedlyUnpacked, "type_url")
- }
- if fieldSet["value"] {
- return p.errorf(anyRepeatedlyUnpacked, "value")
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- fieldSet["type_url"] = true
- fieldSet["value"] = true
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(Message)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- field := sv.Field(oop.Field)
- if !field.IsNil() {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
- }
- field.Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order. See b/28924776 for a time
- // this went wrong.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.MapKeyProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- case "value":
- if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.MapValProp); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- default:
- p.back()
- return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
- if len(props.CustomType) > 0 {
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- tc := reflect.TypeOf(new(Marshaler))
- ok := t.Elem().Implements(tc.Elem())
- if ok {
- fv := v
- flen := fv.Len()
- if flen == fv.Cap() {
- nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
- reflect.Copy(nav, fv)
- fv.Set(nav)
- }
- fv.SetLen(flen + 1)
-
- // Read one.
- p.back()
- return p.readAny(fv.Index(flen), props)
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
- err := custom.Unmarshal([]byte(tok.unquoted))
- if err != nil {
- return p.errorf("%v %v: %v", err, v.Type(), tok.value)
- }
- v.Set(reflect.ValueOf(custom))
- } else {
- custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
- err := custom.Unmarshal([]byte(tok.unquoted))
- if err != nil {
- return p.errorf("%v %v: %v", err, v.Type(), tok.value)
- }
- v.Set(reflect.Indirect(reflect.ValueOf(custom)))
- }
- return nil
- }
- if props.StdTime {
- fv := v
- p.back()
- props.StdTime = false
- tproto := &timestamp{}
- err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
- props.StdTime = true
- if err != nil {
- return err
- }
- tim, err := timestampFromProto(tproto)
- if err != nil {
- return err
- }
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- if t.Elem().Kind() == reflect.Ptr {
- ts := fv.Interface().([]*time.Time)
- ts = append(ts, &tim)
- fv.Set(reflect.ValueOf(ts))
- return nil
- } else {
- ts := fv.Interface().([]time.Time)
- ts = append(ts, tim)
- fv.Set(reflect.ValueOf(ts))
- return nil
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- v.Set(reflect.ValueOf(&tim))
- } else {
- v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
- }
- return nil
- }
- if props.StdDuration {
- fv := v
- p.back()
- props.StdDuration = false
- dproto := &duration{}
- err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
- props.StdDuration = true
- if err != nil {
- return err
- }
- dur, err := durationFromProto(dproto)
- if err != nil {
- return err
- }
- if props.Repeated {
- t := reflect.TypeOf(v.Interface())
- if t.Kind() == reflect.Slice {
- if t.Elem().Kind() == reflect.Ptr {
- ds := fv.Interface().([]*time.Duration)
- ds = append(ds, &dur)
- fv.Set(reflect.ValueOf(ds))
- return nil
- } else {
- ds := fv.Interface().([]time.Duration)
- ds = append(ds, dur)
- fv.Set(reflect.ValueOf(ds))
- return nil
- }
- }
- }
- if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
- v.Set(reflect.ValueOf(&dur))
- } else {
- v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
- }
- return nil
- }
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- ntok := p.next()
- if ntok.err != nil {
- return ntok.err
- }
- if ntok.value == "]" {
- break
- }
- if ntok.value != "," {
- return p.errorf("Expected ']' or ',' found %q", ntok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // true/1/t/True or false/f/0/False.
- switch tok.value {
- case "true", "1", "t", "True":
- fv.SetBool(true)
- return nil
- case "false", "0", "f", "False":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int8:
- if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
- fv.SetInt(x)
- return nil
- }
- case reflect.Int16:
- if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
- fv.SetInt(x)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint8:
- if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
- fv.SetUint(x)
- return nil
- }
- case reflect.Uint16:
- if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
- fv.SetUint(x)
- return nil
- }
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- return um.UnmarshalText([]byte(s))
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- return newTextParser(s).readStruct(v.Elem(), "")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
deleted file mode 100644
index 9324f65..0000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// This file implements operations on google.protobuf.Timestamp.
-
-import (
- "errors"
- "fmt"
- "time"
-)
-
-const (
- // Seconds field of the earliest valid Timestamp.
- // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- minValidSeconds = -62135596800
- // Seconds field just after the latest valid Timestamp.
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- maxValidSeconds = 253402300800
-)
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range
-// [0001-01-01, 10000-01-01) and has a Nanos field
-// in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes
-// the problem.
-//
-// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
-func validateTimestamp(ts *timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
-
-// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
- // Don't return the zero value on error, because corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
- seconds := t.Unix()
- nanos := int32(t.Sub(time.Unix(seconds, 0)))
- ts := &timestamp{
- Seconds: seconds,
- Nanos: nanos,
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index 38439fa..0000000
--- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "reflect"
- "time"
-)
-
-var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
-
-type timestamp struct {
- Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *timestamp) Reset() { *m = timestamp{} }
-func (*timestamp) ProtoMessage() {}
-func (*timestamp) String() string { return "timestamp<string>" }
-
-func init() {
- RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
deleted file mode 100644
index b175d1b..0000000
--- a/vendor/github.com/gogo/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,1888 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "io"
- "reflect"
-)
-
-func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*float64)
- v := &float64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
- v := &float64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float64)
- v := &float64Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float64)
- v := &float64Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float64)
- v := &float64Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*float32)
- v := &float32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
- v := &float32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float32)
- v := &float32Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(float32)
- v := &float32Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*float32)
- v := &float32Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &float32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*int64)
- v := &int64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
- v := &int64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int64)
- v := &int64Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int64)
- v := &int64Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int64)
- v := &int64Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*uint64)
- v := &uint64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
- v := &uint64Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint64)
- v := &uint64Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint64)
- v := &uint64Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint64)
- v := &uint64Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint64Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*int32)
- v := &int32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
- v := &int32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int32)
- v := &int32Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(int32)
- v := &int32Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*int32)
- v := &int32Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &int32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*uint32)
- v := &uint32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
- v := &uint32Value{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint32)
- v := &uint32Value{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(uint32)
- v := &uint32Value{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*uint32)
- v := &uint32Value{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &uint32Value{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*bool)
- v := &boolValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
- v := &boolValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(bool)
- v := &boolValue{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(bool)
- v := &boolValue{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*bool)
- v := &boolValue{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &boolValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*string)
- v := &stringValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
- v := &stringValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(string)
- v := &stringValue{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(string)
- v := &stringValue{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*string)
- v := &stringValue{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &stringValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
- v := &bytesValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- if ptr.isNil() {
- return 0
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- return tagsize + SizeVarint(uint64(siz)) + siz
- }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- if ptr.isNil() {
- return b, nil
- }
- t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
- v := &bytesValue{*t}
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(len(buf)))
- b = append(b, buf...)
- return b, nil
- }
-}
-
-func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(u.typ)
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().([]byte)
- v := &bytesValue{t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(u.typ)
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().([]byte)
- v := &bytesValue{t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
- return func(ptr pointer, tagsize int) int {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- n := 0
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- n += siz + SizeVarint(uint64(siz)) + tagsize
- }
- return n
- },
- func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
- s := ptr.getSlice(reflect.PtrTo(u.typ))
- for i := 0; i < s.Len(); i++ {
- elem := s.Index(i)
- t := elem.Interface().(*[]byte)
- v := &bytesValue{*t}
- siz := Size(v)
- buf, err := Marshal(v)
- if err != nil {
- return nil, err
- }
- b = appendVarint(b, wiretag)
- b = appendVarint(b, uint64(siz))
- b = append(b, buf...)
- }
-
- return b, nil
- }
-}
-
-func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(sub.typ).Elem()
- s.Set(reflect.ValueOf(m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
- s.Set(reflect.ValueOf(&m.Value))
- return b[x:], nil
- }
-}
-
-func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(reflect.PtrTo(sub.typ))
- newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
-
-func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
- return func(b []byte, f pointer, w int) ([]byte, error) {
- if w != WireBytes {
- return nil, errInternalBadWireType
- }
- x, n := decodeVarint(b)
- if n == 0 {
- return nil, io.ErrUnexpectedEOF
- }
- b = b[n:]
- if x > uint64(len(b)) {
- return nil, io.ErrUnexpectedEOF
- }
- m := &bytesValue{}
- if err := Unmarshal(b[:x], m); err != nil {
- return nil, err
- }
- slice := f.getSlice(sub.typ)
- newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
- slice.Set(newSlice)
- return b[x:], nil
- }
-}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
deleted file mode 100644
index c1cf7bf..0000000
--- a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-type float64Value struct {
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *float64Value) Reset() { *m = float64Value{} }
-func (*float64Value) ProtoMessage() {}
-func (*float64Value) String() string { return "float64<string>" }
-
-type float32Value struct {
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *float32Value) Reset() { *m = float32Value{} }
-func (*float32Value) ProtoMessage() {}
-func (*float32Value) String() string { return "float32<string>" }
-
-type int64Value struct {
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *int64Value) Reset() { *m = int64Value{} }
-func (*int64Value) ProtoMessage() {}
-func (*int64Value) String() string { return "int64<string>" }
-
-type uint64Value struct {
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *uint64Value) Reset() { *m = uint64Value{} }
-func (*uint64Value) ProtoMessage() {}
-func (*uint64Value) String() string { return "uint64<string>" }
-
-type int32Value struct {
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *int32Value) Reset() { *m = int32Value{} }
-func (*int32Value) ProtoMessage() {}
-func (*int32Value) String() string { return "int32<string>" }
-
-type uint32Value struct {
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *uint32Value) Reset() { *m = uint32Value{} }
-func (*uint32Value) ProtoMessage() {}
-func (*uint32Value) String() string { return "uint32<string>" }
-
-type boolValue struct {
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *boolValue) Reset() { *m = boolValue{} }
-func (*boolValue) ProtoMessage() {}
-func (*boolValue) String() string { return "bool<string>" }
-
-type stringValue struct {
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *stringValue) Reset() { *m = stringValue{} }
-func (*stringValue) ProtoMessage() {}
-func (*stringValue) String() string { return "string<string>" }
-
-type bytesValue struct {
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *bytesValue) Reset() { *m = bytesValue{} }
-func (*bytesValue) ProtoMessage() {}
-func (*bytesValue) String() string { return "[]byte<string>" }
-
-func init() {
- RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
- RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
- RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
- RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
- RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
- RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
- RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
- RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
- RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
-}
diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap
deleted file mode 100644
index eaf8b2f..0000000
--- a/vendor/github.com/opencontainers/go-digest/.mailmap
+++ /dev/null
@@ -1,4 +0,0 @@
-Aaron Lehmann <aaronl@vitelus.com> <aaron.lehmann@docker.com>
-Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
-Stephen J Day <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
-Haibing Zhou <zhouhaibing089@gmail.com>
diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
deleted file mode 100644
index b6165f8..0000000
--- a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-version: 2
-
-requirements:
- signed_off_by:
- required: true
-
-always_pending:
- title_regex: '^WIP'
- explanation: 'Work in progress...'
-
-group_defaults:
- required: 2
- approve_by_comment:
- enabled: true
- approve_regex: '^LGTM'
- reject_regex: '^Rejected'
- reset_on_push:
- enabled: true
- author_approval:
- ignored: true
- conditions:
- branches:
- - master
-
-groups:
- go-digest:
- teams:
- - go-digest-maintainers
diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml
deleted file mode 100644
index 5775f88..0000000
--- a/vendor/github.com/opencontainers/go-digest/.travis.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-language: go
-go:
- - 1.12.x
- - 1.13.x
- - master
diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
deleted file mode 100644
index e4d962a..0000000
--- a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Contributing to Docker open source projects
-
-Want to hack on this project? Awesome! Here are instructions to get you started.
-
-This project is a part of the [Docker](https://www.docker.com) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read Docker's
-[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
-[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
-[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
-[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
-
-For an in-depth description of our contribution process, visit the
-contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/)
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-1 Letterman Drive
-Suite D4700
-San Francisco, CA, 94129
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
- Signed-off-by: Joe Smith <joe.smith@email.com>
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE
deleted file mode 100644
index 3ac8ab6..0000000
--- a/vendor/github.com/opencontainers/go-digest/LICENSE
+++ /dev/null
@@ -1,192 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- https://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2019, 2020 OCI Contributors
- Copyright 2016 Docker, Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs
deleted file mode 100644
index e26cd4f..0000000
--- a/vendor/github.com/opencontainers/go-digest/LICENSE.docs
+++ /dev/null
@@ -1,425 +0,0 @@
-Attribution-ShareAlike 4.0 International
-
-=======================================================================
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of
-Creative Commons public licenses does not create a lawyer-client or
-other relationship. Creative Commons makes its licenses and related
-information available on an "as-is" basis. Creative Commons gives no
-warranties regarding its licenses, any material licensed under their
-terms and conditions, or any related information. Creative Commons
-disclaims all liability for damages resulting from their use to the
-fullest extent possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share
-original works of authorship and other material subject to copyright
-and certain other rights specified in the public license below. The
-following considerations are for informational purposes only, are not
-exhaustive, and do not form part of our licenses.
-
- Considerations for licensors: Our public licenses are
- intended for use by those authorized to give the public
- permission to use material in ways otherwise restricted by
- copyright and certain other rights. Our licenses are
- irrevocable. Licensors should read and understand the terms
- and conditions of the license they choose before applying it.
- Licensors should also secure all rights necessary before
- applying our licenses so that the public can reuse the
- material as expected. Licensors should clearly mark any
- material not subject to the license. This includes other CC-
- licensed material, or material used under an exception or
- limitation to copyright. More considerations for licensors:
- wiki.creativecommons.org/Considerations_for_licensors
-
- Considerations for the public: By using one of our public
- licenses, a licensor grants the public permission to use the
- licensed material under specified terms and conditions. If
- the licensor's permission is not necessary for any reason--for
- example, because of any applicable exception or limitation to
- copyright--then that use is not regulated by the license. Our
- licenses grant only permissions under copyright and certain
- other rights that a licensor has authority to grant. Use of
- the licensed material may still be restricted for other
- reasons, including because others have copyright or other
- rights in the material. A licensor may make special requests,
- such as asking that all changes be marked or described.
- Although not required by our licenses, you are encouraged to
- respect those requests where reasonable. More_considerations
- for the public:
- wiki.creativecommons.org/Considerations_for_licensees
-
-=======================================================================
-
-Creative Commons Attribution-ShareAlike 4.0 International Public
-License
-
-By exercising the Licensed Rights (defined below), You accept and agree
-to be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You
-such rights in consideration of benefits the Licensor receives from
-making the Licensed Material available under these terms and
-conditions.
-
-
-Section 1 -- Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material
- and in which the Licensed Material is translated, altered,
- arranged, transformed, or otherwise modified in a manner requiring
- permission under the Copyright and Similar Rights held by the
- Licensor. For purposes of this Public License, where the Licensed
- Material is a musical work, performance, or sound recording,
- Adapted Material is always produced where the Licensed Material is
- synched in timed relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright
- and Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative
- Commons as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright
- Treaty adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or
- any other exception or limitation to Copyright and Similar Rights
- that applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name
- of a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database,
- or other material to which the Licensor applied this Public
- License.
-
- i. Licensed Rights means the rights granted to You subject to the
- terms and conditions of this Public License, which are limited to
- all Copyright and Similar Rights that apply to Your use of the
- Licensed Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such
- as reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the
- public may access the material from a place and at a time
- individually chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially
- equivalent rights anywhere in the world.
-
- m. You means the individual or entity exercising the Licensed Rights
- under this Public License. Your has a corresponding meaning.
-
-
-Section 2 -- Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License,
- the Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- a. reproduce and Share the Licensed Material, in whole or
- in part; and
-
- b. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with
- its terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section
- 6(a).
-
- 4. Media and formats; technical modifications allowed. The
- Licensor authorizes You to exercise the Licensed Rights in
- all media and formats whether now known or hereafter created,
- and to make technical modifications necessary to do so. The
- Licensor waives and/or agrees not to assert any right or
- authority to forbid You from making technical modifications
- necessary to exercise the Licensed Rights, including
- technical modifications necessary to circumvent Effective
- Technological Measures. For purposes of this Public License,
- simply making modifications authorized by this Section 2(a)
- (4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- a. Offer from the Licensor -- Licensed Material. Every
- recipient of the Licensed Material automatically
- receives an offer from the Licensor to exercise the
- Licensed Rights under the terms and conditions of this
- Public License.
-
- b. Additional offer from the Licensor -- Adapted Material.
- Every recipient of Adapted Material from You
- automatically receives an offer from the Licensor to
- exercise the Licensed Rights in the Adapted Material
- under the conditions of the Adapter's License You apply.
-
- c. No downstream restrictions. You may not offer or impose
- any additional or different terms or conditions on, or
- apply any Effective Technological Measures to, the
- Licensed Material if doing so restricts exercise of the
- Licensed Rights by any recipient of the Licensed
- Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You
- are, or that Your use of the Licensed Material is, connected
- with, or sponsored, endorsed, or granted official status by,
- the Licensor or others designated to receive attribution as
- provided in Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not
- licensed under this Public License, nor are publicity,
- privacy, and/or other similar personality rights; however, to
- the extent possible, the Licensor waives and/or agrees not to
- assert any such rights held by the Licensor to the limited
- extent necessary to allow You to exercise the Licensed
- Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this
- Public License.
-
- 3. To the extent possible, the Licensor waives any right to
- collect royalties from You for the exercise of the Licensed
- Rights, whether directly or through a collecting society
- under any voluntary or waivable statutory or compulsory
- licensing scheme. In all other cases the Licensor expressly
- reserves any right to collect such royalties.
-
-
-Section 3 -- License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified
- form), You must:
-
- a. retain the following if it is supplied by the Licensor
- with the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by
- the Licensor (including by pseudonym if
- designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of
- warranties;
-
- v. a URI or hyperlink to the Licensed Material to the
- extent reasonably practicable;
-
- b. indicate if You modified the Licensed Material and
- retain an indication of any previous modifications; and
-
- c. indicate the Licensed Material is licensed under this
- Public License, and include the text of, or the URI or
- hyperlink to, this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required
- information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable.
-
- b. ShareAlike.
-
- In addition to the conditions in Section 3(a), if You Share
- Adapted Material You produce, the following conditions also apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-
-Section 4 -- Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that
-apply to Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right
- to extract, reuse, reproduce, and Share all or a substantial
- portion of the contents of the database;
-
- b. if You include all or a substantial portion of the database
- contents in a database in which You have Sui Generis Database
- Rights, then the database in which You have Sui Generis Database
- Rights (but not its individual contents) is Adapted Material,
-
- including for purposes of Section 3(b); and
- c. You must comply with the conditions in Section 3(a) if You Share
- all or a substantial portion of the contents of the database.
-
-For the avoidance of doubt, this Section 4 supplements and does not
-replace Your obligations under this Public License where the Licensed
-Rights include other Copyright and Similar Rights.
-
-
-Section 5 -- Disclaimer of Warranties and Limitation of Liability.
-
- a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
- EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
- AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
- ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
- IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
- WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
- PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
- ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
- KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
- ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
-
- b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
- TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
- NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
- INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
- COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
- USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
- ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
- DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
- IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent
- possible, most closely approximates an absolute disclaimer and
- waiver of all liability.
-
-
-Section 6 -- Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided
- it is cured within 30 days of Your discovery of the
- violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations
- of this Public License.
-
- c. For the avoidance of doubt, the Licensor may also offer the
- Licensed Material under separate terms or conditions or stop
- distributing the Licensed Material at any time; however, doing so
- will not terminate this Public License.
-
- d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
- License.
-
-
-Section 7 -- Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different
- terms or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-
-Section 8 -- Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and
- shall not be interpreted to, reduce, limit, restrict, or impose
- conditions on any use of the Licensed Material that could lawfully
- be made without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted
- as a limitation upon, or waiver of, any privileges and immunities
- that apply to the Licensor or You, including from the legal
- processes of any jurisdiction or authority.
-
-
-=======================================================================
-
-Creative Commons is not a party to its public licenses.
-Notwithstanding, Creative Commons may elect to apply one of its public
-licenses to material it publishes and in those instances will be
-considered the "Licensor." Except for the limited purpose of indicating
-that material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS
deleted file mode 100644
index 843b1b2..0000000
--- a/vendor/github.com/opencontainers/go-digest/MAINTAINERS
+++ /dev/null
@@ -1,5 +0,0 @@
-Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
-Stephen Day <stevvooe@gmail.com> (@stevvooe)
-Vincent Batts <vbatts@hashbangbash.com> (@vbatts)
-Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> (@AkihiroSuda)
-Sebastiaan van Stijn <github@gone.nl> (@thaJeztah)
diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md
deleted file mode 100644
index a112872..0000000
--- a/vendor/github.com/opencontainers/go-digest/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# go-digest
-
-[GoDoc](https://godoc.org/github.com/opencontainers/go-digest) [Go Report Card](https://goreportcard.com/report/github.com/opencontainers/go-digest) [Build Status](https://travis-ci.org/opencontainers/go-digest)
-
-Common digest package used across the container ecosystem.
-
-Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information.
-
-# What is a digest?
-
-A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function).
-
-The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems:
-
-```go
-id := digest.FromBytes([]byte("my content"))
-```
-
-In the example above, the id can be used to uniquely identify the byte slice "my content".
-This allows two disparate applications to agree on a verifiable identifier without having to trust one another.
-
-An identifying digest can be verified, as follows:
-
-```go
-if id != digest.FromBytes([]byte("my content")) {
- return errors.New("the content has changed!")
-}
-```
-
-A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense:
-
-```go
-rd := getContent()
-verifier := id.Verifier()
-io.Copy(verifier, rd)
-
-if !verifier.Verified() {
- return errors.New("the content has changed!")
-}
-```
-
-Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system.
-
-# Usage
-
-While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package.
-
-1. Make sure to import the hash implementations into your application or the package will panic.
- You should have something like the following in the main (or other entrypoint) of your application:
-
- ```go
- import (
- _ "crypto/sha256"
- _ "crypto/sha512"
- )
- ```
-   This may seem inconvenient, but it allows you to replace the hash
- implementations with others, such as https://github.com/stevvooe/resumable.
-
-2. Even though `digest.Digest` may be assemblable as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
- While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
-
-3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
-
-# Stability
-
-The Go API, at this stage, is considered stable, unless otherwise noted.
-
-As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
-
-# Contributing
-
-This package is considered fairly complete.
-It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
-New additions will be met with skepticism.
-If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
-
-## Code of Conduct
-
-Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
-
-## Security
-
-If you find an issue, please follow the [security][security] protocol to report it.
-
-# Copyright and license
-
-Copyright © 2019, 2020 OCI Contributors
-Copyright © 2016 Docker, Inc.
-All rights reserved, except as follows.
-Code is released under the [Apache 2.0 license](LICENSE).
-This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
-You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
-
-[security]: https://github.com/opencontainers/org/blob/master/security
-[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
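
The README removed above documents the package's core calls: `FromBytes`, `Verifier`, and `Verified`, plus the blank `crypto/*` imports it requires. As a rough, self-contained sketch of that usage, assuming the go-digest API exactly as vendored above (the `main` program itself is hypothetical and not part of this change), a consumer might have looked like this:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	// Per the README's first usage note: register hash implementations,
	// or Algorithm.Hash will panic.
	_ "crypto/sha256"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Create a content identifier for a byte slice (sha256 is the Canonical algorithm).
	id := digest.FromBytes([]byte("my content"))
	fmt.Println(id)

	// Verify a stream of content against the identifier.
	verifier := id.Verifier()
	if _, err := io.Copy(verifier, bytes.NewReader([]byte("my content"))); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // true while the content is unchanged
}
```

Without the blank `crypto/sha256` import, `id.Verifier()` would panic inside `Algorithm.Hash`, which is exactly the pitfall the README's first usage note calls out.
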
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
deleted file mode 100644
index 490951d..0000000
--- a/vendor/github.com/opencontainers/go-digest/algorithm.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import (
- "crypto"
- "fmt"
- "hash"
- "io"
- "regexp"
-)
-
-// Algorithm identifies an implementation of a digester by an identifier.
-// Note that this defines both the hash algorithm used and the string
-// encoding.
-type Algorithm string
-
-// supported digest types
-const (
- SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
- SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
- SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
-
- // Canonical is the primary digest algorithm used with the distribution
- // project. Other digests may be used but this one is the primary storage
- // digest.
- Canonical = SHA256
-)
-
-var (
- // TODO(stevvooe): Follow the pattern of the standard crypto package for
- // registration of digests. Effectively, we are a registerable set and
- // common symbol access.
-
- // algorithms maps values to hash.Hash implementations. Other algorithms
- // may be available but they cannot be calculated by the digest package.
- algorithms = map[Algorithm]crypto.Hash{
- SHA256: crypto.SHA256,
- SHA384: crypto.SHA384,
- SHA512: crypto.SHA512,
- }
-
- // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
-	// Note that /A-F/ is disallowed.
- anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
- SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
- SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
- SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
- }
-)
-
-// Available returns true if the digest type is available for use. If this
-// returns false, calling Digester or Hash will panic.
-func (a Algorithm) Available() bool {
- h, ok := algorithms[a]
- if !ok {
- return false
- }
-
- // check availability of the hash, as well
- return h.Available()
-}
-
-func (a Algorithm) String() string {
- return string(a)
-}
-
-// Size returns the number of bytes returned by the hash.
-func (a Algorithm) Size() int {
- h, ok := algorithms[a]
- if !ok {
- return 0
- }
- return h.Size()
-}
-
-// Set is implemented to allow use of Algorithm as a command-line flag.
-func (a *Algorithm) Set(value string) error {
- if value == "" {
- *a = Canonical
- } else {
- // just do a type conversion, support is queried with Available.
- *a = Algorithm(value)
- }
-
- if !a.Available() {
- return ErrDigestUnsupported
- }
-
- return nil
-}
-
-// Digester returns a new digester for the specified algorithm. If the algorithm
-// does not have a digester implementation, nil will be returned. This can be
-// checked by calling Available before calling Digester.
-func (a Algorithm) Digester() Digester {
- return &digester{
- alg: a,
- hash: a.Hash(),
- }
-}
-
-// Hash returns a new hash as used by the algorithm. If not available, the
-// method will panic. Check Algorithm.Available() before calling.
-func (a Algorithm) Hash() hash.Hash {
- if !a.Available() {
- // Empty algorithm string is invalid
- if a == "" {
-			panic("empty digest algorithm, validate before calling Algorithm.Hash()")
- }
-
- // NOTE(stevvooe): A missing hash is usually a programming error that
- // must be resolved at compile time. We don't import in the digest
- // package to allow users to choose their hash implementation (such as
- // when using stevvooe/resumable or a hardware accelerated package).
- //
- // Applications that may want to resolve the hash at runtime should
-		// call Algorithm.Available before calling Algorithm.Hash().
- panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
- }
-
- return algorithms[a].New()
-}
-
-// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
-// the encoded portion of the digest.
-func (a Algorithm) Encode(d []byte) string {
- // TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
- // add support for back registration, we can modify this accordingly.
- return fmt.Sprintf("%x", d)
-}
-
-// FromReader returns the digest of the reader using the algorithm.
-func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
- digester := a.Digester()
-
- if _, err := io.Copy(digester.Hash(), rd); err != nil {
- return "", err
- }
-
- return digester.Digest(), nil
-}
-
-// FromBytes digests the input and returns a Digest.
-func (a Algorithm) FromBytes(p []byte) Digest {
- digester := a.Digester()
-
- if _, err := digester.Hash().Write(p); err != nil {
- // Writes to a Hash should never fail. None of the existing
- // hash implementations in the stdlib or hashes vendored
- // here can return errors from Write. Having a panic in this
- // condition instead of having FromBytes return an error value
- // avoids unnecessary error handling paths in all callers.
- panic("write to hash function returned error: " + err.Error())
- }
-
- return digester.Digest()
-}
-
-// FromString digests the string input and returns a Digest.
-func (a Algorithm) FromString(s string) Digest {
- return a.FromBytes([]byte(s))
-}
-
-// Validate validates the encoded portion string
-func (a Algorithm) Validate(encoded string) error {
- r, ok := anchoredEncodedRegexps[a]
- if !ok {
- return ErrDigestUnsupported
- }
-	// Digests must always be hex-encoded, ensuring that their hex portion will
-	// always be size*2.
- if a.Size()*2 != len(encoded) {
- return ErrDigestInvalidLength
- }
- if r.MatchString(encoded) {
- return nil
- }
- return ErrDigestInvalidFormat
-}
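
algorithm.go above defines the `Algorithm` type with `Available`, `Size`, and `FromReader`. A minimal sketch of selecting a non-default algorithm, assuming the API as deleted here (again, the example program is illustrative only):

```go
package main

import (
	_ "crypto/sha512" // makes digest.SHA512.Available() report true

	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	alg := digest.SHA512
	if !alg.Available() {
		panic("sha512 hash implementation not imported")
	}

	// FromReader consumes the reader to EOF and returns the digest.
	d, err := alg.FromReader(strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}
	fmt.Println(d)          // sha512:<128 lowercase hex characters>
	fmt.Println(alg.Size()) // 64 raw bytes per digest
}
```
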
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
deleted file mode 100644
index 518b5e7..0000000
--- a/vendor/github.com/opencontainers/go-digest/digest.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import (
- "fmt"
- "hash"
- "io"
- "regexp"
- "strings"
-)
-
-// Digest allows simple protection of hex formatted digest strings, prefixed
-// by their algorithm. Strings of type Digest have some guarantee of being in
-// the correct format and it provides quick access to the components of a
-// digest string.
-//
-// The following is an example of the contents of Digest types:
-//
-// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// This allows one to abstract the digest behind this type and work only in
-// those terms.
-type Digest string
-
-// NewDigest returns a Digest from alg and a hash.Hash object.
-func NewDigest(alg Algorithm, h hash.Hash) Digest {
- return NewDigestFromBytes(alg, h.Sum(nil))
-}
-
-// NewDigestFromBytes returns a new digest from the byte contents of p.
-// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
-// functions. This is also useful for rebuilding digests from binary
-// serializations.
-func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
- return NewDigestFromEncoded(alg, alg.Encode(p))
-}
-
-// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded.
-func NewDigestFromHex(alg, hex string) Digest {
- return NewDigestFromEncoded(Algorithm(alg), hex)
-}
-
-// NewDigestFromEncoded returns a Digest from alg and the encoded digest.
-func NewDigestFromEncoded(alg Algorithm, encoded string) Digest {
- return Digest(fmt.Sprintf("%s:%s", alg, encoded))
-}
-
-// DigestRegexp matches valid digest types.
-var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`)
-
-// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
-var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
-
-var (
-	// ErrDigestInvalidFormat is returned when the digest format is invalid.
-	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
-
-	// ErrDigestInvalidLength is returned when the digest has an invalid length.
-	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
-
-	// ErrDigestUnsupported is returned when the digest algorithm is unsupported.
-	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
-)
-
-// Parse parses s and returns the validated digest object. An error will
-// be returned if the format is invalid.
-func Parse(s string) (Digest, error) {
- d := Digest(s)
- return d, d.Validate()
-}
-
-// FromReader consumes the content of rd until io.EOF, returning canonical digest.
-func FromReader(rd io.Reader) (Digest, error) {
- return Canonical.FromReader(rd)
-}
-
-// FromBytes digests the input and returns a Digest.
-func FromBytes(p []byte) Digest {
- return Canonical.FromBytes(p)
-}
-
-// FromString digests the input and returns a Digest.
-func FromString(s string) Digest {
- return Canonical.FromString(s)
-}
-
-// Validate checks that the contents of d is a valid digest, returning an
-// error if not.
-func (d Digest) Validate() error {
- s := string(d)
- i := strings.Index(s, ":")
- if i <= 0 || i+1 == len(s) {
- return ErrDigestInvalidFormat
- }
- algorithm, encoded := Algorithm(s[:i]), s[i+1:]
- if !algorithm.Available() {
- if !DigestRegexpAnchored.MatchString(s) {
- return ErrDigestInvalidFormat
- }
- return ErrDigestUnsupported
- }
- return algorithm.Validate(encoded)
-}
-
-// Algorithm returns the algorithm portion of the digest. This will panic if
-// the underlying digest is not in a valid format.
-func (d Digest) Algorithm() Algorithm {
- return Algorithm(d[:d.sepIndex()])
-}
-
-// Verifier returns a writer object that can be used to verify a stream of
-// content against the digest. If the digest is invalid, the method will panic.
-func (d Digest) Verifier() Verifier {
- return hashVerifier{
- hash: d.Algorithm().Hash(),
- digest: d,
- }
-}
-
-// Encoded returns the encoded portion of the digest. This will panic if the
-// underlying digest is not in a valid format.
-func (d Digest) Encoded() string {
- return string(d[d.sepIndex()+1:])
-}
-
-// Hex is deprecated. Please use Digest.Encoded.
-func (d Digest) Hex() string {
- return d.Encoded()
-}
-
-func (d Digest) String() string {
- return string(d)
-}
-
-func (d Digest) sepIndex() int {
- i := strings.Index(string(d), ":")
-
- if i < 0 {
- panic(fmt.Sprintf("no ':' separator in digest %q", d))
- }
-
- return i
-}
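
digest.go above defines the `Digest` string type together with `Parse`, `Validate`, `Algorithm`, and `Encoded`. A small sketch of validating untrusted input, under the same assumptions as the previous examples:

```go
package main

import (
	_ "crypto/sha256" // required so Validate treats sha256 as an available algorithm

	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// The example digest string from the package documentation.
	s := "sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"

	d, err := digest.Parse(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm()) // sha256
	fmt.Println(d.Encoded())   // the 64-character hex portion

	// Malformed input is rejected rather than silently accepted.
	if _, err := digest.Parse("not-a-digest"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```
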
diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go
deleted file mode 100644
index ede9077..0000000
--- a/vendor/github.com/opencontainers/go-digest/digester.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import "hash"
-
-// Digester calculates the digest of written data. Writes should go directly
-// to the return value of Hash, while calling Digest will return the current
-// value of the digest.
-type Digester interface {
- Hash() hash.Hash // provides direct access to underlying hash instance.
- Digest() Digest
-}
-
-// digester provides a simple digester definition that embeds a hasher.
-type digester struct {
- alg Algorithm
- hash hash.Hash
-}
-
-func (d *digester) Hash() hash.Hash {
- return d.hash
-}
-
-func (d *digester) Digest() Digest {
- return NewDigest(d.alg, d.hash)
-}
diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go
deleted file mode 100644
index 83d3a93..0000000
--- a/vendor/github.com/opencontainers/go-digest/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package digest provides a generalized type to opaquely represent message
-// digests and their operations within the registry. The Digest type is
-// designed to serve as a flexible identifier in a content-addressable system.
-// More importantly, it provides tools and wrappers to work with
-// hash.Hash-based digests with little effort.
-//
-// Basics
-//
-// The format of a digest is simply a string with two parts, dubbed the
-// "algorithm" and the "digest", separated by a colon:
-//
-// <algorithm>:<digest>
-//
-// An example of a sha256 digest representation follows:
-//
-// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// The "algorithm" portion defines both the hashing algorithm used to calculate
-// the digest and the encoding of the resulting digest, which defaults to "hex"
-// if not otherwise specified. Currently, all supported algorithms have their
-// digests encoded in hex strings.
-//
-// In the example above, the string "sha256" is the algorithm and the hex bytes
-// are the "digest".
-//
-// Because the Digest type is simply a string, once a valid Digest is
-// obtained, comparisons are cheap, quick and simple to express with the
-// standard equality operator.
-//
-// Verification
-//
-// The main benefit of using the Digest type is simple verification against a
-// given digest. The Verifier interface, modeled after the stdlib hash.Hash
-// interface, provides a common write sink for digest verification. After
-// writing is complete, calling the Verifier.Verified method will indicate
-// whether or not the stream of bytes matches the target digest.
-//
-// Missing Features
-//
-// In addition to the above, we intend to add the following features to this
-// package:
-//
-// 1. A Digester type that supports write sink digest calculation.
-//
-// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
-//
-package digest
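
doc.go above describes the `<algorithm>:<encoded>` string format and notes that, because `Digest` is just a string, comparisons are plain equality. A brief illustration of that point, again assuming the API as vendored here:

```go
package main

import (
	_ "crypto/sha256"

	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	a := digest.FromString("hello")
	b := digest.FromBytes([]byte("hello"))
	c := digest.FromString("goodbye")

	// Same algorithm and same content produce the same string, so == suffices.
	fmt.Println(a == b) // true
	fmt.Println(a == c) // false
}
```
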
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
deleted file mode 100644
index afef506..0000000
--- a/vendor/github.com/opencontainers/go-digest/verifiers.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019, 2020 OCI Contributors
-// Copyright 2017 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package digest
-
-import (
- "hash"
- "io"
-)
-
-// Verifier presents a general verification interface to be used with message
-// digests and other byte stream verifications. Users instantiate a Verifier
-// from one of the various methods, write the data under test to it then check
-// the result with the Verified method.
-type Verifier interface {
- io.Writer
-
- // Verified will return true if the content written to Verifier matches
- // the digest.
- Verified() bool
-}
-
-type hashVerifier struct {
- digest Digest
- hash hash.Hash
-}
-
-func (hv hashVerifier) Write(p []byte) (n int, err error) {
- return hv.hash.Write(p)
-}
-
-func (hv hashVerifier) Verified() bool {
- return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash)
-}
diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE
deleted file mode 100644
index 9fdc20f..0000000
--- a/vendor/github.com/opencontainers/image-spec/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- Copyright 2016 The Linux Foundation.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
deleted file mode 100644
index 35d8108..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-const (
- // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339).
- AnnotationCreated = "org.opencontainers.image.created"
-
- // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string).
- AnnotationAuthors = "org.opencontainers.image.authors"
-
- // AnnotationURL is the annotation key for the URL to find more information on the image.
- AnnotationURL = "org.opencontainers.image.url"
-
- // AnnotationDocumentation is the annotation key for the URL to get documentation on the image.
- AnnotationDocumentation = "org.opencontainers.image.documentation"
-
- // AnnotationSource is the annotation key for the URL to get source code for building the image.
- AnnotationSource = "org.opencontainers.image.source"
-
- // AnnotationVersion is the annotation key for the version of the packaged software.
- // The version MAY match a label or tag in the source code repository.
- // The version MAY be Semantic versioning-compatible.
- AnnotationVersion = "org.opencontainers.image.version"
-
- // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software.
- AnnotationRevision = "org.opencontainers.image.revision"
-
- // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual.
- AnnotationVendor = "org.opencontainers.image.vendor"
-
- // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression.
- AnnotationLicenses = "org.opencontainers.image.licenses"
-
- // AnnotationRefName is the annotation key for the name of the reference for a target.
- // SHOULD only be considered valid when on descriptors on `index.json` within image layout.
- AnnotationRefName = "org.opencontainers.image.ref.name"
-
- // AnnotationTitle is the annotation key for the human-readable title of the image.
- AnnotationTitle = "org.opencontainers.image.title"
-
- // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
- AnnotationDescription = "org.opencontainers.image.description"
-)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
deleted file mode 100644
index fe799bd..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-import (
- "time"
-
- digest "github.com/opencontainers/go-digest"
-)
-
-// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
-type ImageConfig struct {
- // User defines the username or UID which the process in the container should run as.
- User string `json:"User,omitempty"`
-
- // ExposedPorts a set of ports to expose from a container running this image.
- ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
-
- // Env is a list of environment variables to be used in a container.
- Env []string `json:"Env,omitempty"`
-
- // Entrypoint defines a list of arguments to use as the command to execute when the container starts.
- Entrypoint []string `json:"Entrypoint,omitempty"`
-
- // Cmd defines the default arguments to the entrypoint of the container.
- Cmd []string `json:"Cmd,omitempty"`
-
- // Volumes is a set of directories describing where the process is likely write data specific to a container instance.
- Volumes map[string]struct{} `json:"Volumes,omitempty"`
-
- // WorkingDir sets the current working directory of the entrypoint process in the container.
- WorkingDir string `json:"WorkingDir,omitempty"`
-
- // Labels contains arbitrary metadata for the container.
- Labels map[string]string `json:"Labels,omitempty"`
-
- // StopSignal contains the system call signal that will be sent to the container to exit.
- StopSignal string `json:"StopSignal,omitempty"`
-}
-
-// RootFS describes a layer content addresses
-type RootFS struct {
- // Type is the type of the rootfs.
- Type string `json:"type"`
-
- // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
- DiffIDs []digest.Digest `json:"diff_ids"`
-}
-
-// History describes the history of a layer.
-type History struct {
- // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
- Created *time.Time `json:"created,omitempty"`
-
- // CreatedBy is the command which created the layer.
- CreatedBy string `json:"created_by,omitempty"`
-
- // Author is the author of the build point.
- Author string `json:"author,omitempty"`
-
- // Comment is a custom message set when creating the layer.
- Comment string `json:"comment,omitempty"`
-
- // EmptyLayer is used to mark if the history item created a filesystem diff.
- EmptyLayer bool `json:"empty_layer,omitempty"`
-}
-
-// Image is the JSON structure which describes some basic information about the image.
-// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
-type Image struct {
- // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.
- Created *time.Time `json:"created,omitempty"`
-
- // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
- Author string `json:"author,omitempty"`
-
- // Architecture is the CPU architecture which the binaries in this image are built to run on.
- Architecture string `json:"architecture"`
-
- // OS is the name of the operating system which the image is built to run on.
- OS string `json:"os"`
-
- // Config defines the execution parameters which should be used as a base when running a container using the image.
- Config ImageConfig `json:"config,omitempty"`
-
- // RootFS references the layer content addresses used by the image.
- RootFS RootFS `json:"rootfs"`
-
- // History describes the history of each layer.
- History []History `json:"history,omitempty"`
-}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
deleted file mode 100644
index 6e442a0..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-import digest "github.com/opencontainers/go-digest"
-
-// Descriptor describes the disposition of targeted content.
-// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype
-// when marshalled to JSON.
-type Descriptor struct {
- // MediaType is the media type of the object this schema refers to.
- MediaType string `json:"mediaType,omitempty"`
-
- // Digest is the digest of the targeted content.
- Digest digest.Digest `json:"digest"`
-
- // Size specifies the size in bytes of the blob.
- Size int64 `json:"size"`
-
- // URLs specifies a list of URLs from which this object MAY be downloaded
- URLs []string `json:"urls,omitempty"`
-
- // Annotations contains arbitrary metadata relating to the targeted content.
- Annotations map[string]string `json:"annotations,omitempty"`
-
- // Platform describes the platform which the image in the manifest runs on.
- //
- // This should only be used when referring to a manifest.
- Platform *Platform `json:"platform,omitempty"`
-}
-
-// Platform describes the platform which the image in the manifest runs on.
-type Platform struct {
- // Architecture field specifies the CPU architecture, for example
- // `amd64` or `ppc64`.
- Architecture string `json:"architecture"`
-
- // OS specifies the operating system, for example `linux` or `windows`.
- OS string `json:"os"`
-
- // OSVersion is an optional field specifying the operating system
- // version, for example on Windows `10.0.14393.1066`.
- OSVersion string `json:"os.version,omitempty"`
-
- // OSFeatures is an optional field specifying an array of strings,
- // each listing a required OS feature (for example on Windows `win32k`).
- OSFeatures []string `json:"os.features,omitempty"`
-
- // Variant is an optional field specifying a variant of the CPU, for
- // example `v7` to specify ARMv7 when architecture is `arm`.
- Variant string `json:"variant,omitempty"`
-}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
deleted file mode 100644
index 82da6c6..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-import "github.com/opencontainers/image-spec/specs-go"
-
-// Index references manifests for various platforms.
-// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON.
-type Index struct {
- specs.Versioned
-
- // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json`
- MediaType string `json:"mediaType,omitempty"`
-
- // Manifests references platform specific manifests.
- Manifests []Descriptor `json:"manifests"`
-
- // Annotations contains arbitrary metadata for the image index.
- Annotations map[string]string `json:"annotations,omitempty"`
-}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
deleted file mode 100644
index fc79e9e..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-const (
- // ImageLayoutFile is the file name of oci image layout file
- ImageLayoutFile = "oci-layout"
- // ImageLayoutVersion is the version of ImageLayout
- ImageLayoutVersion = "1.0.0"
-)
-
-// ImageLayout is the structure in the "oci-layout" file, found in the root
-// of an OCI Image-layout directory.
-type ImageLayout struct {
- Version string `json:"imageLayoutVersion"`
-}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
deleted file mode 100644
index d72d15c..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-import "github.com/opencontainers/image-spec/specs-go"
-
-// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON.
-type Manifest struct {
- specs.Versioned
-
- // MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
- MediaType string `json:"mediaType,omitempty"`
-
- // Config references a configuration object for a container, by digest.
- // The referenced configuration object is a JSON blob that the runtime uses to set up the container.
- Config Descriptor `json:"config"`
-
- // Layers is an indexed list of layers referenced by the manifest.
- Layers []Descriptor `json:"layers"`
-
- // Annotations contains arbitrary metadata for the image manifest.
- Annotations map[string]string `json:"annotations,omitempty"`
-}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
deleted file mode 100644
index bad7bb9..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-const (
- // MediaTypeDescriptor specifies the media type for a content descriptor.
- MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json"
-
- // MediaTypeLayoutHeader specifies the media type for the oci-layout.
- MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json"
-
- // MediaTypeImageManifest specifies the media type for an image manifest.
- MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json"
-
- // MediaTypeImageIndex specifies the media type for an image index.
- MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json"
-
- // MediaTypeImageLayer is the media type used for layers referenced by the manifest.
- MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar"
-
- // MediaTypeImageLayerGzip is the media type used for gzipped layers
- // referenced by the manifest.
- MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
-
- // MediaTypeImageLayerNonDistributable is the media type for layers referenced by
- // the manifest but with distribution restrictions.
- MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
-
- // MediaTypeImageLayerNonDistributableGzip is the media type for
- // gzipped layers referenced by the manifest but with distribution
- // restrictions.
- MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
-
- // MediaTypeImageConfig specifies the media type for the image configuration.
- MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
-)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
deleted file mode 100644
index 0d9543f..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package specs
-
-import "fmt"
-
-const (
- // VersionMajor is for an API incompatible changes
- VersionMajor = 1
- // VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 0
- // VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 2
-
- // VersionDev indicates development branch. Releases will be empty string.
- VersionDev = ""
-)
-
-// Version is the specification version that the package types support.
-var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
deleted file mode 100644
index 58a1510..0000000
--- a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package specs
-
-// Versioned provides a struct with the manifest schemaVersion and mediaType.
-// Incoming content with unknown schema version can be decoded against this
-// struct to check the version.
-type Versioned struct {
- // SchemaVersion is the image manifest schema that this image follows
- SchemaVersion int `json:"schemaVersion"`
-}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
deleted file mode 100644
index 1fb13ab..0000000
--- a/vendor/github.com/sirupsen/logrus/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-logrus
-vendor
-
-.idea/
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
deleted file mode 100644
index 65dc285..0000000
--- a/vendor/github.com/sirupsen/logrus/.golangci.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-run:
- # do not run on test files yet
- tests: false
-
-# all available settings of specific linters
-linters-settings:
- errcheck:
- # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
- # default is false: such cases aren't reported by default.
- check-type-assertions: false
-
- # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
- # default is false: such cases aren't reported by default.
- check-blank: false
-
- lll:
- line-length: 100
- tab-width: 4
-
- prealloc:
- simple: false
- range-loops: false
- for-loops: false
-
- whitespace:
- multi-if: false # Enforces newlines (or comments) after every multi-line if statement
- multi-func: false # Enforces newlines (or comments) after every multi-line function signature
-
-linters:
- enable:
- - megacheck
- - govet
- disable:
- - maligned
- - prealloc
- disable-all: false
- presets:
- - bugs
- - unused
- fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
deleted file mode 100644
index c1dbd5a..0000000
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go_import_path: github.com/sirupsen/logrus
-git:
- depth: 1
-env:
- - GO111MODULE=on
-go: 1.15.x
-os: linux
-install:
- - ./travis/install.sh
-script:
- - cd ci
- - go run mage.go -v -w ../ crossBuild
- - go run mage.go -v -w ../ lint
- - go run mage.go -v -w ../ test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
deleted file mode 100644
index 7567f61..0000000
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ /dev/null
@@ -1,259 +0,0 @@
-# 1.8.1
-Code quality:
- * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer
- * improve timestamp format documentation
-
-Fixes:
- * fix race condition on logger hooks
-
-
-# 1.8.0
-
-Correct versioning number replacing v1.7.1.
-
-# 1.7.1
-
-Beware this release has introduced a new public API and its semver is therefore incorrect.
-
-Code quality:
- * use go 1.15 in travis
- * use magefile as task runner
-
-Fixes:
- * small fixes about new go 1.13 error formatting system
- * Fix for long time race condiction with mutating data hooks
-
-Features:
- * build support for zos
-
-# 1.7.0
-Fixes:
- * the dependency toward a windows terminal library has been removed
-
-Features:
- * a new buffer pool management API has been added
- * a set of `<LogLevel>Fn()` functions have been added
-
-# 1.6.0
-Fixes:
- * end of line cleanup
- * revert the entry concurrency bug fix whic leads to deadlock under some circumstances
- * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
-
-Features:
- * add an option to the `TextFormatter` to completely disable fields quoting
-
-# 1.5.0
-Code quality:
- * add golangci linter run on travis
-
-Fixes:
- * add mutex for hooks concurrent access on `Entry` data
- * caller function field for go1.14
- * fix build issue for gopherjs target
-
-Feature:
- * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level
- * add a `DisableHTMLEscape` option in the `JSONFormatter`
- * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
-
-# 1.4.2
- * Fixes build break for plan9, nacl, solaris
-# 1.4.1
-This new release introduces:
- * Enhance TextFormatter to not print caller information when they are empty (#944)
- * Remove dependency on golang.org/x/crypto (#932, #943)
-
-Fixes:
- * Fix Entry.WithContext method to return a copy of the initial entry (#941)
-
-# 1.4.0
-This new release introduces:
- * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
- * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
- * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
-
-Fixes:
- * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
- * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
- * Fix infinite recursion on unknown `Level.String()` (#907)
- * Fix race condition in `getCaller` (#916).
-
-
-# 1.3.0
-This new release introduces:
- * Log, Logf, Logln functions for Logger and Entry that take a Level
-
-Fixes:
- * Building prometheus node_exporter on AIX (#840)
- * Race condition in TextFormatter (#468)
- * Travis CI import path (#868)
- * Remove coloured output on Windows (#862)
- * Pointer to func as field in JSONFormatter (#870)
- * Properly marshal Levels (#873)
-
-# 1.2.0
-This new release introduces:
- * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
- * A new trace level named `Trace` whose level is below `Debug`
- * A configurable exit function to be called upon a Fatal trace
- * The `Level` object now implements `encoding.TextUnmarshaler` interface
-
-# 1.1.1
-This is a bug fix release.
- * fix the build break on Solaris
- * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
-
-# 1.1.0
-This new release introduces:
- * several fixes:
- * a fix for a race condition on entry formatting
- * proper cleanup of previously used entries before putting them back in the pool
- * the extra new line at the end of message in text formatter has been removed
- * a new global public API to check if a level is activated: IsLevelEnabled
- * the following methods have been added to the Logger object
- * IsLevelEnabled
- * SetFormatter
- * SetOutput
- * ReplaceHooks
- * introduction of go module
- * an indent configuration for the json formatter
- * output colour support for windows
- * the field sort function is now configurable for text formatter
- * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater
-
-# 1.0.6
-
-This new release introduces:
- * a new api WithTime which allows to easily force the time of the log entry
- which is mostly useful for logger wrapper
- * a fix reverting the immutability of the entry given as parameter to the hooks
- a new configuration field of the json formatter in order to put all the fields
- in a nested dictionnary
- * a new SetOutput method in the Logger
- * a new configuration of the textformatter to configure the name of the default keys
- * a new configuration of the text formatter to disable the level truncation
-
-# 1.0.5
-
-* Fix hooks race (#707)
-* Fix panic deadlock (#695)
-
-# 1.0.4
-
-* Fix race when adding hooks (#612)
-* Fix terminal check in AppEngine (#635)
-
-# 1.0.3
-
-* Replace example files with testable examples
-
-# 1.0.2
-
-* bug: quote non-string values in text formatter (#583)
-* Make (*Logger) SetLevel a public method
-
-# 1.0.1
-
-* bug: fix escaping in text formatter (#575)
-
-# 1.0.0
-
-* Officially changed name to lower-case
-* bug: colors on Windows 10 (#541)
-* bug: fix race in accessing level (#512)
-
-# 0.11.5
-
-* feature: add writer and writerlevel to entry (#372)
-
-# 0.11.4
-
-* bug: fix undefined variable on solaris (#493)
-
-# 0.11.3
-
-* formatter: configure quoting of empty values (#484)
-* formatter: configure quoting character (default is `"`) (#484)
-* bug: fix not importing io correctly in non-linux environments (#481)
-
-# 0.11.2
-
-* bug: fix windows terminal detection (#476)
-
-# 0.11.1
-
-* bug: fix tty detection with custom out (#471)
-
-# 0.11.0
-
-* performance: Use bufferpool to allocate (#370)
-* terminal: terminal detection for app-engine (#343)
-* feature: exit handler (#375)
-
-# 0.10.0
-
-* feature: Add a test hook (#180)
-* feature: `ParseLevel` is now case-insensitive (#326)
-* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
-* performance: avoid re-allocations on `WithFields` (#335)
-
-# 0.9.0
-
-* logrus/text_formatter: don't emit empty msg
-* logrus/hooks/airbrake: move out of main repository
-* logrus/hooks/sentry: move out of main repository
-* logrus/hooks/papertrail: move out of main repository
-* logrus/hooks/bugsnag: move out of main repository
-* logrus/core: run tests with `-race`
-* logrus/core: detect TTY based on `stderr`
-* logrus/core: support `WithError` on logger
-* logrus/core: Solaris support
-
-# 0.8.7
-
-* logrus/core: fix possible race (#216)
-* logrus/doc: small typo fixes and doc improvements
-
-
-# 0.8.6
-
-* hooks/raven: allow passing an initialized client
-
-# 0.8.5
-
-* logrus/core: revert #208
-
-# 0.8.4
-
-* formatter/text: fix data race (#218)
-
-# 0.8.3
-
-* logrus/core: fix entry log level (#208)
-* logrus/core: improve performance of text formatter by 40%
-* logrus/core: expose `LevelHooks` type
-* logrus/core: add support for DragonflyBSD and NetBSD
-* formatter/text: print structs more verbosely
-
-# 0.8.2
-
-* logrus: fix more Fatal family functions
-
-# 0.8.1
-
-* logrus: fix not exiting on `Fatalf` and `Fatalln`
-
-# 0.8.0
-
-* logrus: defaults to stderr instead of stdout
-* hooks/sentry: add special field for `*http.Request`
-* formatter/text: ignore Windows for colors
-
-# 0.7.3
-
-* formatter/\*: allow configuration of timestamp layout
-
-# 0.7.2
-
-* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
deleted file mode 100644
index f090cb4..0000000
--- a/vendor/github.com/sirupsen/logrus/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Simon Eskildsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
deleted file mode 100644
index 5152b6a..0000000
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ /dev/null
@@ -1,513 +0,0 @@
-# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
-
-Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger.
-
-**Logrus is in maintenance-mode.** We will not be introducing new features. It's
-simply too hard to do in a way that won't break many people's projects, which is
-the last thing you want from your Logging library (again...).
-
-This does not mean Logrus is dead. Logrus will continue to be maintained for
-security, (backwards compatible) bug fixes, and performance (where we are
-limited by the interface).
-
-I believe Logrus' biggest contribution is to have played a part in today's
-widespread use of structured logging in Golang. There doesn't seem to be a
-reason to do a major, breaking iteration into Logrus V2, since the fantastic Go
-community has built those independently. Many fantastic alternatives have sprung
-up. Logrus would look like those, had it been re-designed with what we know
-about structured logging in Go today. Check out, for example,
-[Zerolog][zerolog], [Zap][zap], and [Apex][apex].
-
-[zerolog]: https://github.com/rs/zerolog
-[zap]: https://github.com/uber-go/zap
-[apex]: https://github.com/apex/log
-
-**Seeing weird case-sensitive problems?** It's in the past been possible to
-import Logrus as both upper- and lower-case. Due to the Go package environment,
-this caused issues in the community and we needed a standard. Some environments
-experienced problems with the upper-case variant, so the lower-case was decided.
-Everything using `logrus` will need to use the lower-case:
-`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
-
-To fix Glide, see [these
-comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
-For an in-depth explanation of the casing issue, see [this
-comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
-
-Nicely color-coded in development (when a TTY is attached, otherwise just
-plain text):
-
-
-
-With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
-or Splunk:
-
-```json
-{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
-ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
-
-{"level":"warning","msg":"The group's number increased tremendously!",
-"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
-"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
-"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
-
-{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
-"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
-```
-
-With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
-attached, the output is compatible with the
-[logfmt](http://godoc.org/github.com/kr/logfmt) format:
-
-```text
-time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
-time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
-time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
-time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
-time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
-time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-```
-To ensure this behaviour even if a TTY is attached, set your formatter as follows:
-
-```go
- log.SetFormatter(&log.TextFormatter{
- DisableColors: true,
- FullTimestamp: true,
- })
-```
-
-#### Logging Method Name
-
-If you wish to add the calling method as a field, instruct the logger via:
-```go
-log.SetReportCaller(true)
-```
-This adds the caller as 'method' like so:
-
-```json
-{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
-"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
-```
-
-```text
-time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
-```
-Note that this does add measurable overhead - the cost will depend on the version of Go, but is
-between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
-environment via benchmarks:
-```
-go test -bench=.*CallerTracing
-```
-
-
-#### Case-sensitivity
-
-The organization's name was changed to lower-case--and this will not be changed
-back. If you are getting import conflicts due to case sensitivity, please use
-the lower-case import: `github.com/sirupsen/logrus`.
-
-#### Example
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
-```go
-package main
-
-import (
- log "github.com/sirupsen/logrus"
-)
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- }).Info("A walrus appears")
-}
-```
-
-Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
-and you'll now have the flexibility of Logrus. You can customize it all you
-want:
-
-```go
-package main
-
-import (
- "os"
- log "github.com/sirupsen/logrus"
-)
-
-func init() {
- // Log as JSON instead of the default ASCII formatter.
- log.SetFormatter(&log.JSONFormatter{})
-
- // Output to stdout instead of the default stderr
- // Can be any io.Writer, see below for File example
- log.SetOutput(os.Stdout)
-
- // Only log the warning severity or above.
- log.SetLevel(log.WarnLevel)
-}
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 100,
- }).Fatal("The ice breaks!")
-
- // A common pattern is to re-use fields between logging statements by re-using
- // the logrus.Entry returned from WithFields()
- contextLogger := log.WithFields(log.Fields{
- "common": "this is a common field",
- "other": "I also should be logged always",
- })
-
- contextLogger.Info("I'll be logged with common and other field")
- contextLogger.Info("Me too")
-}
-```
-
-For more advanced usage such as logging to multiple locations from the same
-application, you can also create an instance of the `logrus` Logger:
-
-```go
-package main
-
-import (
- "os"
- "github.com/sirupsen/logrus"
-)
-
-// Create a new instance of the logger. You can have any number of instances.
-var log = logrus.New()
-
-func main() {
- // The API for setting attributes is a little different than the package level
- // exported logger. See Godoc.
- log.Out = os.Stdout
-
- // You could set this to any `io.Writer` such as a file
- // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
- // if err == nil {
- // log.Out = file
- // } else {
- // log.Info("Failed to log to file, using default stderr")
- // }
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-}
-```
-
-#### Fields
-
-Logrus encourages careful, structured logging through logging fields instead of
-long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
-to send event %s to topic %s with key %d")`, you should log the much more
-discoverable:
-
-```go
-log.WithFields(log.Fields{
- "event": event,
- "topic": topic,
- "key": key,
-}).Fatal("Failed to send event")
-```
-
-We've found this API forces you to think about logging in a way that produces
-much more useful logging messages. We've been in countless situations where just
-a single added field to a log statement that was already there would've saved us
-hours. The `WithFields` call is optional.
-
-In general, with Logrus using any of the `printf`-family functions should be
-seen as a hint you should add a field, however, you can still use the
-`printf`-family functions with Logrus.
-
-#### Default Fields
-
-Often it's helpful to have fields _always_ attached to log statements in an
-application or parts of one. For example, you may want to always log the
-`request_id` and `user_ip` in the context of a request. Instead of writing
-`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
-every line, you can create a `logrus.Entry` to pass around instead:
-
-```go
-requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
-requestLogger.Info("something happened on that request") # will log request_id and user_ip
-requestLogger.Warn("something not great happened")
-```
-
-#### Hooks
-
-You can add hooks for logging levels. For example to send errors to an exception
-tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
-multiple places simultaneously, e.g. syslog.
-
-Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
-`init`:
-
-```go
-import (
- log "github.com/sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
- logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
- "log/syslog"
-)
-
-func init() {
-
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(airbrake.NewHook(123, "xyz", "production"))
-
- hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
- if err != nil {
- log.Error("Unable to connect to local syslog daemon")
- } else {
- log.AddHook(hook)
- }
-}
-```
-Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-
-A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
-
-
-#### Level logging
-
-Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
-
-```go
-log.Trace("Something very low level.")
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
-// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
-// Calls panic() after logging
-log.Panic("I'm bailing.")
-```
-
-You can set the logging level on a `Logger`, then it will only log entries with
-that severity or anything above it:
-
-```go
-// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
-```
-
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
-environment if your application has that.
-
-#### Entries
-
-Besides the fields added with `WithField` or `WithFields` some fields are
-automatically added to all logging events:
-
-1. `time`. The timestamp when the entry was created.
-2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
- the `AddFields` call. E.g. `Failed to send event.`
-3. `level`. The logging level. E.g. `info`.
-
-#### Environments
-
-Logrus has no notion of environment.
-
-If you wish for hooks and formatters to only be used in specific environments,
-you should handle that yourself. For example, if your application has a global
-variable `Environment`, which is a string representation of the environment you
-could do:
-
-```go
-import (
- log "github.com/sirupsen/logrus"
-)
-
-init() {
- // do something here to set environment depending on an environment variable
- // or command-line flag
- if Environment == "production" {
- log.SetFormatter(&log.JSONFormatter{})
- } else {
- // The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(&log.TextFormatter{})
- }
-}
-```
-
-This configuration is how `logrus` was intended to be used, but JSON in
-production is mostly only useful if you do log aggregation with tools like
-Splunk or Logstash.
-
-#### Formatters
-
-The built-in logging formatters are:
-
-* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
- without colors.
- * *Note:* to force colored output when there is no TTY, set the `ForceColors`
- field to `true`. To force no colored output even if there is a TTY set the
- `DisableColors` field to `true`. For Windows, see
- [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
- * When colors are enabled, levels are truncated to 4 characters by default. To disable
- truncation set the `DisableLevelTruncation` field to `true`.
- * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
-* `logrus.JSONFormatter`. Logs fields as JSON.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
-
-Third party logging formatters:
-
-* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
-* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
-* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
-* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
-* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
-* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
-* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files.
-* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added.
-
-You can define your formatter by implementing the `Formatter` interface,
-requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
-`Fields` type (`map[string]interface{}`) with all your fields as well as the
-default ones (see Entries section above):
-
-```go
-type MyJSONFormatter struct {
-}
-
-log.SetFormatter(new(MyJSONFormatter))
-
-func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
- // Note this doesn't include Time, Level and Message which are available on
- // the Entry. Consult `godoc` on information about those fields or read the
- // source of the official loggers.
- serialized, err := json.Marshal(entry.Data)
- if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
- }
- return append(serialized, '\n'), nil
-}
-```
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
- // create a stdlib log.Logger that writes to
- // logrus.Logger.
- ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-This means that we can override the standard library logger easily:
-
-```go
-logger := logrus.New()
-logger.Formatter = &logrus.JSONFormatter{}
-
-// Use logrus for standard log output
-// Note that `log` here references stdlib's log
-// Not logrus imported under the name `log`.
-log.SetOutput(logger.Writer())
-```
-
-#### Rotation
-
-Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotate(8)`) that can compress and delete old log
-entries. It should not be a feature of the application-level logger.
-
-#### Tools
-
-| Tool | Description |
-| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.|
-|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
-
-#### Testing
-
-Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-
-* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook
-* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
-
-```go
-import(
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestSomething(t*testing.T){
- logger, hook := test.NewNullLogger()
- logger.Error("Helloerror")
-
- assert.Equal(t, 1, len(hook.Entries))
- assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
- assert.Equal(t, "Helloerror", hook.LastEntry().Message)
-
- hook.Reset()
- assert.Nil(t, hook.LastEntry())
-}
-```
-
-#### Fatal handlers
-
-Logrus can register one or more functions that will be called when any `fatal`
-level message is logged. The registered handlers will be executed before
-logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
-to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
-
-```
-...
-handler := func() {
- // gracefully shutdown something...
-}
-logrus.RegisterExitHandler(handler)
-...
-```
-
-#### Thread safety
-
-By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
-If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-
-Situation when locking is not needed includes:
-
-* You have no hooks registered, or hooks calling is already thread-safe.
-
-* Writing to logger.Out is already thread-safe, for example:
-
- 1) logger.Out is protected by locks.
-
- 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing)
-
- (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
deleted file mode 100644
index 8fd189e..0000000
--- a/vendor/github.com/sirupsen/logrus/alt_exit.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package logrus
-
-// The following code was sourced and modified from the
-// https://github.com/tebeka/atexit package governed by the following license:
-//
-// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-import (
- "fmt"
- "os"
-)
-
-var handlers = []func(){}
-
-func runHandler(handler func()) {
- defer func() {
- if err := recover(); err != nil {
- fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
- }
- }()
-
- handler()
-}
-
-func runHandlers() {
- for _, handler := range handlers {
- runHandler(handler)
- }
-}
-
-// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
-func Exit(code int) {
- runHandlers()
- os.Exit(code)
-}
-
-// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
-// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
-// any Fatal log entry is made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to shut down gracefully. An example use case could be
-// closing database connections, or sending an alert that the application is
-// closing.
-func RegisterExitHandler(handler func()) {
- handlers = append(handlers, handler)
-}
-
-// DeferExitHandler prepends a Logrus Exit handler to the list of handlers;
-// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
-// any Fatal log entry is made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to shut down gracefully. An example use case could be
-// closing database connections, or sending an alert that the application is
-// closing.
-func DeferExitHandler(handler func()) {
- handlers = append([]func(){handler}, handlers...)
-}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
deleted file mode 100644
index df9d65c..0000000
--- a/vendor/github.com/sirupsen/logrus/appveyor.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
- GOPATH: c:\gopath
-branches:
- only:
- - master
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
-build_script:
- - go get -t
- - go test
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
deleted file mode 100644
index 4545dec..0000000
--- a/vendor/github.com/sirupsen/logrus/buffer_pool.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "sync"
-)
-
-var (
- bufferPool BufferPool
-)
-
-type BufferPool interface {
- Put(*bytes.Buffer)
- Get() *bytes.Buffer
-}
-
-type defaultPool struct {
- pool *sync.Pool
-}
-
-func (p *defaultPool) Put(buf *bytes.Buffer) {
- p.pool.Put(buf)
-}
-
-func (p *defaultPool) Get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func getBuffer() *bytes.Buffer {
- return bufferPool.Get()
-}
-
-func putBuffer(buf *bytes.Buffer) {
- buf.Reset()
- bufferPool.Put(buf)
-}
-
-// SetBufferPool allows replacing the default logrus buffer pool
-// to better meet the specific needs of an application.
-func SetBufferPool(bp BufferPool) {
- bufferPool = bp
-}
-
-func init() {
- SetBufferPool(&defaultPool{
- pool: &sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
- },
- })
-}
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
deleted file mode 100644
index da67aba..0000000
--- a/vendor/github.com/sirupsen/logrus/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
-
-
-The simplest way to use Logrus is the package-level exported logger:
-
- package main
-
- import (
- log "github.com/sirupsen/logrus"
- )
-
- func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "number": 1,
- "size": 10,
- }).Info("A walrus appears")
- }
-
-Output:
- time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-
-For a full guide visit https://github.com/sirupsen/logrus
-*/
-package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
deleted file mode 100644
index 07a1e5f..0000000
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ /dev/null
@@ -1,431 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "context"
- "fmt"
- "os"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-var (
-
- // qualified package name, cached at first use
- logrusPackage string
-
- // Positions in the call stack when tracing to report the calling method
- minimumCallerDepth int
-
- // Used for caller information initialisation
- callerInitOnce sync.Once
-)
-
-const (
- maximumCallerDepth int = 25
- knownLogrusFrames int = 4
-)
-
-func init() {
- // start at the bottom of the stack before the package-name cache is primed
- minimumCallerDepth = 1
-}
-
-// Defines the key when adding errors using WithError.
-var ErrorKey = "error"
-
-// An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
-// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
-// reused and passed around as much as you wish to avoid field duplication.
-type Entry struct {
- Logger *Logger
-
- // Contains all the fields set by the user.
- Data Fields
-
- // Time at which the log entry was created
- Time time.Time
-
- // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
- // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
- Level Level
-
- // Calling method, with package name
- Caller *runtime.Frame
-
- // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
- Message string
-
- // When formatter is called in entry.log(), a Buffer may be set to entry
- Buffer *bytes.Buffer
-
- // Contains the context set by the user. Useful for hook processing etc.
- Context context.Context
-
- // err may contain a field formatting error
- err string
-}
-
-func NewEntry(logger *Logger) *Entry {
- return &Entry{
- Logger: logger,
- // Default is three fields, plus one optional. Give a little extra room.
- Data: make(Fields, 6),
- }
-}
-
-func (entry *Entry) Dup() *Entry {
- data := make(Fields, len(entry.Data))
- for k, v := range entry.Data {
- data[k] = v
- }
- return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
-}
-
-// Returns the bytes representation of this entry from the formatter.
-func (entry *Entry) Bytes() ([]byte, error) {
- return entry.Logger.Formatter.Format(entry)
-}
-
-// Returns the string representation from the reader and ultimately the
-// formatter.
-func (entry *Entry) String() (string, error) {
- serialized, err := entry.Bytes()
- if err != nil {
- return "", err
- }
- str := string(serialized)
- return str, nil
-}
-
-// Add an error as single field (using the key defined in ErrorKey) to the Entry.
-func (entry *Entry) WithError(err error) *Entry {
- return entry.WithField(ErrorKey, err)
-}
-
-// Add a context to the Entry.
-func (entry *Entry) WithContext(ctx context.Context) *Entry {
- dataCopy := make(Fields, len(entry.Data))
- for k, v := range entry.Data {
- dataCopy[k] = v
- }
- return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
-}
-
-// Add a single field to the Entry.
-func (entry *Entry) WithField(key string, value interface{}) *Entry {
- return entry.WithFields(Fields{key: value})
-}
-
-// Add a map of fields to the Entry.
-func (entry *Entry) WithFields(fields Fields) *Entry {
- data := make(Fields, len(entry.Data)+len(fields))
- for k, v := range entry.Data {
- data[k] = v
- }
- fieldErr := entry.err
- for k, v := range fields {
- isErrField := false
- if t := reflect.TypeOf(v); t != nil {
- switch {
- case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
- isErrField = true
- }
- }
- if isErrField {
- tmp := fmt.Sprintf("can not add field %q", k)
- if fieldErr != "" {
- fieldErr = entry.err + ", " + tmp
- } else {
- fieldErr = tmp
- }
- } else {
- data[k] = v
- }
- }
- return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
-}
-
-// Overrides the time of the Entry.
-func (entry *Entry) WithTime(t time.Time) *Entry {
- dataCopy := make(Fields, len(entry.Data))
- for k, v := range entry.Data {
- dataCopy[k] = v
- }
- return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
-}
-
-// getPackageName reduces a fully qualified function name to the package name
-// There really ought to be a better way...
-func getPackageName(f string) string {
- for {
- lastPeriod := strings.LastIndex(f, ".")
- lastSlash := strings.LastIndex(f, "/")
- if lastPeriod > lastSlash {
- f = f[:lastPeriod]
- } else {
- break
- }
- }
-
- return f
-}
-
-// getCaller retrieves the name of the first non-logrus calling function
-func getCaller() *runtime.Frame {
- // cache this package's fully-qualified name
- callerInitOnce.Do(func() {
- pcs := make([]uintptr, maximumCallerDepth)
- _ = runtime.Callers(0, pcs)
-
- // dynamically get the package name and the minimum caller depth
- for i := 0; i < maximumCallerDepth; i++ {
- funcName := runtime.FuncForPC(pcs[i]).Name()
- if strings.Contains(funcName, "getCaller") {
- logrusPackage = getPackageName(funcName)
- break
- }
- }
-
- minimumCallerDepth = knownLogrusFrames
- })
-
- // Restrict the lookback frames to avoid runaway lookups
- pcs := make([]uintptr, maximumCallerDepth)
- depth := runtime.Callers(minimumCallerDepth, pcs)
- frames := runtime.CallersFrames(pcs[:depth])
-
- for f, again := frames.Next(); again; f, again = frames.Next() {
- pkg := getPackageName(f.Function)
-
- // If the caller isn't part of this package, we're done
- if pkg != logrusPackage {
- return &f //nolint:scopelint
- }
- }
-
- // if we got here, we failed to find the caller's context
- return nil
-}
-
-func (entry Entry) HasCaller() (has bool) {
- return entry.Logger != nil &&
- entry.Logger.ReportCaller &&
- entry.Caller != nil
-}
-
-func (entry *Entry) log(level Level, msg string) {
- var buffer *bytes.Buffer
-
- newEntry := entry.Dup()
-
- if newEntry.Time.IsZero() {
- newEntry.Time = time.Now()
- }
-
- newEntry.Level = level
- newEntry.Message = msg
-
- newEntry.Logger.mu.Lock()
- reportCaller := newEntry.Logger.ReportCaller
- newEntry.Logger.mu.Unlock()
-
- if reportCaller {
- newEntry.Caller = getCaller()
- }
-
- newEntry.fireHooks()
-
- buffer = getBuffer()
- defer func() {
- newEntry.Buffer = nil
- putBuffer(buffer)
- }()
- buffer.Reset()
- newEntry.Buffer = buffer
-
- newEntry.write()
-
- newEntry.Buffer = nil
-
- // To avoid Entry#log() returning a value that only would make sense for
- // panic() to use in Entry#Panic(), we avoid the allocation by checking
- // directly here.
- if level <= PanicLevel {
- panic(newEntry)
- }
-}
-
-func (entry *Entry) fireHooks() {
- var tmpHooks LevelHooks
- entry.Logger.mu.Lock()
- tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
- for k, v := range entry.Logger.Hooks {
- tmpHooks[k] = v
- }
- entry.Logger.mu.Unlock()
-
- err := tmpHooks.Fire(entry.Level, entry)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
- }
-}
-
-func (entry *Entry) write() {
- serialized, err := entry.Logger.Formatter.Format(entry)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
- return
- }
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
- if _, err := entry.Logger.Out.Write(serialized); err != nil {
- fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
- }
-}
-
-func (entry *Entry) Log(level Level, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(level) {
- entry.log(level, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Trace(args ...interface{}) {
- entry.Log(TraceLevel, args...)
-}
-
-func (entry *Entry) Debug(args ...interface{}) {
- entry.Log(DebugLevel, args...)
-}
-
-func (entry *Entry) Print(args ...interface{}) {
- entry.Info(args...)
-}
-
-func (entry *Entry) Info(args ...interface{}) {
- entry.Log(InfoLevel, args...)
-}
-
-func (entry *Entry) Warn(args ...interface{}) {
- entry.Log(WarnLevel, args...)
-}
-
-func (entry *Entry) Warning(args ...interface{}) {
- entry.Warn(args...)
-}
-
-func (entry *Entry) Error(args ...interface{}) {
- entry.Log(ErrorLevel, args...)
-}
-
-func (entry *Entry) Fatal(args ...interface{}) {
- entry.Log(FatalLevel, args...)
- entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panic(args ...interface{}) {
- entry.Log(PanicLevel, args...)
-}
-
-// Entry Printf family functions
-
-func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(level) {
- entry.Log(level, fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Tracef(format string, args ...interface{}) {
- entry.Logf(TraceLevel, format, args...)
-}
-
-func (entry *Entry) Debugf(format string, args ...interface{}) {
- entry.Logf(DebugLevel, format, args...)
-}
-
-func (entry *Entry) Infof(format string, args ...interface{}) {
- entry.Logf(InfoLevel, format, args...)
-}
-
-func (entry *Entry) Printf(format string, args ...interface{}) {
- entry.Infof(format, args...)
-}
-
-func (entry *Entry) Warnf(format string, args ...interface{}) {
- entry.Logf(WarnLevel, format, args...)
-}
-
-func (entry *Entry) Warningf(format string, args ...interface{}) {
- entry.Warnf(format, args...)
-}
-
-func (entry *Entry) Errorf(format string, args ...interface{}) {
- entry.Logf(ErrorLevel, format, args...)
-}
-
-func (entry *Entry) Fatalf(format string, args ...interface{}) {
- entry.Logf(FatalLevel, format, args...)
- entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panicf(format string, args ...interface{}) {
- entry.Logf(PanicLevel, format, args...)
-}
-
-// Entry Println family functions
-
-func (entry *Entry) Logln(level Level, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(level) {
- entry.Log(level, entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Traceln(args ...interface{}) {
- entry.Logln(TraceLevel, args...)
-}
-
-func (entry *Entry) Debugln(args ...interface{}) {
- entry.Logln(DebugLevel, args...)
-}
-
-func (entry *Entry) Infoln(args ...interface{}) {
- entry.Logln(InfoLevel, args...)
-}
-
-func (entry *Entry) Println(args ...interface{}) {
- entry.Infoln(args...)
-}
-
-func (entry *Entry) Warnln(args ...interface{}) {
- entry.Logln(WarnLevel, args...)
-}
-
-func (entry *Entry) Warningln(args ...interface{}) {
- entry.Warnln(args...)
-}
-
-func (entry *Entry) Errorln(args ...interface{}) {
- entry.Logln(ErrorLevel, args...)
-}
-
-func (entry *Entry) Fatalln(args ...interface{}) {
- entry.Logln(FatalLevel, args...)
- entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panicln(args ...interface{}) {
- entry.Logln(PanicLevel, args...)
-}
-
-// sprintlnn => Sprintln without the trailing newline. This is to get the
-// behavior of fmt.Sprintln, where spaces are always added between operands,
-// regardless of their type. Instead of vendoring the Sprintln implementation
-// to spare a string allocation, we do the simplest thing.
-func (entry *Entry) sprintlnn(args ...interface{}) string {
- msg := fmt.Sprintln(args...)
- return msg[:len(msg)-1]
-}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
deleted file mode 100644
index 017c30c..0000000
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package logrus
-
-import (
- "context"
- "io"
- "time"
-)
-
-var (
- // std is the name of the standard logger in stdlib `log`
- std = New()
-)
-
-func StandardLogger() *Logger {
- return std
-}
-
-// SetOutput sets the standard logger output.
-func SetOutput(out io.Writer) {
- std.SetOutput(out)
-}
-
-// SetFormatter sets the standard logger formatter.
-func SetFormatter(formatter Formatter) {
- std.SetFormatter(formatter)
-}
-
-// SetReportCaller sets whether the standard logger will include the calling
-// method as a field.
-func SetReportCaller(include bool) {
- std.SetReportCaller(include)
-}
-
-// SetLevel sets the standard logger level.
-func SetLevel(level Level) {
- std.SetLevel(level)
-}
-
-// GetLevel returns the standard logger level.
-func GetLevel() Level {
- return std.GetLevel()
-}
-
-// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
-func IsLevelEnabled(level Level) bool {
- return std.IsLevelEnabled(level)
-}
-
-// AddHook adds a hook to the standard logger hooks.
-func AddHook(hook Hook) {
- std.AddHook(hook)
-}
-
-// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
-func WithError(err error) *Entry {
- return std.WithField(ErrorKey, err)
-}
-
-// WithContext creates an entry from the standard logger and adds a context to it.
-func WithContext(ctx context.Context) *Entry {
- return std.WithContext(ctx)
-}
-
-// WithField creates an entry from the standard logger and adds a field to
-// it. If you want multiple fields, use `WithFields`.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithField(key string, value interface{}) *Entry {
- return std.WithField(key, value)
-}
-
-// WithFields creates an entry from the standard logger and adds multiple
-// fields to it. This is simply a helper for `WithField`, invoking it
-// once for each field.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithFields(fields Fields) *Entry {
- return std.WithFields(fields)
-}
-
-// WithTime creates an entry from the standard logger and overrides the time of
-// logs generated with it.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithTime(t time.Time) *Entry {
- return std.WithTime(t)
-}
-
-// Trace logs a message at level Trace on the standard logger.
-func Trace(args ...interface{}) {
- std.Trace(args...)
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func Debug(args ...interface{}) {
- std.Debug(args...)
-}
-
-// Print logs a message at level Info on the standard logger.
-func Print(args ...interface{}) {
- std.Print(args...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func Info(args ...interface{}) {
- std.Info(args...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func Warn(args ...interface{}) {
- std.Warn(args...)
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func Warning(args ...interface{}) {
- std.Warning(args...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func Error(args ...interface{}) {
- std.Error(args...)
-}
-
-// Panic logs a message at level Panic on the standard logger.
-func Panic(args ...interface{}) {
- std.Panic(args...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
-func Fatal(args ...interface{}) {
- std.Fatal(args...)
-}
-
-// TraceFn logs a message from a func at level Trace on the standard logger.
-func TraceFn(fn LogFunction) {
- std.TraceFn(fn)
-}
-
-// DebugFn logs a message from a func at level Debug on the standard logger.
-func DebugFn(fn LogFunction) {
- std.DebugFn(fn)
-}
-
-// PrintFn logs a message from a func at level Info on the standard logger.
-func PrintFn(fn LogFunction) {
- std.PrintFn(fn)
-}
-
-// InfoFn logs a message from a func at level Info on the standard logger.
-func InfoFn(fn LogFunction) {
- std.InfoFn(fn)
-}
-
-// WarnFn logs a message from a func at level Warn on the standard logger.
-func WarnFn(fn LogFunction) {
- std.WarnFn(fn)
-}
-
-// WarningFn logs a message from a func at level Warn on the standard logger.
-func WarningFn(fn LogFunction) {
- std.WarningFn(fn)
-}
-
-// ErrorFn logs a message from a func at level Error on the standard logger.
-func ErrorFn(fn LogFunction) {
- std.ErrorFn(fn)
-}
-
-// PanicFn logs a message from a func at level Panic on the standard logger.
-func PanicFn(fn LogFunction) {
- std.PanicFn(fn)
-}
-
-// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1.
-func FatalFn(fn LogFunction) {
- std.FatalFn(fn)
-}
-
-// Tracef logs a message at level Trace on the standard logger.
-func Tracef(format string, args ...interface{}) {
- std.Tracef(format, args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func Debugf(format string, args ...interface{}) {
- std.Debugf(format, args...)
-}
-
-// Printf logs a message at level Info on the standard logger.
-func Printf(format string, args ...interface{}) {
- std.Printf(format, args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func Infof(format string, args ...interface{}) {
- std.Infof(format, args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func Warnf(format string, args ...interface{}) {
- std.Warnf(format, args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func Warningf(format string, args ...interface{}) {
- std.Warningf(format, args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func Errorf(format string, args ...interface{}) {
- std.Errorf(format, args...)
-}
-
-// Panicf logs a message at level Panic on the standard logger.
-func Panicf(format string, args ...interface{}) {
- std.Panicf(format, args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
-func Fatalf(format string, args ...interface{}) {
- std.Fatalf(format, args...)
-}
-
-// Traceln logs a message at level Trace on the standard logger.
-func Traceln(args ...interface{}) {
- std.Traceln(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger.
-func Debugln(args ...interface{}) {
- std.Debugln(args...)
-}
-
-// Println logs a message at level Info on the standard logger.
-func Println(args ...interface{}) {
- std.Println(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger.
-func Infoln(args ...interface{}) {
- std.Infoln(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger.
-func Warnln(args ...interface{}) {
- std.Warnln(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger.
-func Warningln(args ...interface{}) {
- std.Warningln(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger.
-func Errorln(args ...interface{}) {
- std.Errorln(args...)
-}
-
-// Panicln logs a message at level Panic on the standard logger.
-func Panicln(args ...interface{}) {
- std.Panicln(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
-func Fatalln(args ...interface{}) {
- std.Fatalln(args...)
-}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
deleted file mode 100644
index 4088837..0000000
--- a/vendor/github.com/sirupsen/logrus/formatter.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package logrus
-
-import "time"
-
-// Default key names for the default fields
-const (
- defaultTimestampFormat = time.RFC3339
- FieldKeyMsg = "msg"
- FieldKeyLevel = "level"
- FieldKeyTime = "time"
- FieldKeyLogrusError = "logrus_error"
- FieldKeyFunc = "func"
- FieldKeyFile = "file"
-)
-
-// The Formatter interface is used to implement a custom Formatter. It takes an
-// `Entry`. It exposes all the fields, including the default ones:
-//
-// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
-// * `entry.Data["time"]`. The timestamp.
-// * `entry.Data["level"]. The level the entry was logged at.
-//
-// Any additional fields added with `WithField` or `WithFields` are also in
-// `entry.Data`. Format is expected to return an array of bytes which are then
-// logged to `logger.Out`.
-type Formatter interface {
- Format(*Entry) ([]byte, error)
-}
-
-// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
-// dumping them. If this code wasn't there, then:
-//
-// logrus.WithField("level", 1).Info("hello")
-//
-// would just silently drop the user-provided level. Instead, with this code
-// it'll be logged as:
-//
-// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
-//
-// It's not exported because it's still using Data in an opinionated way. It's to
-// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
- timeKey := fieldMap.resolve(FieldKeyTime)
- if t, ok := data[timeKey]; ok {
- data["fields."+timeKey] = t
- delete(data, timeKey)
- }
-
- msgKey := fieldMap.resolve(FieldKeyMsg)
- if m, ok := data[msgKey]; ok {
- data["fields."+msgKey] = m
- delete(data, msgKey)
- }
-
- levelKey := fieldMap.resolve(FieldKeyLevel)
- if l, ok := data[levelKey]; ok {
- data["fields."+levelKey] = l
- delete(data, levelKey)
- }
-
- logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
- if l, ok := data[logrusErrKey]; ok {
- data["fields."+logrusErrKey] = l
- delete(data, logrusErrKey)
- }
-
- // If reportCaller is not set, 'func' will not conflict.
- if reportCaller {
- funcKey := fieldMap.resolve(FieldKeyFunc)
- if l, ok := data[funcKey]; ok {
- data["fields."+funcKey] = l
- }
- fileKey := fieldMap.resolve(FieldKeyFile)
- if l, ok := data[fileKey]; ok {
- data["fields."+fileKey] = l
- }
- }
-}
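-
-// Example (editor's sketch, not part of the upstream file): a custom Formatter
-// implementation as described by the interface above. Assumes the caller's
-// package imports fmt, time, and logrus.
-//
-//	type lineFormatter struct{}
-//
-//	func (lineFormatter) Format(e *logrus.Entry) ([]byte, error) {
-//		return []byte(fmt.Sprintf("%s | %s | %s\n",
-//			e.Time.Format(time.RFC3339), e.Level, e.Message)), nil
-//	}
-//
-//	logger := logrus.New()
-//	logger.SetFormatter(lineFormatter{})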
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
deleted file mode 100644
index 3f151cd..0000000
--- a/vendor/github.com/sirupsen/logrus/hooks.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package logrus
-
-// A hook to be fired when logging on the logging levels returned from
-// `Levels()` on your implementation of the interface. Note that this is not
-// fired in a goroutine or a channel with workers; you should handle such
-// functionality yourself if your call is non-blocking and you don't wish for
-// the logging calls for levels returned from `Levels()` to block.
-type Hook interface {
- Levels() []Level
- Fire(*Entry) error
-}
-
-// Internal type for storing the hooks on a logger instance.
-type LevelHooks map[Level][]Hook
-
-// Add a hook to an instance of logger. This is called with
-// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks LevelHooks) Add(hook Hook) {
- for _, level := range hook.Levels() {
- hooks[level] = append(hooks[level], hook)
- }
-}
-
-// Fire all the hooks for the passed level. Used by `entry.log` to fire
-// appropriate hooks for a log entry.
-func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
- for _, hook := range hooks[level] {
- if err := hook.Fire(entry); err != nil {
- return err
- }
- }
-
- return nil
-}
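-
-// Example (editor's sketch, not part of the upstream file): an application-side
-// Hook implementation that mirrors error-and-above messages to stderr.
-//
-//	package main
-//
-//	import (
-//		"fmt"
-//		"os"
-//
-//		"github.com/sirupsen/logrus"
-//	)
-//
-//	type stderrMirrorHook struct{}
-//
-//	func (stderrMirrorHook) Levels() []logrus.Level {
-//		return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
-//	}
-//
-//	func (stderrMirrorHook) Fire(e *logrus.Entry) error {
-//		_, err := fmt.Fprintln(os.Stderr, "mirrored:", e.Message)
-//		return err
-//	}
-//
-//	func main() {
-//		logger := logrus.New()
-//		logger.AddHook(stderrMirrorHook{})
-//		logger.Error("disk almost full")
-//	}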
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
deleted file mode 100644
index c96dc56..0000000
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "runtime"
-)
-
-type fieldKey string
-
-// FieldMap allows customization of the key names for default fields.
-type FieldMap map[fieldKey]string
-
-func (f FieldMap) resolve(key fieldKey) string {
- if k, ok := f[key]; ok {
- return k
- }
-
- return string(key)
-}
-
-// JSONFormatter formats logs into parsable json
-type JSONFormatter struct {
- // TimestampFormat sets the format used for marshaling timestamps.
- // The format to use is the same as for time.Format or time.Parse from the
- // standard library.
- // The standard library already provides a set of predefined formats.
- TimestampFormat string
-
- // DisableTimestamp allows disabling automatic timestamps in output
- DisableTimestamp bool
-
- // DisableHTMLEscape allows disabling html escaping in output
- DisableHTMLEscape bool
-
- // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
- DataKey string
-
- // FieldMap allows users to customize the names of keys for default fields.
- // As an example:
- // formatter := &JSONFormatter{
- // FieldMap: FieldMap{
- // FieldKeyTime: "@timestamp",
- // FieldKeyLevel: "@level",
- // FieldKeyMsg: "@message",
- // FieldKeyFunc: "@caller",
- // },
- // }
- FieldMap FieldMap
-
- // CallerPrettyfier can be set by the user to modify the content
- // of the function and file keys in the json data when ReportCaller is
- // activated. If any of the returned values is the empty string, the
- // corresponding key will be removed from the json fields.
- CallerPrettyfier func(*runtime.Frame) (function string, file string)
-
- // PrettyPrint will indent all json logs
- PrettyPrint bool
-}
-
-// Format renders a single log entry
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
- data := make(Fields, len(entry.Data)+4)
- for k, v := range entry.Data {
- switch v := v.(type) {
- case error:
- // Otherwise errors are ignored by `encoding/json`
- // https://github.com/sirupsen/logrus/issues/137
- data[k] = v.Error()
- default:
- data[k] = v
- }
- }
-
- if f.DataKey != "" {
- newData := make(Fields, 4)
- newData[f.DataKey] = data
- data = newData
- }
-
- prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
-
- timestampFormat := f.TimestampFormat
- if timestampFormat == "" {
- timestampFormat = defaultTimestampFormat
- }
-
- if entry.err != "" {
- data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
- }
- if !f.DisableTimestamp {
- data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
- }
- data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
- data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
- if entry.HasCaller() {
- funcVal := entry.Caller.Function
- fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
- if f.CallerPrettyfier != nil {
- funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
- }
- if funcVal != "" {
- data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
- }
- if fileVal != "" {
- data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
- }
- }
-
- var b *bytes.Buffer
- if entry.Buffer != nil {
- b = entry.Buffer
- } else {
- b = &bytes.Buffer{}
- }
-
- encoder := json.NewEncoder(b)
- encoder.SetEscapeHTML(!f.DisableHTMLEscape)
- if f.PrettyPrint {
- encoder.SetIndent("", " ")
- }
- if err := encoder.Encode(data); err != nil {
- return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
- }
-
- return b.Bytes(), nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
deleted file mode 100644
index 3377044..0000000
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ /dev/null
@@ -1,404 +0,0 @@
-package logrus
-
-import (
- "context"
- "io"
- "os"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// LogFunction is used for big messages: it can be more efficient to pass a
-// function and only call it if the log level is actually enabled, rather than
-// generating the log message and then checking if the level is enabled.
-type LogFunction func() []interface{}
-
-type Logger struct {
- // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
- // file, or leave it default which is `os.Stderr`. You can also set this to
- // something more adventurous, such as logging to Kafka.
- Out io.Writer
- // Hooks for the logger instance. These allow firing events based on logging
- // levels and log entries. For example, to send errors to an error tracking
- // service, log to StatsD or dump the core on fatal errors.
- Hooks LevelHooks
- // All log entries pass through the formatter before being logged to Out. The
- // included formatters are `TextFormatter` and `JSONFormatter` for which
- // TextFormatter is the default. In development (when a TTY is attached) it
- // logs with colors, but to a file it wouldn't. You can easily implement your
- // own that implements the `Formatter` interface, see the `README` or included
- // formatters for examples.
- Formatter Formatter
-
- // Flag for whether to log caller info (off by default)
- ReportCaller bool
-
- // The logging level the logger should log at. This is typically (and defaults
- // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
- // logged.
- Level Level
- // Used to sync writing to the log. Locking is enabled by default.
- mu MutexWrap
- // Reusable empty entry
- entryPool sync.Pool
- // Function to exit the application, defaults to `os.Exit()`
- ExitFunc exitFunc
-}
-
-type exitFunc func(int)
-
-type MutexWrap struct {
- lock sync.Mutex
- disabled bool
-}
-
-func (mw *MutexWrap) Lock() {
- if !mw.disabled {
- mw.lock.Lock()
- }
-}
-
-func (mw *MutexWrap) Unlock() {
- if !mw.disabled {
- mw.lock.Unlock()
- }
-}
-
-func (mw *MutexWrap) Disable() {
- mw.disabled = true
-}
-
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
-// instantiate your own:
-//
-// var log = &logrus.Logger{
-// Out: os.Stderr,
-// Formatter: new(logrus.TextFormatter),
-// Hooks: make(logrus.LevelHooks),
-// Level: logrus.DebugLevel,
-// }
-//
-// It's recommended to make this a global instance called `log`.
-func New() *Logger {
- return &Logger{
- Out: os.Stderr,
- Formatter: new(TextFormatter),
- Hooks: make(LevelHooks),
- Level: InfoLevel,
- ExitFunc: os.Exit,
- ReportCaller: false,
- }
-}
-
-func (logger *Logger) newEntry() *Entry {
- entry, ok := logger.entryPool.Get().(*Entry)
- if ok {
- return entry
- }
- return NewEntry(logger)
-}
-
-func (logger *Logger) releaseEntry(entry *Entry) {
- entry.Data = map[string]interface{}{}
- logger.entryPool.Put(entry)
-}
-
-// WithField allocates a new entry and adds a field to it.
-// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to
-// this new returned entry.
-// If you want multiple fields, use `WithFields`.
-func (logger *Logger) WithField(key string, value interface{}) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithField(key, value)
-}
-
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
-func (logger *Logger) WithFields(fields Fields) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithFields(fields)
-}
-
-// Add an error as single field to the log entry. All it does is call
-// `WithError` for the given `error`.
-func (logger *Logger) WithError(err error) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithError(err)
-}
-
-// Add a context to the log entry.
-func (logger *Logger) WithContext(ctx context.Context) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithContext(ctx)
-}
-
-// Overrides the time of the log entry.
-func (logger *Logger) WithTime(t time.Time) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithTime(t)
-}
-
-func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
- if logger.IsLevelEnabled(level) {
- entry := logger.newEntry()
- entry.Logf(level, format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Tracef(format string, args ...interface{}) {
- logger.Logf(TraceLevel, format, args...)
-}
-
-func (logger *Logger) Debugf(format string, args ...interface{}) {
- logger.Logf(DebugLevel, format, args...)
-}
-
-func (logger *Logger) Infof(format string, args ...interface{}) {
- logger.Logf(InfoLevel, format, args...)
-}
-
-func (logger *Logger) Printf(format string, args ...interface{}) {
- entry := logger.newEntry()
- entry.Printf(format, args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnf(format string, args ...interface{}) {
- logger.Logf(WarnLevel, format, args...)
-}
-
-func (logger *Logger) Warningf(format string, args ...interface{}) {
- logger.Warnf(format, args...)
-}
-
-func (logger *Logger) Errorf(format string, args ...interface{}) {
- logger.Logf(ErrorLevel, format, args...)
-}
-
-func (logger *Logger) Fatalf(format string, args ...interface{}) {
- logger.Logf(FatalLevel, format, args...)
- logger.Exit(1)
-}
-
-func (logger *Logger) Panicf(format string, args ...interface{}) {
- logger.Logf(PanicLevel, format, args...)
-}
-
-func (logger *Logger) Log(level Level, args ...interface{}) {
- if logger.IsLevelEnabled(level) {
- entry := logger.newEntry()
- entry.Log(level, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) LogFn(level Level, fn LogFunction) {
- if logger.IsLevelEnabled(level) {
- entry := logger.newEntry()
- entry.Log(level, fn()...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Trace(args ...interface{}) {
- logger.Log(TraceLevel, args...)
-}
-
-func (logger *Logger) Debug(args ...interface{}) {
- logger.Log(DebugLevel, args...)
-}
-
-func (logger *Logger) Info(args ...interface{}) {
- logger.Log(InfoLevel, args...)
-}
-
-func (logger *Logger) Print(args ...interface{}) {
- entry := logger.newEntry()
- entry.Print(args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warn(args ...interface{}) {
- logger.Log(WarnLevel, args...)
-}
-
-func (logger *Logger) Warning(args ...interface{}) {
- logger.Warn(args...)
-}
-
-func (logger *Logger) Error(args ...interface{}) {
- logger.Log(ErrorLevel, args...)
-}
-
-func (logger *Logger) Fatal(args ...interface{}) {
- logger.Log(FatalLevel, args...)
- logger.Exit(1)
-}
-
-func (logger *Logger) Panic(args ...interface{}) {
- logger.Log(PanicLevel, args...)
-}
-
-func (logger *Logger) TraceFn(fn LogFunction) {
- logger.LogFn(TraceLevel, fn)
-}
-
-func (logger *Logger) DebugFn(fn LogFunction) {
- logger.LogFn(DebugLevel, fn)
-}
-
-func (logger *Logger) InfoFn(fn LogFunction) {
- logger.LogFn(InfoLevel, fn)
-}
-
-func (logger *Logger) PrintFn(fn LogFunction) {
- entry := logger.newEntry()
- entry.Print(fn()...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) WarnFn(fn LogFunction) {
- logger.LogFn(WarnLevel, fn)
-}
-
-func (logger *Logger) WarningFn(fn LogFunction) {
- logger.WarnFn(fn)
-}
-
-func (logger *Logger) ErrorFn(fn LogFunction) {
- logger.LogFn(ErrorLevel, fn)
-}
-
-func (logger *Logger) FatalFn(fn LogFunction) {
- logger.LogFn(FatalLevel, fn)
- logger.Exit(1)
-}
-
-func (logger *Logger) PanicFn(fn LogFunction) {
- logger.LogFn(PanicLevel, fn)
-}
-
-func (logger *Logger) Logln(level Level, args ...interface{}) {
- if logger.IsLevelEnabled(level) {
- entry := logger.newEntry()
- entry.Logln(level, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Traceln(args ...interface{}) {
- logger.Logln(TraceLevel, args...)
-}
-
-func (logger *Logger) Debugln(args ...interface{}) {
- logger.Logln(DebugLevel, args...)
-}
-
-func (logger *Logger) Infoln(args ...interface{}) {
- logger.Logln(InfoLevel, args...)
-}
-
-func (logger *Logger) Println(args ...interface{}) {
- entry := logger.newEntry()
- entry.Println(args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnln(args ...interface{}) {
- logger.Logln(WarnLevel, args...)
-}
-
-func (logger *Logger) Warningln(args ...interface{}) {
- logger.Warnln(args...)
-}
-
-func (logger *Logger) Errorln(args ...interface{}) {
- logger.Logln(ErrorLevel, args...)
-}
-
-func (logger *Logger) Fatalln(args ...interface{}) {
- logger.Logln(FatalLevel, args...)
- logger.Exit(1)
-}
-
-func (logger *Logger) Panicln(args ...interface{}) {
- logger.Logln(PanicLevel, args...)
-}
-
-func (logger *Logger) Exit(code int) {
- runHandlers()
- if logger.ExitFunc == nil {
- logger.ExitFunc = os.Exit
- }
- logger.ExitFunc(code)
-}
-
-// When a file is opened in append mode, it's safe to
-// write to it concurrently (for messages under 4k on Linux).
-// In these cases the user can choose to disable the lock.
-func (logger *Logger) SetNoLock() {
- logger.mu.Disable()
-}
-
-func (logger *Logger) level() Level {
- return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
-}
-
-// SetLevel sets the logger level.
-func (logger *Logger) SetLevel(level Level) {
- atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
-}
-
-// GetLevel returns the logger level.
-func (logger *Logger) GetLevel() Level {
- return logger.level()
-}
-
-// AddHook adds a hook to the logger hooks.
-func (logger *Logger) AddHook(hook Hook) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Hooks.Add(hook)
-}
-
-// IsLevelEnabled checks if the log level of the logger is greater than the level param
-func (logger *Logger) IsLevelEnabled(level Level) bool {
- return logger.level() >= level
-}
-
-// SetFormatter sets the logger formatter.
-func (logger *Logger) SetFormatter(formatter Formatter) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Formatter = formatter
-}
-
-// SetOutput sets the logger output.
-func (logger *Logger) SetOutput(output io.Writer) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Out = output
-}
-
-func (logger *Logger) SetReportCaller(reportCaller bool) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.ReportCaller = reportCaller
-}
-
-// ReplaceHooks replaces the logger hooks and returns the old ones
-func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
- logger.mu.Lock()
- oldHooks := logger.Hooks
- logger.Hooks = hooks
- logger.mu.Unlock()
- return oldHooks
-}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
deleted file mode 100644
index 2f16224..0000000
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package logrus
-
-import (
- "fmt"
- "log"
- "strings"
-)
-
-// Fields type, used to pass to `WithFields`.
-type Fields map[string]interface{}
-
-// Level type
-type Level uint32
-
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
-func (level Level) String() string {
- if b, err := level.MarshalText(); err == nil {
- return string(b)
- } else {
- return "unknown"
- }
-}
-
-// ParseLevel takes a string level and returns the Logrus log level constant.
-func ParseLevel(lvl string) (Level, error) {
- switch strings.ToLower(lvl) {
- case "panic":
- return PanicLevel, nil
- case "fatal":
- return FatalLevel, nil
- case "error":
- return ErrorLevel, nil
- case "warn", "warning":
- return WarnLevel, nil
- case "info":
- return InfoLevel, nil
- case "debug":
- return DebugLevel, nil
- case "trace":
- return TraceLevel, nil
- }
-
- var l Level
- return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (level *Level) UnmarshalText(text []byte) error {
- l, err := ParseLevel(string(text))
- if err != nil {
- return err
- }
-
- *level = l
-
- return nil
-}
-
-func (level Level) MarshalText() ([]byte, error) {
- switch level {
- case TraceLevel:
- return []byte("trace"), nil
- case DebugLevel:
- return []byte("debug"), nil
- case InfoLevel:
- return []byte("info"), nil
- case WarnLevel:
- return []byte("warning"), nil
- case ErrorLevel:
- return []byte("error"), nil
- case FatalLevel:
- return []byte("fatal"), nil
- case PanicLevel:
- return []byte("panic"), nil
- }
-
- return nil, fmt.Errorf("not a valid logrus level %d", level)
-}
-
-// A constant exposing all logging levels
-var AllLevels = []Level{
- PanicLevel,
- FatalLevel,
- ErrorLevel,
- WarnLevel,
- InfoLevel,
- DebugLevel,
- TraceLevel,
-}
-
-// These are the different logging levels. You can set the logging level to log
-// on your instance of logger, obtained with `logrus.New()`.
-const (
- // PanicLevel level, highest level of severity. Logs and then calls panic with the
- // message passed to Debug, Info, ...
- PanicLevel Level = iota
- // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
- // logging level is set to Panic.
- FatalLevel
- // ErrorLevel level. Logs. Used for errors that should definitely be noted.
- // Commonly used for hooks to send errors to an error tracking service.
- ErrorLevel
- // WarnLevel level. Non-critical entries that deserve eyes.
- WarnLevel
- // InfoLevel level. General operational entries about what's going on inside the
- // application.
- InfoLevel
- // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
- DebugLevel
- // TraceLevel level. Designates finer-grained informational events than the Debug.
- TraceLevel
-)
-
-// Won't compile if StdLogger can't be realized by a log.Logger
-var (
- _ StdLogger = &log.Logger{}
- _ StdLogger = &Entry{}
- _ StdLogger = &Logger{}
-)
-
-// StdLogger is what your logrus-enabled library should take; that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface; this is the closest we get, unfortunately.
-type StdLogger interface {
- Print(...interface{})
- Printf(string, ...interface{})
- Println(...interface{})
-
- Fatal(...interface{})
- Fatalf(string, ...interface{})
- Fatalln(...interface{})
-
- Panic(...interface{})
- Panicf(string, ...interface{})
- Panicln(...interface{})
-}
-
-// The FieldLogger interface generalizes the Entry and Logger types
-type FieldLogger interface {
- WithField(key string, value interface{}) *Entry
- WithFields(fields Fields) *Entry
- WithError(err error) *Entry
-
- Debugf(format string, args ...interface{})
- Infof(format string, args ...interface{})
- Printf(format string, args ...interface{})
- Warnf(format string, args ...interface{})
- Warningf(format string, args ...interface{})
- Errorf(format string, args ...interface{})
- Fatalf(format string, args ...interface{})
- Panicf(format string, args ...interface{})
-
- Debug(args ...interface{})
- Info(args ...interface{})
- Print(args ...interface{})
- Warn(args ...interface{})
- Warning(args ...interface{})
- Error(args ...interface{})
- Fatal(args ...interface{})
- Panic(args ...interface{})
-
- Debugln(args ...interface{})
- Infoln(args ...interface{})
- Println(args ...interface{})
- Warnln(args ...interface{})
- Warningln(args ...interface{})
- Errorln(args ...interface{})
- Fatalln(args ...interface{})
- Panicln(args ...interface{})
-
- // IsDebugEnabled() bool
- // IsInfoEnabled() bool
- // IsWarnEnabled() bool
- // IsErrorEnabled() bool
- // IsFatalEnabled() bool
- // IsPanicEnabled() bool
-}
-
-// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
-// here for consistency. Do not use. Use Logger or Entry instead.
-type Ext1FieldLogger interface {
- FieldLogger
- Tracef(format string, args ...interface{})
- Trace(args ...interface{})
- Traceln(args ...interface{})
-}
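-
-// Example (editor's sketch, not part of the upstream file): applying a level
-// parsed from configuration, falling back to InfoLevel. Assumes the caller's
-// package imports os and logrus.
-//
-//	lvl, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL"))
-//	if err != nil {
-//		lvl = logrus.InfoLevel
-//	}
-//	logger := logrus.New()
-//	logger.SetLevel(lvl)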
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
deleted file mode 100644
index 2403de9..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build appengine
-
-package logrus
-
-import (
- "io"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- return true
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
deleted file mode 100644
index 4997899..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build darwin dragonfly freebsd netbsd openbsd
-// +build !js
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TIOCGETA
-
-func isTerminal(fd int) bool {
- _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
- return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
deleted file mode 100644
index ebdae3e..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_js.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build js
-
-package logrus
-
-func isTerminal(fd int) bool {
- return false
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
deleted file mode 100644
index 97af92c..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build js nacl plan9
-
-package logrus
-
-import (
- "io"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- return false
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
deleted file mode 100644
index 3293fb3..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !appengine,!js,!windows,!nacl,!plan9
-
-package logrus
-
-import (
- "io"
- "os"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- switch v := w.(type) {
- case *os.File:
- return isTerminal(int(v.Fd()))
- default:
- return false
- }
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
deleted file mode 100644
index f6710b3..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package logrus
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func isTerminal(fd int) bool {
- _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
- return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
deleted file mode 100644
index 04748b8..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build linux aix zos
-// +build !js
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-
-func isTerminal(fd int) bool {
- _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
- return err == nil
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
deleted file mode 100644
index 2879eb5..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !appengine,!js,windows
-
-package logrus
-
-import (
- "io"
- "os"
-
- "golang.org/x/sys/windows"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- switch v := w.(type) {
- case *os.File:
- handle := windows.Handle(v.Fd())
- var mode uint32
- if err := windows.GetConsoleMode(handle, &mode); err != nil {
- return false
- }
- mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
- if err := windows.SetConsoleMode(handle, mode); err != nil {
- return false
- }
- return true
- }
- return false
-}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
deleted file mode 100644
index be2c6ef..0000000
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "fmt"
- "os"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "time"
- "unicode/utf8"
-)
-
-const (
- red = 31
- yellow = 33
- blue = 36
- gray = 37
-)
-
-var baseTimestamp time.Time
-
-func init() {
- baseTimestamp = time.Now()
-}
-
-// TextFormatter formats logs into text
-type TextFormatter struct {
- // Set to true to bypass checking for a TTY before outputting colors.
- ForceColors bool
-
- // Force disabling colors.
- DisableColors bool
-
- // Force quoting of all values
- ForceQuote bool
-
- // DisableQuote disables quoting for all values.
- // DisableQuote will have a lower priority than ForceQuote.
- // If both of them are set to true, quoting will be forced on all values.
- DisableQuote bool
-
- // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
- EnvironmentOverrideColors bool
-
- // Disable timestamp logging. Useful when output is redirected to a logging
- // system that already adds timestamps.
- DisableTimestamp bool
-
- // Enable logging the full timestamp when a TTY is attached instead of just
- // the time passed since beginning of execution.
- FullTimestamp bool
-
- // TimestampFormat to use for display when a full timestamp is printed.
- // The format to use is the same as for time.Format or time.Parse from the
- // standard library.
- // The standard library already provides a set of predefined formats.
- TimestampFormat string
-
- // The fields are sorted by default for a consistent output. For applications
- // that log extremely frequently and don't use the JSON formatter this may not
- // be desired.
- DisableSorting bool
-
- // The keys sorting function; when uninitialized it uses sort.Strings.
- SortingFunc func([]string)
-
- // Disables the truncation of the level text to 4 characters.
- DisableLevelTruncation bool
-
- // PadLevelText adds padding to the level text so that all the levels output at the same length.
- // PadLevelText is a superset of the DisableLevelTruncation option.
- PadLevelText bool
-
- // QuoteEmptyFields will wrap empty fields in quotes if true
- QuoteEmptyFields bool
-
- // Whether the logger's out is to a terminal
- isTerminal bool
-
- // FieldMap allows users to customize the names of keys for default fields.
- // As an example:
- // formatter := &TextFormatter{
- // FieldMap: FieldMap{
- // FieldKeyTime: "@timestamp",
- // FieldKeyLevel: "@level",
- // FieldKeyMsg: "@message"}}
- FieldMap FieldMap
-
- // CallerPrettyfier can be set by the user to modify the content
- // of the function and file keys in the data when ReportCaller is
- // activated. If any of the returned values is the empty string, the
- // corresponding key will be removed from the fields.
- CallerPrettyfier func(*runtime.Frame) (function string, file string)
-
- terminalInitOnce sync.Once
-
- // The max length of the level text, generated dynamically on init
- levelTextMaxLength int
-}
-
-func (f *TextFormatter) init(entry *Entry) {
- if entry.Logger != nil {
- f.isTerminal = checkIfTerminal(entry.Logger.Out)
- }
- // Get the max length of the level text
- for _, level := range AllLevels {
- levelTextLength := utf8.RuneCount([]byte(level.String()))
- if levelTextLength > f.levelTextMaxLength {
- f.levelTextMaxLength = levelTextLength
- }
- }
-}
-
-func (f *TextFormatter) isColored() bool {
- isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
-
- if f.EnvironmentOverrideColors {
- switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
- case ok && force != "0":
- isColored = true
- case ok && force == "0", os.Getenv("CLICOLOR") == "0":
- isColored = false
- }
- }
-
- return isColored && !f.DisableColors
-}
-
-// Format renders a single log entry
-func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
- data := make(Fields)
- for k, v := range entry.Data {
- data[k] = v
- }
- prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
- keys := make([]string, 0, len(data))
- for k := range data {
- keys = append(keys, k)
- }
-
- var funcVal, fileVal string
-
- fixedKeys := make([]string, 0, 4+len(data))
- if !f.DisableTimestamp {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
- }
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
- if entry.Message != "" {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
- }
- if entry.err != "" {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
- }
- if entry.HasCaller() {
- if f.CallerPrettyfier != nil {
- funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
- } else {
- funcVal = entry.Caller.Function
- fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
- }
-
- if funcVal != "" {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
- }
- if fileVal != "" {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
- }
- }
-
- if !f.DisableSorting {
- if f.SortingFunc == nil {
- sort.Strings(keys)
- fixedKeys = append(fixedKeys, keys...)
- } else {
- if !f.isColored() {
- fixedKeys = append(fixedKeys, keys...)
- f.SortingFunc(fixedKeys)
- } else {
- f.SortingFunc(keys)
- }
- }
- } else {
- fixedKeys = append(fixedKeys, keys...)
- }
-
- var b *bytes.Buffer
- if entry.Buffer != nil {
- b = entry.Buffer
- } else {
- b = &bytes.Buffer{}
- }
-
- f.terminalInitOnce.Do(func() { f.init(entry) })
-
- timestampFormat := f.TimestampFormat
- if timestampFormat == "" {
- timestampFormat = defaultTimestampFormat
- }
- if f.isColored() {
- f.printColored(b, entry, keys, data, timestampFormat)
- } else {
-
- for _, key := range fixedKeys {
- var value interface{}
- switch {
- case key == f.FieldMap.resolve(FieldKeyTime):
- value = entry.Time.Format(timestampFormat)
- case key == f.FieldMap.resolve(FieldKeyLevel):
- value = entry.Level.String()
- case key == f.FieldMap.resolve(FieldKeyMsg):
- value = entry.Message
- case key == f.FieldMap.resolve(FieldKeyLogrusError):
- value = entry.err
- case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
- value = funcVal
- case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
- value = fileVal
- default:
- value = data[key]
- }
- f.appendKeyValue(b, key, value)
- }
- }
-
- b.WriteByte('\n')
- return b.Bytes(), nil
-}
-
-func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
- var levelColor int
- switch entry.Level {
- case DebugLevel, TraceLevel:
- levelColor = gray
- case WarnLevel:
- levelColor = yellow
- case ErrorLevel, FatalLevel, PanicLevel:
- levelColor = red
- case InfoLevel:
- levelColor = blue
- default:
- levelColor = blue
- }
-
- levelText := strings.ToUpper(entry.Level.String())
- if !f.DisableLevelTruncation && !f.PadLevelText {
- levelText = levelText[0:4]
- }
- if f.PadLevelText {
- // Generates the format string used in the next line, for example "%-6s" or "%-7s".
- // Based on the max level text length.
- formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s"
- // Formats the level text by appending spaces up to the max length, for example:
- // - "INFO "
- // - "WARNING"
- levelText = fmt.Sprintf(formatString, levelText)
- }
-
- // Remove a single newline if it already exists in the message to keep
- // the behavior of logrus text_formatter the same as the stdlib log package
- entry.Message = strings.TrimSuffix(entry.Message, "\n")
-
- caller := ""
- if entry.HasCaller() {
- funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
- fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
-
- if f.CallerPrettyfier != nil {
- funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
- }
-
- if fileVal == "" {
- caller = funcVal
- } else if funcVal == "" {
- caller = fileVal
- } else {
- caller = fileVal + " " + funcVal
- }
- }
-
- switch {
- case f.DisableTimestamp:
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
- case !f.FullTimestamp:
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
- default:
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
- }
- for _, k := range keys {
- v := data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
- f.appendValue(b, v)
- }
-}
-
-func (f *TextFormatter) needsQuoting(text string) bool {
- if f.ForceQuote {
- return true
- }
- if f.QuoteEmptyFields && len(text) == 0 {
- return true
- }
- if f.DisableQuote {
- return false
- }
- for _, ch := range text {
- if !((ch >= 'a' && ch <= 'z') ||
- (ch >= 'A' && ch <= 'Z') ||
- (ch >= '0' && ch <= '9') ||
- ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
- return true
- }
- }
- return false
-}
-
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
- if b.Len() > 0 {
- b.WriteByte(' ')
- }
- b.WriteString(key)
- b.WriteByte('=')
- f.appendValue(b, value)
-}
-
-func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
- stringVal, ok := value.(string)
- if !ok {
- stringVal = fmt.Sprint(value)
- }
-
- if !f.needsQuoting(stringVal) {
- b.WriteString(stringVal)
- } else {
- b.WriteString(fmt.Sprintf("%q", stringVal))
- }
-}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
deleted file mode 100644
index 72e8e3a..0000000
--- a/vendor/github.com/sirupsen/logrus/writer.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package logrus
-
-import (
- "bufio"
- "io"
- "runtime"
-)
-
-// Writer at INFO level. See WriterLevel for details.
-func (logger *Logger) Writer() *io.PipeWriter {
- return logger.WriterLevel(InfoLevel)
-}
-
-// WriterLevel returns an io.Writer that can be used to write arbitrary text to
-// the logger at the given log level. Each line written to the writer will be
-// printed in the usual way using formatters and hooks. The writer is part of an
-// io.Pipe and it is the callers responsibility to close the writer when done.
-// This can be used to override the standard library logger easily.
-func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
- return NewEntry(logger).WriterLevel(level)
-}
-
-func (entry *Entry) Writer() *io.PipeWriter {
- return entry.WriterLevel(InfoLevel)
-}
-
-func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
- reader, writer := io.Pipe()
-
- var printFunc func(args ...interface{})
-
- switch level {
- case TraceLevel:
- printFunc = entry.Trace
- case DebugLevel:
- printFunc = entry.Debug
- case InfoLevel:
- printFunc = entry.Info
- case WarnLevel:
- printFunc = entry.Warn
- case ErrorLevel:
- printFunc = entry.Error
- case FatalLevel:
- printFunc = entry.Fatal
- case PanicLevel:
- printFunc = entry.Panic
- default:
- printFunc = entry.Print
- }
-
- go entry.writerScanner(reader, printFunc)
- runtime.SetFinalizer(writer, writerFinalizer)
-
- return writer
-}
-
-func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
- scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- printFunc(scanner.Text())
- }
- if err := scanner.Err(); err != nil {
- entry.Errorf("Error while reading from Writer: %s", err)
- }
- reader.Close()
-}
-
-func writerFinalizer(writer *io.PipeWriter) {
- writer.Close()
-}
diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go
deleted file mode 100644
index 3d6f516..0000000
--- a/vendor/golang.org/x/net/internal/socks/client.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package socks
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "strconv"
- "time"
-)
-
-var (
- noDeadline = time.Time{}
- aLongTimeAgo = time.Unix(1, 0)
-)
-
-func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) {
- host, port, err := splitHostPort(address)
- if err != nil {
- return nil, err
- }
- if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() {
- c.SetDeadline(deadline)
- defer c.SetDeadline(noDeadline)
- }
- if ctx != context.Background() {
- errCh := make(chan error, 1)
- done := make(chan struct{})
- defer func() {
- close(done)
- if ctxErr == nil {
- ctxErr = <-errCh
- }
- }()
- go func() {
- select {
- case <-ctx.Done():
- c.SetDeadline(aLongTimeAgo)
- errCh <- ctx.Err()
- case <-done:
- errCh <- nil
- }
- }()
- }
-
- b := make([]byte, 0, 6+len(host)) // the size here is just an estimate
- b = append(b, Version5)
- if len(d.AuthMethods) == 0 || d.Authenticate == nil {
- b = append(b, 1, byte(AuthMethodNotRequired))
- } else {
- ams := d.AuthMethods
- if len(ams) > 255 {
- return nil, errors.New("too many authentication methods")
- }
- b = append(b, byte(len(ams)))
- for _, am := range ams {
- b = append(b, byte(am))
- }
- }
- if _, ctxErr = c.Write(b); ctxErr != nil {
- return
- }
-
- if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil {
- return
- }
- if b[0] != Version5 {
- return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
- }
- am := AuthMethod(b[1])
- if am == AuthMethodNoAcceptableMethods {
- return nil, errors.New("no acceptable authentication methods")
- }
- if d.Authenticate != nil {
- if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil {
- return
- }
- }
-
- b = b[:0]
- b = append(b, Version5, byte(d.cmd), 0)
- if ip := net.ParseIP(host); ip != nil {
- if ip4 := ip.To4(); ip4 != nil {
- b = append(b, AddrTypeIPv4)
- b = append(b, ip4...)
- } else if ip6 := ip.To16(); ip6 != nil {
- b = append(b, AddrTypeIPv6)
- b = append(b, ip6...)
- } else {
- return nil, errors.New("unknown address type")
- }
- } else {
- if len(host) > 255 {
- return nil, errors.New("FQDN too long")
- }
- b = append(b, AddrTypeFQDN)
- b = append(b, byte(len(host)))
- b = append(b, host...)
- }
- b = append(b, byte(port>>8), byte(port))
- if _, ctxErr = c.Write(b); ctxErr != nil {
- return
- }
-
- if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil {
- return
- }
- if b[0] != Version5 {
- return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0])))
- }
- if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded {
- return nil, errors.New("unknown error " + cmdErr.String())
- }
- if b[2] != 0 {
- return nil, errors.New("non-zero reserved field")
- }
- l := 2
- var a Addr
- switch b[3] {
- case AddrTypeIPv4:
- l += net.IPv4len
- a.IP = make(net.IP, net.IPv4len)
- case AddrTypeIPv6:
- l += net.IPv6len
- a.IP = make(net.IP, net.IPv6len)
- case AddrTypeFQDN:
- if _, err := io.ReadFull(c, b[:1]); err != nil {
- return nil, err
- }
- l += int(b[0])
- default:
- return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3])))
- }
- if cap(b) < l {
- b = make([]byte, l)
- } else {
- b = b[:l]
- }
- if _, ctxErr = io.ReadFull(c, b); ctxErr != nil {
- return
- }
- if a.IP != nil {
- copy(a.IP, b)
- } else {
- a.Name = string(b[:len(b)-2])
- }
- a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1])
- return &a, nil
-}
-
-func splitHostPort(address string) (string, int, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return "", 0, err
- }
- portnum, err := strconv.Atoi(port)
- if err != nil {
- return "", 0, err
- }
- if 1 > portnum || portnum > 0xffff {
- return "", 0, errors.New("port number out of range " + port)
- }
- return host, portnum, nil
-}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
deleted file mode 100644
index 97db234..0000000
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package socks provides a SOCKS version 5 client implementation.
-//
-// SOCKS protocol version 5 is defined in RFC 1928.
-// Username/Password authentication for SOCKS version 5 is defined in
-// RFC 1929.
-package socks
-
-import (
- "context"
- "errors"
- "io"
- "net"
- "strconv"
-)
-
-// A Command represents a SOCKS command.
-type Command int
-
-func (cmd Command) String() string {
- switch cmd {
- case CmdConnect:
- return "socks connect"
- case cmdBind:
- return "socks bind"
- default:
- return "socks " + strconv.Itoa(int(cmd))
- }
-}
-
-// An AuthMethod represents a SOCKS authentication method.
-type AuthMethod int
-
-// A Reply represents a SOCKS command reply code.
-type Reply int
-
-func (code Reply) String() string {
- switch code {
- case StatusSucceeded:
- return "succeeded"
- case 0x01:
- return "general SOCKS server failure"
- case 0x02:
- return "connection not allowed by ruleset"
- case 0x03:
- return "network unreachable"
- case 0x04:
- return "host unreachable"
- case 0x05:
- return "connection refused"
- case 0x06:
- return "TTL expired"
- case 0x07:
- return "command not supported"
- case 0x08:
- return "address type not supported"
- default:
- return "unknown code: " + strconv.Itoa(int(code))
- }
-}
-
-// Wire protocol constants.
-const (
- Version5 = 0x05
-
- AddrTypeIPv4 = 0x01
- AddrTypeFQDN = 0x03
- AddrTypeIPv6 = 0x04
-
- CmdConnect Command = 0x01 // establishes an active-open forward proxy connection
- cmdBind Command = 0x02 // establishes a passive-open forward proxy connection
-
- AuthMethodNotRequired AuthMethod = 0x00 // no authentication required
- AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password
- AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods
-
- StatusSucceeded Reply = 0x00
-)
-
-// An Addr represents a SOCKS-specific address.
-// Either Name or IP is used exclusively.
-type Addr struct {
- Name string // fully-qualified domain name
- IP net.IP
- Port int
-}
-
-func (a *Addr) Network() string { return "socks" }
-
-func (a *Addr) String() string {
- if a == nil {
- return "<nil>"
- }
- port := strconv.Itoa(a.Port)
- if a.IP == nil {
- return net.JoinHostPort(a.Name, port)
- }
- return net.JoinHostPort(a.IP.String(), port)
-}
-
-// A Conn represents a forward proxy connection.
-type Conn struct {
- net.Conn
-
- boundAddr net.Addr
-}
-
-// BoundAddr returns the address assigned by the proxy server for
-// connecting to the command target address from the proxy server.
-func (c *Conn) BoundAddr() net.Addr {
- if c == nil {
- return nil
- }
- return c.boundAddr
-}
-
-// A Dialer holds SOCKS-specific options.
-type Dialer struct {
- cmd Command // either CmdConnect or cmdBind
- proxyNetwork string // network between a proxy server and a client
- proxyAddress string // proxy server address
-
- // ProxyDial specifies the optional dial function for
- // establishing the transport connection.
- ProxyDial func(context.Context, string, string) (net.Conn, error)
-
- // AuthMethods specifies the list of request authentication
- // methods.
- // If empty, SOCKS client requests only AuthMethodNotRequired.
- AuthMethods []AuthMethod
-
- // Authenticate specifies the optional authentication
- // function. It must be non-nil when AuthMethods is not empty.
- // It must return an error when the authentication is failed.
- Authenticate func(context.Context, io.ReadWriter, AuthMethod) error
-}
-
-// DialContext connects to the provided address on the provided
-// network.
-//
-// The returned error value may be a net.OpError. When the Op field of
-// net.OpError contains "socks", the Source field contains a proxy
-// server address and the Addr field contains a command target
-// address.
-//
-// See func Dial of the net package of standard library for a
-// description of the network and address parameters.
-func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if ctx == nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
- }
- var err error
- var c net.Conn
- if d.ProxyDial != nil {
- c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress)
- } else {
- var dd net.Dialer
- c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress)
- }
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- a, err := d.connect(ctx, c, address)
- if err != nil {
- c.Close()
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- return &Conn{Conn: c, boundAddr: a}, nil
-}
-
-// DialWithConn initiates a connection from SOCKS server to the target
-// network and address using the connection c that is already
-// connected to the SOCKS server.
-//
-// It returns the connection's local address assigned by the SOCKS
-// server.
-func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if ctx == nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")}
- }
- a, err := d.connect(ctx, c, address)
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- return a, nil
-}
-
-// Dial connects to the provided address on the provided network.
-//
-// Unlike DialContext, it returns a raw transport connection instead
-// of a forward proxy connection.
-//
-// Deprecated: Use DialContext or DialWithConn instead.
-func (d *Dialer) Dial(network, address string) (net.Conn, error) {
- if err := d.validateTarget(network, address); err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- var err error
- var c net.Conn
- if d.ProxyDial != nil {
- c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress)
- } else {
- c, err = net.Dial(d.proxyNetwork, d.proxyAddress)
- }
- if err != nil {
- proxy, dst, _ := d.pathAddrs(address)
- return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err}
- }
- if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil {
- c.Close()
- return nil, err
- }
- return c, nil
-}
-
-func (d *Dialer) validateTarget(network, address string) error {
- switch network {
- case "tcp", "tcp6", "tcp4":
- default:
- return errors.New("network not implemented")
- }
- switch d.cmd {
- case CmdConnect, cmdBind:
- default:
- return errors.New("command not implemented")
- }
- return nil
-}
-
-func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) {
- for i, s := range []string{d.proxyAddress, address} {
- host, port, err := splitHostPort(s)
- if err != nil {
- return nil, nil, err
- }
- a := &Addr{Port: port}
- a.IP = net.ParseIP(host)
- if a.IP == nil {
- a.Name = host
- }
- if i == 0 {
- proxy = a
- } else {
- dst = a
- }
- }
- return
-}
-
-// NewDialer returns a new Dialer that dials through the provided
-// proxy server's network and address.
-func NewDialer(network, address string) *Dialer {
- return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect}
-}
-
-const (
- authUsernamePasswordVersion = 0x01
- authStatusSucceeded = 0x00
-)
-
-// UsernamePassword are the credentials for the username/password
-// authentication method.
-type UsernamePassword struct {
- Username string
- Password string
-}
-
-// Authenticate authenticates a pair of username and password with the
-// proxy server.
-func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error {
- switch auth {
- case AuthMethodNotRequired:
- return nil
- case AuthMethodUsernamePassword:
- if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) == 0 || len(up.Password) > 255 {
- return errors.New("invalid username/password")
- }
- b := []byte{authUsernamePasswordVersion}
- b = append(b, byte(len(up.Username)))
- b = append(b, up.Username...)
- b = append(b, byte(len(up.Password)))
- b = append(b, up.Password...)
- // TODO(mikio): handle IO deadlines and cancelation if
- // necessary
- if _, err := rw.Write(b); err != nil {
- return err
- }
- if _, err := io.ReadFull(rw, b[:2]); err != nil {
- return err
- }
- if b[0] != authUsernamePasswordVersion {
- return errors.New("invalid username/password version")
- }
- if b[1] != authStatusSucceeded {
- return errors.New("username/password authentication failed")
- }
- return nil
- }
- return errors.New("unsupported authentication method " + strconv.Itoa(int(auth)))
-}
diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go
deleted file mode 100644
index 811c2e4..0000000
--- a/vendor/golang.org/x/net/proxy/dial.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-)
-
-// A ContextDialer dials using a context.
-type ContextDialer interface {
- DialContext(ctx context.Context, network, address string) (net.Conn, error)
-}
-
-// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment.
-//
-// The passed ctx is only used for returning the Conn, not the lifetime of the Conn.
-//
-// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer
-// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout.
-//
-// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
-func Dial(ctx context.Context, network, address string) (net.Conn, error) {
- d := FromEnvironment()
- if xd, ok := d.(ContextDialer); ok {
- return xd.DialContext(ctx, network, address)
- }
- return dialContext(ctx, d, network, address)
-}
-
-// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout
-// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed.
-func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) {
- var (
- conn net.Conn
- done = make(chan struct{}, 1)
- err error
- )
- go func() {
- conn, err = d.Dial(network, address)
- close(done)
- if conn != nil && ctx.Err() != nil {
- conn.Close()
- }
- }()
- select {
- case <-ctx.Done():
- err = ctx.Err()
- case <-done:
- }
- return conn, err
-}
diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go
deleted file mode 100644
index 3d66bde..0000000
--- a/vendor/golang.org/x/net/proxy/direct.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-)
-
-type direct struct{}
-
-// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext.
-var Direct = direct{}
-
-var (
- _ Dialer = Direct
- _ ContextDialer = Direct
-)
-
-// Dial directly invokes net.Dial with the supplied parameters.
-func (direct) Dial(network, addr string) (net.Conn, error) {
- return net.Dial(network, addr)
-}
-
-// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters.
-func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
- var d net.Dialer
- return d.DialContext(ctx, network, addr)
-}
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
deleted file mode 100644
index 573fe79..0000000
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
- "strings"
-)
-
-// A PerHost directs connections to a default Dialer unless the host name
-// requested matches one of a number of exceptions.
-type PerHost struct {
- def, bypass Dialer
-
- bypassNetworks []*net.IPNet
- bypassIPs []net.IP
- bypassZones []string
- bypassHosts []string
-}
-
-// NewPerHost returns a PerHost Dialer that directs connections to either
-// defaultDialer or bypass, depending on whether the connection matches one of
-// the configured rules.
-func NewPerHost(defaultDialer, bypass Dialer) *PerHost {
- return &PerHost{
- def: defaultDialer,
- bypass: bypass,
- }
-}
-
-// Dial connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
-
- return p.dialerForRequest(host).Dial(network, addr)
-}
-
-// DialContext connects to the address addr on the given network through either
-// defaultDialer or bypass.
-func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) {
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return nil, err
- }
- d := p.dialerForRequest(host)
- if x, ok := d.(ContextDialer); ok {
- return x.DialContext(ctx, network, addr)
- }
- return dialContext(ctx, d, network, addr)
-}
-
-func (p *PerHost) dialerForRequest(host string) Dialer {
- if ip := net.ParseIP(host); ip != nil {
- for _, net := range p.bypassNetworks {
- if net.Contains(ip) {
- return p.bypass
- }
- }
- for _, bypassIP := range p.bypassIPs {
- if bypassIP.Equal(ip) {
- return p.bypass
- }
- }
- return p.def
- }
-
- for _, zone := range p.bypassZones {
- if strings.HasSuffix(host, zone) {
- return p.bypass
- }
- if host == zone[1:] {
- // For a zone ".example.com", we match "example.com"
- // too.
- return p.bypass
- }
- }
- for _, bypassHost := range p.bypassHosts {
- if bypassHost == host {
- return p.bypass
- }
- }
- return p.def
-}
-
-// AddFromString parses a string that contains comma-separated values
-// specifying hosts that should use the bypass proxy. Each value is either an
-// IP address, a CIDR range, a zone (*.example.com) or a host name
-// (localhost). A best effort is made to parse the string and errors are
-// ignored.
-func (p *PerHost) AddFromString(s string) {
- hosts := strings.Split(s, ",")
- for _, host := range hosts {
- host = strings.TrimSpace(host)
- if len(host) == 0 {
- continue
- }
- if strings.Contains(host, "/") {
- // We assume that it's a CIDR address like 127.0.0.0/8
- if _, net, err := net.ParseCIDR(host); err == nil {
- p.AddNetwork(net)
- }
- continue
- }
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
- continue
- }
- if strings.HasPrefix(host, "*.") {
- p.AddZone(host[1:])
- continue
- }
- p.AddHost(host)
- }
-}
-
-// AddIP specifies an IP address that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match an IP.
-func (p *PerHost) AddIP(ip net.IP) {
- p.bypassIPs = append(p.bypassIPs, ip)
-}
-
-// AddNetwork specifies an IP range that will use the bypass proxy. Note that
-// this will only take effect if a literal IP address is dialed. A connection
-// to a named host will never match.
-func (p *PerHost) AddNetwork(net *net.IPNet) {
- p.bypassNetworks = append(p.bypassNetworks, net)
-}
-
-// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
-// "example.com" matches "example.com" and all of its subdomains.
-func (p *PerHost) AddZone(zone string) {
- if strings.HasSuffix(zone, ".") {
- zone = zone[:len(zone)-1]
- }
- if !strings.HasPrefix(zone, ".") {
- zone = "." + zone
- }
- p.bypassZones = append(p.bypassZones, zone)
-}
-
-// AddHost specifies a host name that will use the bypass proxy.
-func (p *PerHost) AddHost(host string) {
- if strings.HasSuffix(host, ".") {
- host = host[:len(host)-1]
- }
- p.bypassHosts = append(p.bypassHosts, host)
-}
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
deleted file mode 100644
index 9ff4b9a..0000000
--- a/vendor/golang.org/x/net/proxy/proxy.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proxy provides support for a variety of protocols to proxy network
-// data.
-package proxy // import "golang.org/x/net/proxy"
-
-import (
- "errors"
- "net"
- "net/url"
- "os"
- "sync"
-)
-
-// A Dialer is a means to establish a connection.
-// Custom dialers should also implement ContextDialer.
-type Dialer interface {
- // Dial connects to the given address via the proxy.
- Dial(network, addr string) (c net.Conn, err error)
-}
-
-// Auth contains authentication parameters that specific Dialers may require.
-type Auth struct {
- User, Password string
-}
-
-// FromEnvironment returns the dialer specified by the proxy-related
-// variables in the environment and makes underlying connections
-// directly.
-func FromEnvironment() Dialer {
- return FromEnvironmentUsing(Direct)
-}
-
-// FromEnvironmentUsing returns the dialer specify by the proxy-related
-// variables in the environment and makes underlying connections
-// using the provided forwarding Dialer (for instance, a *net.Dialer
-// with desired configuration).
-func FromEnvironmentUsing(forward Dialer) Dialer {
- allProxy := allProxyEnv.Get()
- if len(allProxy) == 0 {
- return forward
- }
-
- proxyURL, err := url.Parse(allProxy)
- if err != nil {
- return forward
- }
- proxy, err := FromURL(proxyURL, forward)
- if err != nil {
- return forward
- }
-
- noProxy := noProxyEnv.Get()
- if len(noProxy) == 0 {
- return proxy
- }
-
- perHost := NewPerHost(proxy, forward)
- perHost.AddFromString(noProxy)
- return perHost
-}
-
-// proxySchemes is a map from URL schemes to a function that creates a Dialer
-// from a URL with such a scheme.
-var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
-
-// RegisterDialerType takes a URL scheme and a function to generate Dialers from
-// a URL with that scheme and a forwarding Dialer. Registered schemes are used
-// by FromURL.
-func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
- if proxySchemes == nil {
- proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
- }
- proxySchemes[scheme] = f
-}
-
-// FromURL returns a Dialer given a URL specification and an underlying
-// Dialer for it to make network requests.
-func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
- var auth *Auth
- if u.User != nil {
- auth = new(Auth)
- auth.User = u.User.Username()
- if p, ok := u.User.Password(); ok {
- auth.Password = p
- }
- }
-
- switch u.Scheme {
- case "socks5", "socks5h":
- addr := u.Hostname()
- port := u.Port()
- if port == "" {
- port = "1080"
- }
- return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
- }
-
- // If the scheme doesn't match any of the built-in schemes, see if it
- // was registered by another package.
- if proxySchemes != nil {
- if f, ok := proxySchemes[u.Scheme]; ok {
- return f(u, forward)
- }
- }
-
- return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
-}
-
-var (
- allProxyEnv = &envOnce{
- names: []string{"ALL_PROXY", "all_proxy"},
- }
- noProxyEnv = &envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-// envOnce looks up an environment variable (optionally by multiple
-// names) once. It mitigates expensive lookups on some platforms
-// (e.g. Windows).
-// (Borrowed from net/http/transport.go)
-type envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// reset is used by tests
-func (e *envOnce) reset() {
- e.once = sync.Once{}
- e.val = ""
-}
diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go
deleted file mode 100644
index c91651f..0000000
--- a/vendor/golang.org/x/net/proxy/socks5.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proxy
-
-import (
- "context"
- "net"
-
- "golang.org/x/net/internal/socks"
-)
-
-// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given
-// address with an optional username and password.
-// See RFC 1928 and RFC 1929.
-func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) {
- d := socks.NewDialer(network, address)
- if forward != nil {
- if f, ok := forward.(ContextDialer); ok {
- d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
- return f.DialContext(ctx, network, address)
- }
- } else {
- d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) {
- return dialContext(ctx, forward, network, address)
- }
- }
- }
- if auth != nil {
- up := socks.UsernamePassword{
- Username: auth.User,
- Password: auth.Password,
- }
- d.AuthMethods = []socks.AuthMethod{
- socks.AuthMethodNotRequired,
- socks.AuthMethodUsernamePassword,
- }
- d.Authenticate = up.Authenticate
- }
- return d, nil
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bf4f0a4..441ec68 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -74,45 +74,17 @@
github.com/cncf/xds/go/xds/type/v3
# github.com/containerd/containerd v1.5.8
## explicit; go 1.16
-github.com/containerd/containerd/errdefs
-github.com/containerd/containerd/log
-github.com/containerd/containerd/platforms
# github.com/danjacques/gofslock v0.0.0-20220131014315-6e321f4509c8
## explicit; go 1.0
github.com/danjacques/gofslock/fslock
# github.com/docker/distribution v2.7.1+incompatible
## explicit
-github.com/docker/distribution/digestset
-github.com/docker/distribution/reference
-github.com/docker/distribution/registry/api/errcode
# github.com/docker/docker v20.10.12+incompatible
## explicit
-github.com/docker/docker/api
-github.com/docker/docker/api/types
-github.com/docker/docker/api/types/blkiodev
-github.com/docker/docker/api/types/container
-github.com/docker/docker/api/types/events
-github.com/docker/docker/api/types/filters
-github.com/docker/docker/api/types/image
-github.com/docker/docker/api/types/mount
-github.com/docker/docker/api/types/network
-github.com/docker/docker/api/types/registry
-github.com/docker/docker/api/types/strslice
-github.com/docker/docker/api/types/swarm
-github.com/docker/docker/api/types/swarm/runtime
-github.com/docker/docker/api/types/time
-github.com/docker/docker/api/types/versions
-github.com/docker/docker/api/types/volume
-github.com/docker/docker/client
-github.com/docker/docker/errdefs
# github.com/docker/go-connections v0.4.0
## explicit
-github.com/docker/go-connections/nat
-github.com/docker/go-connections/sockets
-github.com/docker/go-connections/tlsconfig
# github.com/docker/go-units v0.4.0
## explicit
-github.com/docker/go-units
# github.com/envoyproxy/go-control-plane v0.10.1
## explicit; go 1.11
github.com/envoyproxy/go-control-plane/envoy/admin/v3
@@ -171,7 +143,6 @@
github.com/fsnotify/fsnotify
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
-github.com/gogo/protobuf/proto
# github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
## explicit
github.com/golang-collections/collections/set
@@ -264,11 +235,8 @@
github.com/op/go-logging
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
-github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.2
## explicit
-github.com/opencontainers/image-spec/specs-go
-github.com/opencontainers/image-spec/specs-go/v1
# github.com/pborman/uuid v1.2.1
## explicit
github.com/pborman/uuid
@@ -300,7 +268,6 @@
## explicit; go 1.16
# github.com/sirupsen/logrus v1.8.1
## explicit; go 1.13
-github.com/sirupsen/logrus
# github.com/smartystreets/assertions v1.2.1
## explicit; go 1.17
# github.com/texttheater/golang-levenshtein v1.0.1
@@ -452,9 +419,7 @@
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
-golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
-golang.org/x/net/proxy
golang.org/x/net/trace
# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
## explicit; go 1.11