Revert "mod: do: bump google.golang.org/api from 0.153.0 to 0.156.0"
diff --git a/go.mod b/go.mod
index 46546b2..8e4bd4e 100644
--- a/go.mod
+++ b/go.mod
@@ -16,22 +16,22 @@
github.com/prometheus/client_golang v1.17.0
github.com/stretchr/testify v1.8.4
github.com/ulikunitz/xz v0.5.11
- golang.org/x/net v0.20.0
- golang.org/x/oauth2 v0.16.0
+ golang.org/x/net v0.19.0
+ golang.org/x/oauth2 v0.15.0
golang.org/x/perf v0.0.0-20230221235046-aebcfb61e84c
- golang.org/x/sync v0.6.0
- golang.org/x/sys v0.16.0
+ golang.org/x/sync v0.5.0
+ golang.org/x/sys v0.15.0
golang.org/x/tools v0.14.0
- google.golang.org/api v0.156.0
+ google.golang.org/api v0.153.0
google.golang.org/appengine/v2 v2.0.5
- google.golang.org/genproto v0.0.0-20231212172506-995d672761c0
+ google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17
gopkg.in/yaml.v3 v3.0.1
)
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
- cloud.google.com/go v0.111.0 // indirect
+ cloud.google.com/go v0.110.10 // indirect
cloud.google.com/go/compute v1.23.3 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
@@ -73,14 +73,12 @@
github.com/ettle/strcase v0.1.1 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fatih/structtag v1.2.0 // indirect
- github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/firefart/nonamedreturns v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
github.com/ghostiam/protogetter v0.2.3 // indirect
github.com/go-critic/go-critic v0.9.0 // indirect
- github.com/go-logr/logr v1.3.0 // indirect
- github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
github.com/go-toolsmith/astequal v1.1.0 // indirect
@@ -105,7 +103,7 @@
github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/safehtml v0.1.0 // indirect
- github.com/google/uuid v1.5.0 // indirect
+ github.com/google/uuid v1.4.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect
@@ -203,26 +201,22 @@
gitlab.com/bosi/decorder v0.4.1 // indirect
go-simpler.org/sloglint v0.1.2 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
- go.opentelemetry.io/otel v1.21.0 // indirect
- go.opentelemetry.io/otel/metric v1.21.0 // indirect
- go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.tmz.dev/musttag v0.7.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
- golang.org/x/crypto v0.18.0 // indirect
+ golang.org/x/crypto v0.16.0 // indirect
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.5.0 // indirect
- google.golang.org/appengine v1.6.8 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 // indirect
- google.golang.org/grpc v1.60.1 // indirect
- google.golang.org/protobuf v1.32.0 // indirect
+ golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+ google.golang.org/appengine v1.6.7 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect
+ google.golang.org/grpc v1.59.0 // indirect
+ google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
honnef.co/go/tools v0.4.6 // indirect
diff --git a/go.sum b/go.sum
index 279834a..e1ec581 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
-cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
+cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
+cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -150,7 +150,6 @@
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
@@ -169,7 +168,6 @@
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=
github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0=
github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=
@@ -178,8 +176,8 @@
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
-github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
-github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
+github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=
github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -209,11 +207,7 @@
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -342,8 +336,8 @@
github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8=
github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
+github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
@@ -668,17 +662,6 @@
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
-go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
-go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
-go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s=
go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
@@ -698,8 +681,8 @@
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
-golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -803,8 +786,8 @@
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -816,8 +799,8 @@
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
+golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/perf v0.0.0-20230221235046-aebcfb61e84c h1:xR7iBj/IHpQuWFrftdwa1ttoVJKSDg+37NHA2XblmrU=
golang.org/x/perf v0.0.0-20230221235046-aebcfb61e84c/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -833,8 +816,8 @@
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -894,8 +877,8 @@
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
-golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1000,6 +983,7 @@
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
@@ -1027,17 +1011,16 @@
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ=
-google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY=
+google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
+google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
-google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/appengine/v2 v2.0.5 h1:4C+F3Cd3L2nWEfSmFEZDPjQvDwL8T0YCeZBysZifP3k=
google.golang.org/appengine/v2 v2.0.5/go.mod h1:WoEXGoXNfa0mLvaH5sV3ZSGXwVmy8yf7Z1JKf3J3wLI=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -1076,12 +1059,12 @@
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos=
-google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o=
-google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917 h1:6G8oQ016D88m1xAKljMlBOOGWDZkes4kMhgGFlf8WcQ=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917/go.mod h1:xtjpI3tXFPP051KaWnhvxkiubL/6dJ18vLVf7q2pTOU=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
+google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
+google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc=
google.golang.org/grpc v0.0.0-20170208002647-2a6bf6142e96/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1099,8 +1082,8 @@
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
-google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1114,8 +1097,8 @@
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
index cb4a64b..38a67ba 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-individual.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json
@@ -1,7 +1,7 @@
{
"auth": "0.1.0",
"auth/oauth2adapt": "0.1.0",
- "bigquery": "1.57.1",
+ "bigquery": "1.57.0",
"bigtable": "1.20.0",
"datastore": "1.15.0",
"errorreporting": "0.3.0",
@@ -10,6 +10,6 @@
"profiler": "0.4.0",
"pubsub": "1.33.0",
"pubsublite": "1.8.1",
- "spanner": "1.53.0",
- "storage": "1.35.1"
+ "spanner": "1.51.0",
+ "storage": "1.34.0"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
index 74dd7d1..12fed1b 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json
@@ -1,134 +1,131 @@
{
- "accessapproval": "1.7.4",
- "accesscontextmanager": "1.8.4",
- "advisorynotifications": "1.2.3",
- "ai": "0.1.4",
- "aiplatform": "1.55.0",
- "alloydb": "1.7.0",
- "analytics": "0.21.6",
- "apigateway": "1.6.4",
- "apigeeconnect": "1.6.4",
- "apigeeregistry": "0.8.2",
- "apikeys": "1.1.4",
- "appengine": "1.8.4",
- "area120": "0.8.4",
- "artifactregistry": "1.14.6",
- "asset": "1.15.3",
- "assuredworkloads": "1.11.4",
- "automl": "1.13.4",
- "baremetalsolution": "1.2.3",
- "batch": "1.7.0",
- "beyondcorp": "1.0.3",
- "billing": "1.17.4",
- "binaryauthorization": "1.8.0",
- "certificatemanager": "1.7.4",
- "channel": "1.17.3",
- "cloudbuild": "1.15.0",
- "clouddms": "1.7.3",
- "cloudprofiler": "0.1.0",
- "cloudtasks": "1.12.4",
- "commerce": "0.1.3",
- "compute": "1.23.3",
+ "accessapproval": "1.7.3",
+ "accesscontextmanager": "1.8.3",
+ "advisorynotifications": "1.2.2",
+ "ai": "0.1.3",
+ "aiplatform": "1.51.2",
+ "alloydb": "1.6.2",
+ "analytics": "0.21.5",
+ "apigateway": "1.6.3",
+ "apigeeconnect": "1.6.3",
+ "apigeeregistry": "0.8.1",
+ "apikeys": "1.1.3",
+ "appengine": "1.8.3",
+ "area120": "0.8.3",
+ "artifactregistry": "1.14.5",
+ "asset": "1.15.2",
+ "assuredworkloads": "1.11.3",
+ "automl": "1.13.3",
+ "baremetalsolution": "1.2.2",
+ "batch": "1.6.2",
+ "beyondcorp": "1.0.2",
+ "billing": "1.17.3",
+ "binaryauthorization": "1.7.2",
+ "certificatemanager": "1.7.3",
+ "channel": "1.17.2",
+ "cloudbuild": "1.14.2",
+ "clouddms": "1.7.2",
+ "cloudtasks": "1.12.3",
+ "commerce": "0.1.2",
+ "compute": "1.23.2",
"compute/metadata": "0.2.3",
- "confidentialcomputing": "1.4.0",
- "config": "0.1.4",
- "contactcenterinsights": "1.12.1",
- "container": "1.29.0",
- "containeranalysis": "0.11.3",
- "datacatalog": "1.19.0",
- "dataflow": "0.9.4",
- "dataform": "0.9.1",
- "datafusion": "1.7.4",
- "datalabeling": "0.8.4",
- "dataplex": "1.12.0",
- "dataproc": "2.3.0",
- "dataqna": "0.8.4",
- "datastream": "1.10.3",
- "deploy": "1.15.0",
- "dialogflow": "1.46.0",
- "discoveryengine": "1.2.3",
- "dlp": "1.11.1",
- "documentai": "1.23.6",
- "domains": "0.9.4",
- "edgecontainer": "1.1.4",
- "edgenetwork": "0.1.0",
- "essentialcontacts": "1.6.5",
- "eventarc": "1.13.3",
- "filestore": "1.8.0",
- "functions": "1.15.4",
- "gkebackup": "1.3.4",
- "gkeconnect": "0.8.4",
- "gkehub": "0.14.4",
- "gkemulticloud": "1.0.3",
- "grafeas": "0.3.4",
- "gsuiteaddons": "1.6.4",
- "iam": "1.1.5",
- "iap": "1.9.3",
- "ids": "1.4.4",
- "iot": "1.7.4",
- "kms": "1.15.5",
- "language": "1.12.2",
- "lifesciences": "0.9.4",
- "longrunning": "0.5.4",
- "managedidentities": "1.6.4",
- "maps": "1.6.1",
- "mediatranslation": "0.8.4",
- "memcache": "1.10.4",
- "metastore": "1.13.3",
- "migrationcenter": "0.2.3",
- "monitoring": "1.16.3",
- "netapp": "0.2.3",
- "networkconnectivity": "1.14.3",
- "networkmanagement": "1.9.3",
- "networksecurity": "0.9.4",
- "notebooks": "1.11.2",
- "optimization": "1.6.2",
- "orchestration": "1.8.4",
- "orgpolicy": "1.11.4",
- "osconfig": "1.12.4",
- "oslogin": "1.12.2",
- "phishingprotection": "0.8.4",
- "policysimulator": "0.2.2",
- "policytroubleshooter": "1.10.2",
- "privatecatalog": "0.9.4",
- "rapidmigrationassessment": "1.0.4",
- "recaptchaenterprise": "2.9.0",
- "recommendationengine": "0.8.4",
- "recommender": "1.11.3",
- "redis": "1.14.1",
- "resourcemanager": "1.9.4",
- "resourcesettings": "1.6.4",
- "retail": "1.14.4",
- "run": "1.3.3",
- "scheduler": "1.10.5",
- "secretmanager": "1.11.4",
- "securesourcemanager": "0.1.2",
- "security": "1.15.4",
- "securitycenter": "1.24.3",
- "servicecontrol": "1.12.4",
- "servicedirectory": "1.11.3",
- "servicemanagement": "1.9.5",
- "serviceusage": "1.8.3",
- "shell": "1.7.4",
- "shopping": "0.2.2",
- "speech": "1.21.0",
- "storageinsights": "1.0.4",
- "storagetransfer": "1.10.3",
- "support": "1.0.3",
- "talent": "1.6.5",
- "telcoautomation": "0.1.1",
- "texttospeech": "1.7.4",
- "tpu": "1.6.4",
- "trace": "1.10.4",
- "translate": "1.9.3",
- "video": "1.20.3",
- "videointelligence": "1.11.4",
- "vision": "2.7.5",
- "vmmigration": "1.7.4",
- "vmwareengine": "1.0.3",
- "vpcaccess": "1.7.4",
- "webrisk": "1.9.4",
- "websecurityscanner": "1.6.4",
- "workflows": "1.12.3",
- "workstations": "0.5.3"
+ "confidentialcomputing": "1.3.2",
+ "config": "0.1.3",
+ "contactcenterinsights": "1.11.2",
+ "container": "1.27.0",
+ "containeranalysis": "0.11.2",
+ "datacatalog": "1.18.2",
+ "dataflow": "0.9.3",
+ "dataform": "0.9.0",
+ "datafusion": "1.7.3",
+ "datalabeling": "0.8.3",
+ "dataplex": "1.11.0",
+ "dataproc": "2.2.2",
+ "dataqna": "0.8.3",
+ "datastream": "1.10.2",
+ "deploy": "1.14.1",
+ "dialogflow": "1.44.2",
+ "discoveryengine": "1.2.2",
+ "dlp": "1.11.0",
+ "documentai": "1.23.4",
+ "domains": "0.9.3",
+ "edgecontainer": "1.1.3",
+ "essentialcontacts": "1.6.4",
+ "eventarc": "1.13.2",
+ "filestore": "1.7.3",
+ "functions": "1.15.3",
+ "gkebackup": "1.3.3",
+ "gkeconnect": "0.8.3",
+ "gkehub": "0.14.3",
+ "gkemulticloud": "1.0.2",
+ "grafeas": "0.3.3",
+ "gsuiteaddons": "1.6.3",
+ "iam": "1.1.4",
+ "iap": "1.9.2",
+ "ids": "1.4.3",
+ "iot": "1.7.3",
+ "kms": "1.15.4",
+ "language": "1.12.1",
+ "lifesciences": "0.9.3",
+ "longrunning": "0.5.3",
+ "managedidentities": "1.6.3",
+ "maps": "1.6.0",
+ "mediatranslation": "0.8.3",
+ "memcache": "1.10.3",
+ "metastore": "1.13.2",
+ "migrationcenter": "0.2.2",
+ "monitoring": "1.16.2",
+ "netapp": "0.2.2",
+ "networkconnectivity": "1.14.2",
+ "networkmanagement": "1.9.2",
+ "networksecurity": "0.9.3",
+ "notebooks": "1.11.1",
+ "optimization": "1.6.1",
+ "orchestration": "1.8.3",
+ "orgpolicy": "1.11.3",
+ "osconfig": "1.12.3",
+ "oslogin": "1.12.1",
+ "phishingprotection": "0.8.3",
+ "policysimulator": "0.2.1",
+ "policytroubleshooter": "1.10.1",
+ "privatecatalog": "0.9.3",
+ "rapidmigrationassessment": "1.0.3",
+ "recaptchaenterprise": "2.8.2",
+ "recommendationengine": "0.8.3",
+ "recommender": "1.11.2",
+ "redis": "1.14.0",
+ "resourcemanager": "1.9.3",
+ "resourcesettings": "1.6.3",
+ "retail": "1.14.3",
+ "run": "1.3.2",
+ "scheduler": "1.10.3",
+ "secretmanager": "1.11.3",
+ "securesourcemanager": "0.1.1",
+ "security": "1.15.3",
+ "securitycenter": "1.24.1",
+ "servicecontrol": "1.12.3",
+ "servicedirectory": "1.11.2",
+ "servicemanagement": "1.9.4",
+ "serviceusage": "1.8.2",
+ "shell": "1.7.3",
+ "shopping": "0.2.1",
+ "speech": "1.20.0",
+ "storageinsights": "1.0.3",
+ "storagetransfer": "1.10.2",
+ "support": "1.0.2",
+ "talent": "1.6.4",
+ "texttospeech": "1.7.3",
+ "tpu": "1.6.3",
+ "trace": "1.10.3",
+ "translate": "1.9.2",
+ "video": "1.20.2",
+ "videointelligence": "1.11.3",
+ "vision": "2.7.4",
+ "vmmigration": "1.7.3",
+ "vmwareengine": "1.0.2",
+ "vpcaccess": "1.7.3",
+ "webrisk": "1.9.3",
+ "websecurityscanner": "1.6.3",
+ "workflows": "1.12.2",
+ "workstations": "0.5.2"
}
diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json
index 6d4c016..f1afd65 100644
--- a/vendor/cloud.google.com/go/.release-please-manifest.json
+++ b/vendor/cloud.google.com/go/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.111.0"
+ ".": "0.110.10"
}
diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md
index acb4bc5..a418e2e 100644
--- a/vendor/cloud.google.com/go/CHANGES.md
+++ b/vendor/cloud.google.com/go/CHANGES.md
@@ -1,17 +1,5 @@
# Changes
-## [0.111.0](https://github.com/googleapis/google-cloud-go/compare/v0.110.10...v0.111.0) (2023-11-29)
-
-
-### Features
-
-* **internal/trace:** Add OpenTelemetry support ([#8655](https://github.com/googleapis/google-cloud-go/issues/8655)) ([7a46b54](https://github.com/googleapis/google-cloud-go/commit/7a46b5428f239871993d66be2c7c667121f60a6f)), refs [#2205](https://github.com/googleapis/google-cloud-go/issues/2205)
-
-
-### Bug Fixes
-
-* **all:** Bump google.golang.org/api to v0.149.0 ([#8959](https://github.com/googleapis/google-cloud-go/issues/8959)) ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
-
## [0.110.10](https://github.com/googleapis/google-cloud-go/compare/v0.110.9...v0.110.10) (2023-10-31)
diff --git a/vendor/cloud.google.com/go/go.work b/vendor/cloud.google.com/go/go.work
index 6080ac6..09b4ea6 100644
--- a/vendor/cloud.google.com/go/go.work
+++ b/vendor/cloud.google.com/go/go.work
@@ -32,7 +32,6 @@
./channel
./cloudbuild
./clouddms
- ./cloudprofiler
./cloudtasks
./commerce
./compute
@@ -59,7 +58,6 @@
./documentai
./domains
./edgecontainer
- ./edgenetwork
./errorreporting
./essentialcontacts
./eventarc
@@ -143,7 +141,6 @@
./storagetransfer
./support
./talent
- ./telcoautomation
./texttospeech
./tpu
./trace
diff --git a/vendor/cloud.google.com/go/go.work.sum b/vendor/cloud.google.com/go/go.work.sum
index 3cfd067..1d48c1d 100644
--- a/vendor/cloud.google.com/go/go.work.sum
+++ b/vendor/cloud.google.com/go/go.work.sum
@@ -1,48 +1,40 @@
cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0 h1:ugYJK/neZQtQeh2jc5xNoDFiMQojlAkoqJMRb7vTu1U=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.18.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 h1:tk85AYGwOf6VNtoOQi8w/kVDi2vmPxp3/OU2FsUpdcA=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.21.0 h1:OEgjQy1rH4Fbn5IpuI9d0uhLl+j6DkDvh9Q2Ucd6GK8=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.21.0/go.mod h1:EUfJ8lb3pjD8VasPPwqIvG2XVCE6DOT8tY5tcwbWA+A=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.45.0 h1:o/Nf55GfyLwGDaHkVAkRGgBXeExce73L6N9w2PZTB3k=
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.45.0/go.mod h1:qkFPtMouQjW5ugdHIOthiTbweVHUTqbS0Qsu55KqXks=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/cloudprober/cloudprober v0.12.8/go.mod h1:RBgsmwfacACvW/VX6/iNVEK+WnNXDhj/5WvII8N76KQ=
github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
-github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/googleapis/gax-go/v2 v2.9.1/go.mod h1:4FG3gMrVZlyMp5itSYKMU9z/lBE7+SbnUOvzH2HqbEY=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-go.opentelemetry.io/contrib/detectors/gcp v1.21.0 h1:K0k6FDSGs65yAka7HFKOqRHagqlMImHUUztERrCkpQc=
-go.opentelemetry.io/contrib/detectors/gcp v1.21.0/go.mod h1:G+Ci5NwilrCP1bj79ApA5wKQnKKBQI46as55sz+y0rQ=
-go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
-go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
-go.opentelemetry.io/otel/bridge/opencensus v0.43.0 h1:E/sf+2slCUb7wqh5FHwhdwKWTA+VXyMMAcFNlKVf4yw=
-go.opentelemetry.io/otel/bridge/opencensus v0.43.0/go.mod h1:2xuXI78Xp9cttLsJMF/Y08cJUqckLt0kLasn+vcHR5w=
-go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
-go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
-go.opentelemetry.io/otel/sdk v1.20.0 h1:5Jf6imeFZlZtKv9Qbo6qt2ZkmWtdWx/wzcCbNUlAWGM=
-go.opentelemetry.io/otel/sdk v1.20.0/go.mod h1:rmkSx1cZCm/tn16iWDn1GQbLtsW/LvsdEEFzCSRM6V0=
-go.opentelemetry.io/otel/sdk/metric v1.20.0 h1:5eD40l/H2CqdKmbSV7iht2KMK0faAIL2pVYzJOWobGk=
-go.opentelemetry.io/otel/sdk/metric v1.20.0/go.mod h1:AGvpC+YF/jblITiafMTYgvRBUiwi9hZf0EYE2E5XlS8=
-go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
-go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
-golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
-golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
-google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4=
-google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg=
+google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0=
+google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
+google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:CCviP9RmpZ1mxVr8MUjCnSiY09IbAXZxhLE6EhHIdPU=
+google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk=
+google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE=
+google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0=
+google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:SUBoKXbI1Efip18FClrQVGjWcyd0QZd8KkvdP34t7ww=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230629202037-9506855d4529/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230911183012-2d3300fd4832/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:qDbnxtViX5J6CvFbxeNUSzKgVlDLJ/6L+caxye9+Flo=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:itlFWGBbEyD32PUeJsTG8h8Wz7iJXfVK4gt1EJ+pAG0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0=
+google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 46c4094..31d1720 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -619,16 +619,6 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/cloudprofiler/apiv2": {
- "api_shortname": "cloudprofiler",
- "distribution_name": "cloud.google.com/go/cloudprofiler/apiv2",
- "description": "Cloud Profiler API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudprofiler/latest/apiv2",
- "release_level": "preview",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/cloudtasks/apiv2": {
"api_shortname": "cloudtasks",
"distribution_name": "cloud.google.com/go/cloudtasks/apiv2",
@@ -1029,16 +1019,6 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/edgenetwork/apiv1": {
- "api_shortname": "edgenetwork",
- "distribution_name": "cloud.google.com/go/edgenetwork/apiv1",
- "description": "Distributed Cloud Edge Network API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1",
- "release_level": "preview",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/errorreporting": {
"api_shortname": "clouderrorreporting",
"distribution_name": "cloud.google.com/go/errorreporting",
@@ -1119,16 +1099,6 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/firestore/apiv1/admin": {
- "api_shortname": "firestore",
- "distribution_name": "cloud.google.com/go/firestore/apiv1/admin",
- "description": "Cloud Firestore API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin",
- "release_level": "stable",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/functions/apiv1": {
"api_shortname": "cloudfunctions",
"distribution_name": "cloud.google.com/go/functions/apiv1",
@@ -2309,16 +2279,6 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/telcoautomation/apiv1": {
- "api_shortname": "telcoautomation",
- "distribution_name": "cloud.google.com/go/telcoautomation/apiv1",
- "description": "Telco Automation API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1",
- "release_level": "preview",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/texttospeech/apiv1": {
"api_shortname": "texttospeech",
"distribution_name": "cloud.google.com/go/texttospeech/apiv1",
diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go
index f6b8825..c201d34 100644
--- a/vendor/cloud.google.com/go/internal/trace/trace.go
+++ b/vendor/cloud.google.com/go/internal/trace/trace.go
@@ -16,94 +16,35 @@
import (
"context"
- "errors"
"fmt"
- "os"
- "strings"
"go.opencensus.io/trace"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- ottrace "go.opentelemetry.io/otel/trace"
+ "golang.org/x/xerrors"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
-const (
- telemetryPlatformTracingOpenCensus = "opencensus"
- telemetryPlatformTracingOpenTelemetry = "opentelemetry"
- telemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
-)
-
-var (
- // TODO(chrisdsmith): Should the name of the OpenTelemetry tracer be public and mutable?
- openTelemetryTracerName string = "cloud.google.com/go"
- openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
- os.Getenv(telemetryPlatformTracingVar)), telemetryPlatformTracingOpenTelemetry)
-)
-
-// IsOpenCensusTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
-// case-insensitive value "opentelemetry".
-func IsOpenCensusTracingEnabled() bool {
- return !IsOpenTelemetryTracingEnabled()
-}
-
-// IsOpenTelemetryTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-// case-insensitive value "opentelemetry".
-func IsOpenTelemetryTracingEnabled() bool {
- return openTelemetryTracingEnabled
-}
-
-// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until [TBD], at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// StartSpan adds a span to the trace with the given name.
func StartSpan(ctx context.Context, name string) context.Context {
- if IsOpenTelemetryTracingEnabled() {
- ctx, _ = otel.GetTracerProvider().Tracer(openTelemetryTracerName).Start(ctx, name)
- } else {
- ctx, _ = trace.StartSpan(ctx, name)
- }
+ ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
-// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until [TBD], at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// EndSpan ends a span with the given error.
func EndSpan(ctx context.Context, err error) {
- if IsOpenTelemetryTracingEnabled() {
- span := ottrace.SpanFromContext(ctx)
- if err != nil {
- span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
- span.RecordError(err)
- }
- span.End()
- } else {
- span := trace.FromContext(ctx)
- if err != nil {
- span.SetStatus(toStatus(err))
- }
- span.End()
+ span := trace.FromContext(ctx)
+ if err != nil {
+ span.SetStatus(toStatus(err))
}
+ span.End()
}
-// toStatus converts an error to an equivalent OpenCensus status.
+// toStatus interrogates an error and converts it to an appropriate
+// OpenCensus status.
func toStatus(err error) trace.Status {
var err2 *googleapi.Error
- if ok := errors.As(err, &err2); ok {
+ if ok := xerrors.As(err, &err2); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
@@ -112,18 +53,6 @@
}
}
-// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description.
-func toOpenTelemetryStatusDescription(err error) string {
- var err2 *googleapi.Error
- if ok := errors.As(err, &err2); ok {
- return err2.Message
- } else if s, ok := status.FromError(err); ok {
- return s.Message()
- } else {
- return err.Error()
- }
-}
-
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
@@ -157,33 +86,10 @@
}
}
-// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then:
-// * calls Span.Annotatef if OpenCensus is enabled; or
-// * calls Span.AddEvent if OpenTelemetry is enabled.
-//
-// If IsOpenCensusTracingEnabled returns true, the expected span must be an
-// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
-// span must be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until [TBD], at which time the default will
-// switch to "opentelemetry" and explicitly setting the environment variable to
-// "opencensus" will be required to continue using OpenCensus tracing.
+// TODO: (odeke-em): perhaps just pass around spans due to the cost
+// incurred from using trace.FromContext(ctx) yet we could avoid
+// throwing away the work done by ctx, span := trace.StartSpan.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
- if IsOpenTelemetryTracingEnabled() {
- attrs := otAttrs(attrMap)
- ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...))
- } else {
- attrs := ocAttrs(attrMap)
- // TODO: (odeke-em): perhaps just pass around spans due to the cost
- // incurred from using trace.FromContext(ctx) yet we could avoid
- // throwing away the work done by ctx, span := trace.StartSpan.
- trace.FromContext(ctx).Annotatef(attrs, format, args...)
- }
-}
-
-// ocAttrs converts a generic map to OpenCensus attributes.
-func ocAttrs(attrMap map[string]interface{}) []trace.Attribute {
var attrs []trace.Attribute
for k, v := range attrMap {
var a trace.Attribute
@@ -201,27 +107,5 @@
}
attrs = append(attrs, a)
}
- return attrs
-}
-
-// otAttrs converts a generic map to OpenTelemetry attributes.
-func otAttrs(attrMap map[string]interface{}) []attribute.KeyValue {
- var attrs []attribute.KeyValue
- for k, v := range attrMap {
- var a attribute.KeyValue
- switch v := v.(type) {
- case string:
- a = attribute.Key(k).String(v)
- case bool:
- a = attribute.Key(k).Bool(v)
- case int:
- a = attribute.Key(k).Int(v)
- case int64:
- a = attribute.Key(k).Int64(v)
- default:
- a = attribute.Key(k).String(fmt.Sprintf("%#v", v))
- }
- attrs = append(attrs, a)
- }
- return attrs
+ trace.FromContext(ctx).Annotatef(attrs, format, args...)
}
diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
index db5a40d..d2bd9b4 100644
--- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
+++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json
@@ -81,9 +81,6 @@
"clouddms": {
"component": "clouddms"
},
- "cloudprofiler": {
- "component": "cloudprofiler"
- },
"cloudtasks": {
"component": "cloudtasks"
},
@@ -159,9 +156,6 @@
"edgecontainer": {
"component": "edgecontainer"
},
- "edgenetwork": {
- "component": "edgenetwork"
- },
"essentialcontacts": {
"component": "essentialcontacts"
},
@@ -354,9 +348,6 @@
"talent": {
"component": "talent"
},
- "telcoautomation": {
- "component": "telcoautomation"
- },
"texttospeech": {
"component": "texttospeech"
},
diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml
new file mode 100644
index 0000000..bfc4212
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
index 4e12afd..2d84889 100644
--- a/vendor/github.com/felixge/httpsnoop/Makefile
+++ b/vendor/github.com/felixge/httpsnoop/Makefile
@@ -1,7 +1,7 @@
.PHONY: ci generate clean
ci: clean generate
- go test -race -v ./...
+ go test -v ./...
generate:
go generate .
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
index cf6b42f..ddcecd1 100644
--- a/vendor/github.com/felixge/httpsnoop/README.md
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -7,8 +7,8 @@
Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
which is also exposed for users interested in a more low-level API.
-[](https://pkg.go.dev/github.com/felixge/httpsnoop)
-[](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
+[](https://godoc.org/github.com/felixge/httpsnoop)
+[](https://travis-ci.org/felixge/httpsnoop)
## Usage Example
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
index bec7b71..b77cc7c 100644
--- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -52,7 +52,7 @@
return func(code int) {
next(code)
- if !(code >= 100 && code <= 199) && !headerWritten {
+ if !headerWritten {
m.Code = code
headerWritten = true
}
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
index 101cedd..31cbdfb 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
@@ -1,5 +1,5 @@
// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT
package httpsnoop
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
index e0951df..ab99c07 100644
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
@@ -1,5 +1,5 @@
// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
+// Code generated by "httpsnoop/codegen"; DO NOT EDIT
package httpsnoop
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
deleted file mode 100644
index 0cffafa..0000000
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-run:
- timeout: 1m
- tests: true
-
-linters:
- disable-all: true
- enable:
- - asciicheck
- - errcheck
- - forcetypeassert
- - gocritic
- - gofmt
- - goimports
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - typecheck
- - unused
-
-issues:
- exclude-use-default: false
- max-issues-per-linter: 0
- max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
deleted file mode 100644
index c356960..0000000
--- a/vendor/github.com/go-logr/logr/CHANGELOG.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# CHANGELOG
-
-## v1.0.0-rc1
-
-This is the first logged release. Major changes (including breaking changes)
-have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
deleted file mode 100644
index 5d37e29..0000000
--- a/vendor/github.com/go-logr/logr/CONTRIBUTING.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Contributing
-
-Logr is open to pull-requests, provided they fit within the intended scope of
-the project. Specifically, this library aims to be VERY small and minimalist,
-with no external dependencies.
-
-## Compatibility
-
-This project intends to follow [semantic versioning](http://semver.org) and
-is very strict about compatibility. Any proposed changes MUST follow those
-rules.
-
-## Performance
-
-As a logging library, logr must be as light-weight as possible. Any proposed
-code change must include results of running the [benchmark](./benchmark)
-before and after the change.
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/github.com/go-logr/logr/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
deleted file mode 100644
index a8c29bf..0000000
--- a/vendor/github.com/go-logr/logr/README.md
+++ /dev/null
@@ -1,393 +0,0 @@
-# A minimal logging API for Go
-
-[](https://pkg.go.dev/github.com/go-logr/logr)
-[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
-
-logr offers an(other) opinion on how Go programs and libraries can do logging
-without becoming coupled to a particular logging implementation. This is not
-an implementation of logging - it is an API. In fact it is two APIs with two
-different sets of users.
-
-The `Logger` type is intended for application and library authors. It provides
-a relatively small API which can be used everywhere you want to emit logs. It
-defers the actual act of writing logs (to files, to stdout, or whatever) to the
-`LogSink` interface.
-
-The `LogSink` interface is intended for logging library implementers. It is a
-pure interface which can be implemented by logging frameworks to provide the actual logging
-functionality.
-
-This decoupling allows application and library developers to write code in
-terms of `logr.Logger` (which has very low dependency fan-out) while the
-implementation of logging is managed "up stack" (e.g. in or near `main()`.)
-Application developers can then switch out implementations as necessary.
-
-Many people assert that libraries should not be logging, and as such efforts
-like this are pointless. Those people are welcome to convince the authors of
-the tens-of-thousands of libraries that *DO* write logs that they are all
-wrong. In the meantime, logr takes a more practical approach.
-
-## Typical usage
-
-Somewhere, early in an application's life, it will make a decision about which
-logging library (implementation) it actually wants to use. Something like:
-
-```
- func main() {
- // ... other setup code ...
-
- // Create the "root" logger. We have chosen the "logimpl" implementation,
- // which takes some initial parameters and returns a logr.Logger.
- logger := logimpl.New(param1, param2)
-
- // ... other setup code ...
-```
-
-Most apps will call into other libraries, create structures to govern the flow,
-etc. The `logr.Logger` object can be passed to these other libraries, stored
-in structs, or even used as a package-global variable, if needed. For example:
-
-```
- app := createTheAppObject(logger)
- app.Run()
-```
-
-Outside of this early setup, no other packages need to know about the choice of
-implementation. They write logs in terms of the `logr.Logger` that they
-received:
-
-```
- type appObject struct {
- // ... other fields ...
- logger logr.Logger
- // ... other fields ...
- }
-
- func (app *appObject) Run() {
- app.logger.Info("starting up", "timestamp", time.Now())
-
- // ... app code ...
-```
-
-## Background
-
-If the Go standard library had defined an interface for logging, this project
-probably would not be needed. Alas, here we are.
-
-When the Go developers started developing such an interface with
-[slog](https://github.com/golang/go/issues/56345), they adopted some of the
-logr design but also left out some parts and changed others:
-
-| Feature | logr | slog |
-|---------|------|------|
-| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
-| Low-level API | `LogSink` | `Handler` |
-| Stack unwinding | done by `LogSink` | done by `Logger` |
-| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
-| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
-| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
-| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
-| Passing logger via context | `NewContext`, `FromContext` | no API |
-| Adding a name to a logger | `WithName` | no API |
-| Modify verbosity of log entries in a call chain | `V` | no API |
-| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
-
-The high-level slog API is explicitly meant to be one of many different APIs
-that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
-package.
-
-### Inspiration
-
-Before you consider this package, please read [this blog post by the
-inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
-he has to say, and it largely aligns with our own experiences.
-
-### Differences from Dave's ideas
-
-The main differences are:
-
-1. Dave basically proposes doing away with the notion of a logging API in favor
-of `fmt.Printf()`. We disagree, especially when you consider things like output
-locations, timestamps, file and line decorations, and structured logging. This
-package restricts the logging API to just 2 types of logs: info and error.
-
-Info logs are things you want to tell the user which are not errors. Error
-logs are, well, errors. If your code receives an `error` from a subordinate
-function call and is logging that `error` *and not returning it*, use error
-logs.
-
-2. Verbosity-levels on info logs. This gives developers a chance to indicate
-arbitrary grades of importance for info logs, without assigning names with
-semantic meaning such as "warning", "trace", and "debug." Superficially this
-may feel very similar, but the primary difference is the lack of semantics.
-Because verbosity is a numerical value, it's safe to assume that an app running
-with higher verbosity means more (and less important) logs will be generated.
-
-## Implementations (non-exhaustive)
-
-There are implementations for the following logging libraries:
-
-- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
-- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
-- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
-- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
-- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
-- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
-- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
-- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
-- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
-- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
-- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
-- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
-- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
-
-## slog interoperability
-
-Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
-`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
-As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API. `slogr` itself leaves that to the caller.
-
-## Using a `logr.Sink` as backend for slog
-
-Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
-of a conflict in the parameters of the common `Enabled` method, it is [not
-possible to implement both slog.Handler and logr.Sink in the same
-type](https://github.com/golang/go/issues/59110).
-
-If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
-convert back and forth without adding additional wrappers, with one exception:
-when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
-log calls.
-
-Such an implementation should also support values that implement specific
-interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
-`slog.GroupValue`). logr does not convert those.
-
-Not supporting slog has several drawbacks:
-- Recording source code locations works correctly if the handler gets called
- through `slog.Logger`, but may be wrong in other cases. That's because a
- `logr.Sink` does its own stack unwinding instead of using the program counter
- provided by the high-level API.
-- slog levels <= 0 can be mapped to logr levels by negating the level without a
- loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
- used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
- because logr does not support "more important than info" levels.
-- The slog group concept is supported by prefixing each key in a key/value
- pair with the group names, separated by a dot. For structured output like
- JSON it would be better to group the key/value pairs inside an object.
-- Special slog values and interfaces don't work as expected.
-- The overhead is likely to be higher.
-
-These drawbacks are severe enough that applications using a mixture of slog and
-logr should switch to a different backend.
-
-## Using a `slog.Handler` as backend for logr
-
-Using a plain `slog.Handler` without support for logr works better than the
-other direction:
-- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
- by negating them.
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
- counter is passed to the `slog.Handler`.
-- Names added via `Logger.WithName` are gathered and recorded in an additional
- attribute with `logger` as key and the names separated by slash as value.
-- `Logger.Error` is turned into a log record with `slog.LevelError` as level
- and an additional attribute with `err` as key, if an error was provided.
-
-The main drawback is that `logr.Marshaler` will not be supported. Types should
-ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
-with logr implementations without slog support is not important, then
-`slog.Valuer` is sufficient.
-
-## Context support for slog
-
-Storing a logger in a `context.Context` is not supported by
-slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
-to fill this gap:
-
- func HandlerFromContext(ctx context.Context) slog.Handler {
- logger, err := logr.FromContext(ctx)
- if err == nil {
- return slogr.NewSlogHandler(logger)
- }
- return slog.Default().Handler()
- }
-
- func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
- return logr.NewContext(ctx, slogr.NewLogr(handler))
- }
-
-The downside is that storing and retrieving a `slog.Handler` needs more
-allocations compared to using a `logr.Logger`. Therefore the recommendation is
-to use the `logr.Logger` API in code which uses contextual logging.
-
-## FAQ
-
-### Conceptual
-
-#### Why structured logging?
-
-- **Structured logs are more easily queryable**: Since you've got
- key-value pairs, it's much easier to query your structured logs for
- particular values by filtering on the contents of a particular key --
- think searching request logs for error codes, Kubernetes reconcilers for
- the name and namespace of the reconciled object, etc.
-
-- **Structured logging makes it easier to have cross-referenceable logs**:
- Similarly to searchability, if you maintain conventions around your
- keys, it becomes easy to gather all log lines related to a particular
- concept.
-
-- **Structured logs allow better dimensions of filtering**: if you have
- structure to your logs, you've got more precise control over how much
- information is logged -- you might choose in a particular configuration
- to log certain keys but not others, only log lines where a certain key
- matches a certain value, etc., instead of just having v-levels and names
- to key off of.
-
-- **Structured logs better represent structured data**: sometimes, the
- data that you want to log is inherently structured (think tuple-link
- objects.) Structured logs allow you to preserve that structure when
- outputting.
-
-#### Why V-levels?
-
-**V-levels give operators an easy way to control the chattiness of log
-operations**. V-levels provide a way for a given package to distinguish
-the relative importance or verbosity of a given log message. Then, if
-a particular logger or package is logging too many messages, the user
-of the package can simply change the v-levels for that library.
-
-#### Why not named levels, like Info/Warning/Error?
-
-Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
-from Dave's ideas](#differences-from-daves-ideas).
-
-#### Why not allow format strings, too?
-
-**Format strings negate many of the benefits of structured logs**:
-
-- They're not easily searchable without resorting to fuzzy searching,
- regular expressions, etc.
-
-- They don't store structured data well, since contents are flattened into
- a string.
-
-- They're not cross-referenceable.
-
-- They don't compress easily, since the message is not constant.
-
-(Unless you turn positional parameters into key-value pairs with numerical
-keys, at which point you've gotten key-value logging with meaningless
-keys.)
-
-### Practical
-
-#### Why key-value pairs, and not a map?
-
-Key-value pairs are *much* easier to optimize, especially around
-allocations. Zap (a structured logger that inspired logr's interface) has
-[performance measurements](https://github.com/uber-go/zap#performance)
-that show this quite nicely.
-
-While the interface ends up being a little less obvious, you get
-potentially better performance, plus avoid making users type
-`map[string]string{}` every time they want to log.
-
-#### What if my V-levels differ between libraries?
-
-That's fine. Control your V-levels on a per-logger basis, and use the
-`WithName` method to pass different loggers to different libraries.
-
-Generally, you should take care to ensure that you have relatively
-consistent V-levels within a given logger, however, as this makes deciding
-on what verbosity of logs to request easier.
-
-#### But I really want to use a format string!
-
-That's not actually a question. Assuming your question is "how do
-I convert my mental model of logging with format strings to logging with
-constant messages":
-
-1. Figure out what the error actually is, as you'd write in a TL;DR style,
- and use that as a message.
-
-2. For every place you'd write a format specifier, look to the word before
- it, and add that as a key value pair.
-
-For instance, consider the following examples (all taken from spots in the
-Kubernetes codebase):
-
-- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
- responseCode, err)` becomes `logger.Error(err, "client returned an
- error", "code", responseCode)`
-
-- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
- seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
- response when requesting url", "attempt", retries, "after
- seconds", seconds, "url", url)`
-
-If you *really* must use a format string, use it in a key's value, and
-call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
-reflect over type %T")` becomes `logger.Info("unable to reflect over
-type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
-this is necessary should be few and far between.
-
-#### How do I choose my V-levels?
-
-This is basically the only hard constraint: increase V-levels to denote
-more verbose or more debug-y logs.
-
-Otherwise, you can start out with `0` as "you always want to see this",
-`1` as "common logging that you might *possibly* want to turn off", and
-`10` as "I would like to performance-test your log collection stack."
-
-Then gradually choose levels in between as you need them, working your way
-down from 10 (for debug and trace style logs) and up from 1 (for chattier
-info-type logs). For reference, slog pre-defines -4 for debug logs
-(corresponds to 4 in logr), which matches what is
-[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
-
-#### How do I choose my keys?
-
-Keys are fairly flexible, and can hold more or less any string
-value. For best compatibility with implementations and consistency
-with existing code in other projects, there are a few conventions you
-should consider.
-
-- Make your keys human-readable.
-- Constant keys are generally a good idea.
-- Be consistent across your codebase.
-- Keys should naturally match parts of the message string.
-- Use lower case for simple keys and
- [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
- more complex ones. Kubernetes is one example of a project that has
- [adopted that
- convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
-
-While key names are mostly unrestricted (and spaces are acceptable),
-it's generally a good idea to stick to printable ascii characters, or at
-least match the general character set of your log lines.
-
-#### Why should keys be constant values?
-
-The point of structured logging is to make later log processing easier. Your
-keys are, effectively, the schema of each log message. If you use different
-keys across instances of the same log line, you will make your structured logs
-much harder to use. `Sprintf()` is for values, not for keys!
-
-#### Why is this not a pure interface?
-
-The Logger type is implemented as a struct in order to allow the Go compiler to
-optimize things like high-V `Info` logs that are not triggered. Not all of
-these implementations are implemented yet, but this structure was suggested as
-a way to ensure they *can* be implemented. All of the real work is behind the
-`LogSink` interface.
-
-[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
deleted file mode 100644
index 1ca756f..0000000
--- a/vendor/github.com/go-logr/logr/SECURITY.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Security Policy
-
-If you have discovered a security vulnerability in this project, please report it
-privately. **Do not disclose it as a public issue.** This gives us time to work with you
-to fix the issue before public exposure, reducing the chance that the exploit will be
-used before a patch is released.
-
-You may submit the report in the following ways:
-
-- send an email to go-logr-security@googlegroups.com
-- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
-
-Please provide the following information in your report:
-
-- A description of the vulnerability and its impact
-- How to reproduce the issue
-
-We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
deleted file mode 100644
index 99fe8be..0000000
--- a/vendor/github.com/go-logr/logr/discard.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2020 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-// Discard returns a Logger that discards all messages logged to it. It can be
-// used whenever the caller is not interested in the logs. Logger instances
-// produced by this function always compare as equal.
-func Discard() Logger {
- return New(nil)
-}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
deleted file mode 100644
index 12e5807..0000000
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ /dev/null
@@ -1,804 +0,0 @@
-/*
-Copyright 2021 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package funcr implements formatting of structured log messages and
-// optionally captures the call site and timestamp.
-//
-// The simplest way to use it is via its implementation of a
-// github.com/go-logr/logr.LogSink with output through an arbitrary
-// "write" function. See New and NewJSON for details.
-//
-// # Custom LogSinks
-//
-// For users who need more control, a funcr.Formatter can be embedded inside
-// your own custom LogSink implementation. This is useful when the LogSink
-// needs to implement additional methods, for example.
-//
-// # Formatting
-//
-// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
-// values which are being logged. When rendering a struct, funcr will use Go's
-// standard JSON tags (all except "string").
-package funcr
-
-import (
- "bytes"
- "encoding"
- "encoding/json"
- "fmt"
- "path/filepath"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/go-logr/logr"
-)
-
-// New returns a logr.Logger which is implemented by an arbitrary function.
-func New(fn func(prefix, args string), opts Options) logr.Logger {
- return logr.New(newSink(fn, NewFormatter(opts)))
-}
-
-// NewJSON returns a logr.Logger which is implemented by an arbitrary function
-// and produces JSON output.
-func NewJSON(fn func(obj string), opts Options) logr.Logger {
- fnWrapper := func(_, obj string) {
- fn(obj)
- }
- return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
-}
-
-// Underlier exposes access to the underlying logging function. Since
-// callers only have a logr.Logger, they have to know which
-// implementation is in use, so this interface is less of an
-// abstraction and more of a way to test type conversion.
-type Underlier interface {
- GetUnderlying() func(prefix, args string)
-}
-
-func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
- l := &fnlogger{
- Formatter: formatter,
- write: fn,
- }
- // For skipping fnlogger.Info and fnlogger.Error.
- l.Formatter.AddCallDepth(1)
- return l
-}
-
-// Options carries parameters which influence the way logs are generated.
-type Options struct {
- // LogCaller tells funcr to add a "caller" key to some or all log lines.
- // This has some overhead, so some users might not want it.
- LogCaller MessageClass
-
- // LogCallerFunc tells funcr to also log the calling function name. This
- // has no effect if caller logging is not enabled (see Options.LogCaller).
- LogCallerFunc bool
-
- // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
- // overhead, so some users might not want it.
- LogTimestamp bool
-
- // TimestampFormat tells funcr how to render timestamps when LogTimestamp
- // is enabled. If not specified, a default format will be used. For more
- // details, see docs for Go's time.Layout.
- TimestampFormat string
-
- // Verbosity tells funcr which V logs to produce. Higher values enable
- // more logs. Info logs at or below this level will be written, while logs
- // above this level will be discarded.
- Verbosity int
-
- // RenderBuiltinsHook allows users to mutate the list of key-value pairs
- // while a log line is being rendered. The kvList argument follows logr
- // conventions - each pair of slice elements is comprised of a string key
- // and an arbitrary value (verified and sanitized before calling this
- // hook). The value returned must follow the same conventions. This hook
- // can be used to audit or modify logged data. For example, you might want
- // to prefix all of funcr's built-in keys with some string. This hook is
- // only called for built-in (provided by funcr itself) key-value pairs.
- // Equivalent hooks are offered for key-value pairs saved via
- // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
- // for user-provided pairs (see RenderArgsHook).
- RenderBuiltinsHook func(kvList []any) []any
-
- // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
- // only called for key-value pairs saved via logr.Logger.WithValues. See
- // RenderBuiltinsHook for more details.
- RenderValuesHook func(kvList []any) []any
-
- // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
- // called for key-value pairs passed directly to Info and Error. See
- // RenderBuiltinsHook for more details.
- RenderArgsHook func(kvList []any) []any
-
- // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
- // that contains a struct, etc.) it may log. Every time it finds a struct,
- // slice, array, or map the depth is increased by one. When the maximum is
- // reached, the value will be converted to a string indicating that the max
- // depth has been exceeded. If this field is not specified, a default
- // value will be used.
- MaxLogDepth int
-}
-
-// MessageClass indicates which category or categories of messages to consider.
-type MessageClass int
-
-const (
- // None ignores all message classes.
- None MessageClass = iota
- // All considers all message classes.
- All
- // Info only considers info messages.
- Info
- // Error only considers error messages.
- Error
-)
-
-// fnlogger inherits some of its LogSink implementation from Formatter
-// and just needs to add some glue code.
-type fnlogger struct {
- Formatter
- write func(prefix, args string)
-}
-
-func (l fnlogger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
- return &l
-}
-
-func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
- l.Formatter.AddValues(kvList)
- return &l
-}
-
-func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
- return &l
-}
-
-func (l fnlogger) Info(level int, msg string, kvList ...any) {
- prefix, args := l.FormatInfo(level, msg, kvList)
- l.write(prefix, args)
-}
-
-func (l fnlogger) Error(err error, msg string, kvList ...any) {
- prefix, args := l.FormatError(err, msg, kvList)
- l.write(prefix, args)
-}
-
-func (l fnlogger) GetUnderlying() func(prefix, args string) {
- return l.write
-}
-
-// Assert conformance to the interfaces.
-var _ logr.LogSink = &fnlogger{}
-var _ logr.CallDepthLogSink = &fnlogger{}
-var _ Underlier = &fnlogger{}
-
-// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
-func NewFormatter(opts Options) Formatter {
- return newFormatter(opts, outputKeyValue)
-}
-
-// NewFormatterJSON constructs a Formatter which emits strict JSON.
-func NewFormatterJSON(opts Options) Formatter {
- return newFormatter(opts, outputJSON)
-}
-
-// Defaults for Options.
-const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
-const defaultMaxLogDepth = 16
-
-func newFormatter(opts Options, outfmt outputFormat) Formatter {
- if opts.TimestampFormat == "" {
- opts.TimestampFormat = defaultTimestampFormat
- }
- if opts.MaxLogDepth == 0 {
- opts.MaxLogDepth = defaultMaxLogDepth
- }
- f := Formatter{
- outputFormat: outfmt,
- prefix: "",
- values: nil,
- depth: 0,
- opts: &opts,
- }
- return f
-}
-
-// Formatter is an opaque struct which can be embedded in a LogSink
-// implementation. It should be constructed with NewFormatter. Some of
-// its methods directly implement logr.LogSink.
-type Formatter struct {
- outputFormat outputFormat
- prefix string
- values []any
- valuesStr string
- depth int
- opts *Options
-}
-
-// outputFormat indicates which outputFormat to use.
-type outputFormat int
-
-const (
- // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
- outputKeyValue outputFormat = iota
- // outputJSON emits strict JSON.
- outputJSON
-)
-
-// PseudoStruct is a list of key-value pairs that gets logged as a struct.
-type PseudoStruct []any
-
-// render produces a log line, ready to use.
-func (f Formatter) render(builtins, args []any) string {
- // Empirically bytes.Buffer is faster than strings.Builder for this.
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- if f.outputFormat == outputJSON {
- buf.WriteByte('{')
- }
- vals := builtins
- if hook := f.opts.RenderBuiltinsHook; hook != nil {
- vals = hook(f.sanitize(vals))
- }
- f.flatten(buf, vals, false, false) // keys are ours, no need to escape
- continuing := len(builtins) > 0
- if len(f.valuesStr) > 0 {
- if continuing {
- if f.outputFormat == outputJSON {
- buf.WriteByte(',')
- } else {
- buf.WriteByte(' ')
- }
- }
- continuing = true
- buf.WriteString(f.valuesStr)
- }
- vals = args
- if hook := f.opts.RenderArgsHook; hook != nil {
- vals = hook(f.sanitize(vals))
- }
- f.flatten(buf, vals, continuing, true) // escape user-provided keys
- if f.outputFormat == outputJSON {
- buf.WriteByte('}')
- }
- return buf.String()
-}
-
-// flatten renders a list of key-value pairs into a buffer. If continuing is
-// true, it assumes that the buffer has previous values and will emit a
-// separator (which depends on the output format) before the first pair it
-// writes. If escapeKeys is true, the keys are assumed to have
-// non-JSON-compatible characters in them and must be evaluated for escapes.
-//
-// This function returns a potentially modified version of kvList, which
-// ensures that there is a value for every key (adding a value if needed) and
-// that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
- // This logic overlaps with sanitize() but saves one type-cast per key,
- // which can be measurable.
- if len(kvList)%2 != 0 {
- kvList = append(kvList, noValue)
- }
- for i := 0; i < len(kvList); i += 2 {
- k, ok := kvList[i].(string)
- if !ok {
- k = f.nonStringKey(kvList[i])
- kvList[i] = k
- }
- v := kvList[i+1]
-
- if i > 0 || continuing {
- if f.outputFormat == outputJSON {
- buf.WriteByte(',')
- } else {
- // In theory the format could be something we don't understand. In
- // practice, we control it, so it won't be.
- buf.WriteByte(' ')
- }
- }
-
- if escapeKeys {
- buf.WriteString(prettyString(k))
- } else {
- // this is faster
- buf.WriteByte('"')
- buf.WriteString(k)
- buf.WriteByte('"')
- }
- if f.outputFormat == outputJSON {
- buf.WriteByte(':')
- } else {
- buf.WriteByte('=')
- }
- buf.WriteString(f.pretty(v))
- }
- return kvList
-}
-
-func (f Formatter) pretty(value any) string {
- return f.prettyWithFlags(value, 0, 0)
-}
-
-const (
- flagRawStruct = 0x1 // do not print braces on structs
-)
-
-// TODO: This is not fast. Most of the overhead goes here.
-func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
- if depth > f.opts.MaxLogDepth {
- return `"<max-log-depth-exceeded>"`
- }
-
- // Handle types that take full control of logging.
- if v, ok := value.(logr.Marshaler); ok {
- // Replace the value with what the type wants to get logged.
- // That then gets handled below via reflection.
- value = invokeMarshaler(v)
- }
-
- // Handle types that want to format themselves.
- switch v := value.(type) {
- case fmt.Stringer:
- value = invokeStringer(v)
- case error:
- value = invokeError(v)
- }
-
- // Handling the most common types without reflect is a small perf win.
- switch v := value.(type) {
- case bool:
- return strconv.FormatBool(v)
- case string:
- return prettyString(v)
- case int:
- return strconv.FormatInt(int64(v), 10)
- case int8:
- return strconv.FormatInt(int64(v), 10)
- case int16:
- return strconv.FormatInt(int64(v), 10)
- case int32:
- return strconv.FormatInt(int64(v), 10)
- case int64:
- return strconv.FormatInt(int64(v), 10)
- case uint:
- return strconv.FormatUint(uint64(v), 10)
- case uint8:
- return strconv.FormatUint(uint64(v), 10)
- case uint16:
- return strconv.FormatUint(uint64(v), 10)
- case uint32:
- return strconv.FormatUint(uint64(v), 10)
- case uint64:
- return strconv.FormatUint(v, 10)
- case uintptr:
- return strconv.FormatUint(uint64(v), 10)
- case float32:
- return strconv.FormatFloat(float64(v), 'f', -1, 32)
- case float64:
- return strconv.FormatFloat(v, 'f', -1, 64)
- case complex64:
- return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
- case complex128:
- return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
- case PseudoStruct:
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- v = f.sanitize(v)
- if flags&flagRawStruct == 0 {
- buf.WriteByte('{')
- }
- for i := 0; i < len(v); i += 2 {
- if i > 0 {
- buf.WriteByte(',')
- }
- k, _ := v[i].(string) // sanitize() above means no need to check success
- // arbitrary keys might need escaping
- buf.WriteString(prettyString(k))
- buf.WriteByte(':')
- buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
- }
- if flags&flagRawStruct == 0 {
- buf.WriteByte('}')
- }
- return buf.String()
- }
-
- buf := bytes.NewBuffer(make([]byte, 0, 256))
- t := reflect.TypeOf(value)
- if t == nil {
- return "null"
- }
- v := reflect.ValueOf(value)
- switch t.Kind() {
- case reflect.Bool:
- return strconv.FormatBool(v.Bool())
- case reflect.String:
- return prettyString(v.String())
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(int64(v.Int()), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return strconv.FormatUint(uint64(v.Uint()), 10)
- case reflect.Float32:
- return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
- case reflect.Float64:
- return strconv.FormatFloat(v.Float(), 'f', -1, 64)
- case reflect.Complex64:
- return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
- case reflect.Complex128:
- return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
- case reflect.Struct:
- if flags&flagRawStruct == 0 {
- buf.WriteByte('{')
- }
- printComma := false // testing i>0 is not enough because of JSON omitted fields
- for i := 0; i < t.NumField(); i++ {
- fld := t.Field(i)
- if fld.PkgPath != "" {
- // reflect says this field is only defined for non-exported fields.
- continue
- }
- if !v.Field(i).CanInterface() {
- // reflect isn't clear exactly what this means, but we can't use it.
- continue
- }
- name := ""
- omitempty := false
- if tag, found := fld.Tag.Lookup("json"); found {
- if tag == "-" {
- continue
- }
- if comma := strings.Index(tag, ","); comma != -1 {
- if n := tag[:comma]; n != "" {
- name = n
- }
- rest := tag[comma:]
- if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
- omitempty = true
- }
- } else {
- name = tag
- }
- }
- if omitempty && isEmpty(v.Field(i)) {
- continue
- }
- if printComma {
- buf.WriteByte(',')
- }
- printComma = true // if we got here, we are rendering a field
- if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
- buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
- continue
- }
- if name == "" {
- name = fld.Name
- }
- // field names can't contain characters which need escaping
- buf.WriteByte('"')
- buf.WriteString(name)
- buf.WriteByte('"')
- buf.WriteByte(':')
- buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
- }
- if flags&flagRawStruct == 0 {
- buf.WriteByte('}')
- }
- return buf.String()
- case reflect.Slice, reflect.Array:
- // If this is outputing as JSON make sure this isn't really a json.RawMessage.
- // If so just emit "as-is" and don't pretty it as that will just print
- // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
- if f.outputFormat == outputJSON {
- if rm, ok := value.(json.RawMessage); ok {
- // If it's empty make sure we emit an empty value as the array style would below.
- if len(rm) > 0 {
- buf.Write(rm)
- } else {
- buf.WriteString("null")
- }
- return buf.String()
- }
- }
- buf.WriteByte('[')
- for i := 0; i < v.Len(); i++ {
- if i > 0 {
- buf.WriteByte(',')
- }
- e := v.Index(i)
- buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
- }
- buf.WriteByte(']')
- return buf.String()
- case reflect.Map:
- buf.WriteByte('{')
- // This does not sort the map keys, for best perf.
- it := v.MapRange()
- i := 0
- for it.Next() {
- if i > 0 {
- buf.WriteByte(',')
- }
- // If a map key supports TextMarshaler, use it.
- keystr := ""
- if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
- txt, err := m.MarshalText()
- if err != nil {
- keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
- } else {
- keystr = string(txt)
- }
- keystr = prettyString(keystr)
- } else {
- // prettyWithFlags will produce already-escaped values
- keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
- if t.Key().Kind() != reflect.String {
- // JSON only does string keys. Unlike Go's standard JSON, we'll
- // convert just about anything to a string.
- keystr = prettyString(keystr)
- }
- }
- buf.WriteString(keystr)
- buf.WriteByte(':')
- buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
- i++
- }
- buf.WriteByte('}')
- return buf.String()
- case reflect.Ptr, reflect.Interface:
- if v.IsNil() {
- return "null"
- }
- return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
- }
- return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
-}
-
-func prettyString(s string) string {
- // Avoid escaping (which does allocations) if we can.
- if needsEscape(s) {
- return strconv.Quote(s)
- }
- b := bytes.NewBuffer(make([]byte, 0, 1024))
- b.WriteByte('"')
- b.WriteString(s)
- b.WriteByte('"')
- return b.String()
-}
-
-// needsEscape determines whether the input string needs to be escaped or not,
-// without doing any allocations.
-func needsEscape(s string) bool {
- for _, r := range s {
- if !strconv.IsPrint(r) || r == '\\' || r == '"' {
- return true
- }
- }
- return false
-}
-
-func isEmpty(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Complex64, reflect.Complex128:
- return v.Complex() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
-
-func invokeMarshaler(m logr.Marshaler) (ret any) {
- defer func() {
- if r := recover(); r != nil {
- ret = fmt.Sprintf("<panic: %s>", r)
- }
- }()
- return m.MarshalLog()
-}
-
-func invokeStringer(s fmt.Stringer) (ret string) {
- defer func() {
- if r := recover(); r != nil {
- ret = fmt.Sprintf("<panic: %s>", r)
- }
- }()
- return s.String()
-}
-
-func invokeError(e error) (ret string) {
- defer func() {
- if r := recover(); r != nil {
- ret = fmt.Sprintf("<panic: %s>", r)
- }
- }()
- return e.Error()
-}
-
-// Caller represents the original call site for a log line, after considering
-// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
-// Line fields will always be provided, while the Func field is optional.
-// Users can set the render hook fields in Options to examine logged key-value
-// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
-// field is enabled for the given MessageClass.
-type Caller struct {
- // File is the basename of the file for this call site.
- File string `json:"file"`
- // Line is the line number in the file for this call site.
- Line int `json:"line"`
- // Func is the function name for this call site, or empty if
- // Options.LogCallerFunc is not enabled.
- Func string `json:"function,omitempty"`
-}
-
-func (f Formatter) caller() Caller {
- // +1 for this frame, +1 for Info/Error.
- pc, file, line, ok := runtime.Caller(f.depth + 2)
- if !ok {
- return Caller{"<unknown>", 0, ""}
- }
- fn := ""
- if f.opts.LogCallerFunc {
- if fp := runtime.FuncForPC(pc); fp != nil {
- fn = fp.Name()
- }
- }
-
- return Caller{filepath.Base(file), line, fn}
-}
-
-const noValue = "<no-value>"
-
-func (f Formatter) nonStringKey(v any) string {
- return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
-}
-
-// snippet produces a short snippet string of an arbitrary value.
-func (f Formatter) snippet(v any) string {
- const snipLen = 16
-
- snip := f.pretty(v)
- if len(snip) > snipLen {
- snip = snip[:snipLen]
- }
- return snip
-}
-
-// sanitize ensures that a list of key-value pairs has a value for every key
-// (adding a value if needed) and that each key is a string (substituting a key
-// if needed).
-func (f Formatter) sanitize(kvList []any) []any {
- if len(kvList)%2 != 0 {
- kvList = append(kvList, noValue)
- }
- for i := 0; i < len(kvList); i += 2 {
- _, ok := kvList[i].(string)
- if !ok {
- kvList[i] = f.nonStringKey(kvList[i])
- }
- }
- return kvList
-}
-
-// Init configures this Formatter from runtime info, such as the call depth
-// imposed by logr itself.
-// Note that this receiver is a pointer, so depth can be saved.
-func (f *Formatter) Init(info logr.RuntimeInfo) {
- f.depth += info.CallDepth
-}
-
-// Enabled checks whether an info message at the given level should be logged.
-func (f Formatter) Enabled(level int) bool {
- return level <= f.opts.Verbosity
-}
-
-// GetDepth returns the current depth of this Formatter. This is useful for
-// implementations which do their own caller attribution.
-func (f Formatter) GetDepth() int {
- return f.depth
-}
-
-// FormatInfo renders an Info log message into strings. The prefix will be
-// empty when no names were set (via AddNames), or when the output is
-// configured for JSON.
-func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
- args := make([]any, 0, 64) // using a constant here impacts perf
- prefix = f.prefix
- if f.outputFormat == outputJSON {
- args = append(args, "logger", prefix)
- prefix = ""
- }
- if f.opts.LogTimestamp {
- args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
- }
- if policy := f.opts.LogCaller; policy == All || policy == Info {
- args = append(args, "caller", f.caller())
- }
- args = append(args, "level", level, "msg", msg)
- return prefix, f.render(args, kvList)
-}
-
-// FormatError renders an Error log message into strings. The prefix will be
-// empty when no names were set (via AddNames), or when the output is
-// configured for JSON.
-func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
- args := make([]any, 0, 64) // using a constant here impacts perf
- prefix = f.prefix
- if f.outputFormat == outputJSON {
- args = append(args, "logger", prefix)
- prefix = ""
- }
- if f.opts.LogTimestamp {
- args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
- }
- if policy := f.opts.LogCaller; policy == All || policy == Error {
- args = append(args, "caller", f.caller())
- }
- args = append(args, "msg", msg)
- var loggableErr any
- if err != nil {
- loggableErr = err.Error()
- }
- args = append(args, "error", loggableErr)
- return prefix, f.render(args, kvList)
-}
-
-// AddName appends the specified name. funcr uses '/' characters to separate
-// name elements. Callers should not pass '/' in the provided name string, but
-// this library does not actually enforce that.
-func (f *Formatter) AddName(name string) {
- if len(f.prefix) > 0 {
- f.prefix += "/"
- }
- f.prefix += name
-}
-
-// AddValues adds key-value pairs to the set of saved values to be logged with
-// each log line.
-func (f *Formatter) AddValues(kvList []any) {
- // Three slice args forces a copy.
- n := len(f.values)
- f.values = append(f.values[:n:n], kvList...)
-
- vals := f.values
- if hook := f.opts.RenderValuesHook; hook != nil {
- vals = hook(f.sanitize(vals))
- }
-
- // Pre-render values, so we don't have to do it on each Info/Error call.
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- f.flatten(buf, vals, false, true) // escape user-provided keys
- f.valuesStr = buf.String()
-}
-
-// AddCallDepth increases the number of stack-frames to skip when attributing
-// the log line to a file and line.
-func (f *Formatter) AddCallDepth(depth int) {
- f.depth += depth
-}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
deleted file mode 100644
index 2a5075a..0000000
--- a/vendor/github.com/go-logr/logr/logr.go
+++ /dev/null
@@ -1,563 +0,0 @@
-/*
-Copyright 2019 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This design derives from Dave Cheney's blog:
-// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
-
-// Package logr defines a general-purpose logging API and abstract interfaces
-// to back that API. Packages in the Go ecosystem can depend on this package,
-// while callers can implement logging with whatever backend is appropriate.
-//
-// # Usage
-//
-// Logging is done using a Logger instance. Logger is a concrete type with
-// methods, which defers the actual logging to a LogSink interface. The main
-// methods of Logger are Info() and Error(). Arguments to Info() and Error()
-// are key/value pairs rather than printf-style formatted strings, emphasizing
-// "structured logging".
-//
-// With Go's standard log package, we might write:
-//
-// log.Printf("setting target value %s", targetValue)
-//
-// With logr's structured logging, we'd write:
-//
-// logger.Info("setting target", "value", targetValue)
-//
-// Errors are much the same. Instead of:
-//
-// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
-//
-// We'd write:
-//
-// logger.Error(err, "failed to open the pod bay door", "user", user)
-//
-// Info() and Error() are very similar, but they are separate methods so that
-// LogSink implementations can choose to do things like attach additional
-// information (such as stack traces) on calls to Error(). Error() messages are
-// always logged, regardless of the current verbosity. If there is no error
-// instance available, passing nil is valid.
-//
-// # Verbosity
-//
-// Often we want to log information only when the application in "verbose
-// mode". To write log lines that are more verbose, Logger has a V() method.
-// The higher the V-level of a log line, the less critical it is considered.
-// Log-lines with V-levels that are not enabled (as per the LogSink) will not
-// be written. Level V(0) is the default, and logger.V(0).Info() has the same
-// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
-// Error messages do not have a verbosity level and are always logged.
-//
-// Where we might have written:
-//
-// if flVerbose >= 2 {
-// log.Printf("an unusual thing happened")
-// }
-//
-// We can write:
-//
-// logger.V(2).Info("an unusual thing happened")
-//
-// # Logger Names
-//
-// Logger instances can have name strings so that all messages logged through
-// that instance have additional context. For example, you might want to add
-// a subsystem name:
-//
-// logger.WithName("compactor").Info("started", "time", time.Now())
-//
-// The WithName() method returns a new Logger, which can be passed to
-// constructors or other functions for further use. Repeated use of WithName()
-// will accumulate name "segments". These name segments will be joined in some
-// way by the LogSink implementation. It is strongly recommended that name
-// segments contain simple identifiers (letters, digits, and hyphen), and do
-// not contain characters that could muddle the log output or confuse the
-// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
-// quotes, etc).
-//
-// # Saved Values
-//
-// Logger instances can store any number of key/value pairs, which will be
-// logged alongside all messages logged through that instance. For example,
-// you might want to create a Logger instance per managed object:
-//
-// With the standard log package, we might write:
-//
-// log.Printf("decided to set field foo to value %q for object %s/%s",
-// targetValue, object.Namespace, object.Name)
-//
-// With logr we'd write:
-//
-// // Elsewhere: set up the logger to log the object name.
-// obj.logger = mainLogger.WithValues(
-// "name", obj.name, "namespace", obj.namespace)
-//
-// // later on...
-// obj.logger.Info("setting foo", "value", targetValue)
-//
-// # Best Practices
-//
-// Logger has very few hard rules, with the goal that LogSink implementations
-// might have a lot of freedom to differentiate. There are, however, some
-// things to consider.
-//
-// The log message consists of a constant message attached to the log line.
-// This should generally be a simple description of what's occurring, and should
-// never be a format string. Variable information can then be attached using
-// named values.
-//
-// Keys are arbitrary strings, but should generally be constant values. Values
-// may be any Go value, but how the value is formatted is determined by the
-// LogSink implementation.
-//
-// Logger instances are meant to be passed around by value. Code that receives
-// such a value can call its methods without having to check whether the
-// instance is ready for use.
-//
-// The zero logger (= Logger{}) is identical to Discard() and discards all log
-// entries. Code that receives a Logger by value can simply call it, the methods
-// will never crash. For cases where passing a logger is optional, a pointer to Logger
-// should be used.
-//
-// # Key Naming Conventions
-//
-// Keys are not strictly required to conform to any specification or regex, but
-// it is recommended that they:
-// - be human-readable and meaningful (not auto-generated or simple ordinals)
-// - be constant (not dependent on input data)
-// - contain only printable characters
-// - not contain whitespace or punctuation
-// - use lower case for simple keys and lowerCamelCase for more complex ones
-//
-// These guidelines help ensure that log data is processed properly regardless
-// of the log implementation. For example, log implementations will try to
-// output JSON data or will store data for later database (e.g. SQL) queries.
-//
-// While users are generally free to use key names of their choice, it's
-// generally best to avoid using the following keys, as they're frequently used
-// by implementations:
-// - "caller": the calling information (file/line) of a particular log line
-// - "error": the underlying error value in the `Error` method
-// - "level": the log level
-// - "logger": the name of the associated logger
-// - "msg": the log message
-// - "stacktrace": the stack trace associated with a particular log line or
-// error (often from the `Error` message)
-// - "ts": the timestamp for a log line
-//
-// Implementations are encouraged to make use of these keys to represent the
-// above concepts, when necessary (for example, in a pure-JSON output form, it
-// would be necessary to represent at least message and timestamp as ordinary
-// named values).
-//
-// # Break Glass
-//
-// Implementations may choose to give callers access to the underlying
-// logging implementation. The recommended pattern for this is:
-//
-// // Underlier exposes access to the underlying logging implementation.
-// // Since callers only have a logr.Logger, they have to know which
-// // implementation is in use, so this interface is less of an abstraction
-// // and more of way to test type conversion.
-// type Underlier interface {
-// GetUnderlying() <underlying-type>
-// }
-//
-// Logger grants access to the sink to enable type assertions like this:
-//
-// func DoSomethingWithImpl(log logr.Logger) {
-// if underlier, ok := log.GetSink().(impl.Underlier); ok {
-// implLogger := underlier.GetUnderlying()
-// ...
-// }
-// }
-//
-// Custom `With*` functions can be implemented by copying the complete
-// Logger struct and replacing the sink in the copy:
-//
-// // WithFooBar changes the foobar parameter in the log sink and returns a
-// // new logger with that modified sink. It does nothing for loggers where
-// // the sink doesn't support that parameter.
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
-// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-// }
-// return log
-// }
-//
-// Don't use New to construct a new Logger with a LogSink retrieved from an
-// existing Logger. Source code attribution might not work correctly and
-// unexported fields in Logger get lost.
-//
-// Beware that the same LogSink instance may be shared by different logger
-// instances. Calling functions that modify the LogSink will affect all of
-// those.
-package logr
-
-import (
- "context"
-)
-
-// New returns a new Logger instance. This is primarily used by libraries
-// implementing LogSink, rather than end users. Passing a nil sink will create
-// a Logger which discards all log lines.
-func New(sink LogSink) Logger {
- logger := Logger{}
- logger.setSink(sink)
- if sink != nil {
- sink.Init(runtimeInfo)
- }
- return logger
-}
-
-// setSink stores the sink and updates any related fields. It mutates the
-// logger and thus is only safe to use for loggers that are not currently being
-// used concurrently.
-func (l *Logger) setSink(sink LogSink) {
- l.sink = sink
-}
-
-// GetSink returns the stored sink.
-func (l Logger) GetSink() LogSink {
- return l.sink
-}
-
-// WithSink returns a copy of the logger with the new sink.
-func (l Logger) WithSink(sink LogSink) Logger {
- l.setSink(sink)
- return l
-}
-
-// Logger is an interface to an abstract logging implementation. This is a
-// concrete type for performance reasons, but all the real work is passed on to
-// a LogSink. Implementations of LogSink should provide their own constructors
-// that return Logger, not LogSink.
-//
-// The underlying sink can be accessed through GetSink and be modified through
-// WithSink. This enables the implementation of custom extensions (see "Break
-// Glass" in the package documentation). Normally the sink should be used only
-// indirectly.
-type Logger struct {
- sink LogSink
- level int
-}
-
-// Enabled tests whether this Logger is enabled. For example, commandline
-// flags might be used to set the logging verbosity and disable some info logs.
-func (l Logger) Enabled() bool {
- // Some implementations of LogSink look at the caller in Enabled (e.g.
- // different verbosity levels per package or file), but we only pass one
- // CallDepth in (via Init). This means that all calls from Logger to the
- // LogSink's Enabled, Info, and Error methods must have the same number of
- // frames. In other words, Logger methods can't call other Logger methods
- // which call these LogSink methods unless we do it the same in all paths.
- return l.sink != nil && l.sink.Enabled(l.level)
-}
-
-// Info logs a non-error message with the given key/value pairs as context.
-//
-// The msg argument should be used to add some constant description to the log
-// line. The key/value pairs can then be used to add additional variable
-// information. The key/value pairs must alternate string keys and arbitrary
-// values.
-func (l Logger) Info(msg string, keysAndValues ...any) {
- if l.sink == nil {
- return
- }
- if l.sink.Enabled(l.level) { // see comment in Enabled
- if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
- withHelper.GetCallStackHelper()()
- }
- l.sink.Info(l.level, msg, keysAndValues...)
- }
-}
-
-// Error logs an error, with the given message and key/value pairs as context.
-// It functions similarly to Info, but may have unique behavior, and should be
-// preferred for logging errors (see the package documentations for more
-// information). The log message will always be emitted, regardless of
-// verbosity level.
-//
-// The msg argument should be used to add context to any underlying error,
-// while the err argument should be used to attach the actual error that
-// triggered this log line, if present. The err parameter is optional
-// and nil may be passed instead of an error instance.
-func (l Logger) Error(err error, msg string, keysAndValues ...any) {
- if l.sink == nil {
- return
- }
- if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
- withHelper.GetCallStackHelper()()
- }
- l.sink.Error(err, msg, keysAndValues...)
-}
-
-// V returns a new Logger instance for a specific verbosity level, relative to
-// this Logger. In other words, V-levels are additive. A higher verbosity
-// level means a log message is less important. Negative V-levels are treated
-// as 0.
-func (l Logger) V(level int) Logger {
- if l.sink == nil {
- return l
- }
- if level < 0 {
- level = 0
- }
- l.level += level
- return l
-}
-
-// GetV returns the verbosity level of the logger. If the logger's LogSink is
-// nil as in the Discard logger, this will always return 0.
-func (l Logger) GetV() int {
- // 0 if l.sink nil because of the if check in V above.
- return l.level
-}
-
-// WithValues returns a new Logger instance with additional key/value pairs.
-// See Info for documentation on how key/value pairs work.
-func (l Logger) WithValues(keysAndValues ...any) Logger {
- if l.sink == nil {
- return l
- }
- l.setSink(l.sink.WithValues(keysAndValues...))
- return l
-}
-
-// WithName returns a new Logger instance with the specified name element added
-// to the Logger's name. Successive calls with WithName append additional
-// suffixes to the Logger's name. It's strongly recommended that name segments
-// contain only letters, digits, and hyphens (see the package documentation for
-// more information).
-func (l Logger) WithName(name string) Logger {
- if l.sink == nil {
- return l
- }
- l.setSink(l.sink.WithName(name))
- return l
-}
-
-// WithCallDepth returns a Logger instance that offsets the call stack by the
-// specified number of frames when logging call site information, if possible.
-// This is useful for users who have helper functions between the "real" call
-// site and the actual calls to Logger methods. If depth is 0 the attribution
-// should be to the direct caller of this function. If depth is 1 the
-// attribution should skip 1 call frame, and so on. Successive calls to this
-// are additive.
-//
-// If the underlying log implementation supports a WithCallDepth(int) method,
-// it will be called and the result returned. If the implementation does not
-// support CallDepthLogSink, the original Logger will be returned.
-//
-// To skip one level, WithCallStackHelper() should be used instead of
-// WithCallDepth(1) because it works with implementions that support the
-// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
-func (l Logger) WithCallDepth(depth int) Logger {
- if l.sink == nil {
- return l
- }
- if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
- l.setSink(withCallDepth.WithCallDepth(depth))
- }
- return l
-}
-
-// WithCallStackHelper returns a new Logger instance that skips the direct
-// caller when logging call site information, if possible. This is useful for
-// users who have helper functions between the "real" call site and the actual
-// calls to Logger methods and want to support loggers which depend on marking
-// each individual helper function, like loggers based on testing.T.
-//
-// In addition to using that new logger instance, callers also must call the
-// returned function.
-//
-// If the underlying log implementation supports a WithCallDepth(int) method,
-// WithCallDepth(1) will be called to produce a new logger. If it supports a
-// WithCallStackHelper() method, that will be also called. If the
-// implementation does not support either of these, the original Logger will be
-// returned.
-func (l Logger) WithCallStackHelper() (func(), Logger) {
- if l.sink == nil {
- return func() {}, l
- }
- var helper func()
- if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
- l.setSink(withCallDepth.WithCallDepth(1))
- }
- if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
- helper = withHelper.GetCallStackHelper()
- } else {
- helper = func() {}
- }
- return helper, l
-}
-
-// IsZero returns true if this logger is an uninitialized zero value
-func (l Logger) IsZero() bool {
- return l.sink == nil
-}
-
-// contextKey is how we find Loggers in a context.Context.
-type contextKey struct{}
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v, nil
- }
-
- return Logger{}, notFoundError{}
-}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
- return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
- return true
-}
-
-// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v
- }
-
- return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
- return context.WithValue(ctx, contextKey{}, logger)
-}
-
-// RuntimeInfo holds information that the logr "core" library knows which
-// LogSinks might want to know.
-type RuntimeInfo struct {
- // CallDepth is the number of call frames the logr library adds between the
- // end-user and the LogSink. LogSink implementations which choose to print
- // the original logging site (e.g. file & line) should climb this many
- // additional frames to find it.
- CallDepth int
-}
-
-// runtimeInfo is a static global. It must not be changed at run time.
-var runtimeInfo = RuntimeInfo{
- CallDepth: 1,
-}
-
-// LogSink represents a logging implementation. End-users will generally not
-// interact with this type.
-type LogSink interface {
- // Init receives optional information about the logr library for LogSink
- // implementations that need it.
- Init(info RuntimeInfo)
-
- // Enabled tests whether this LogSink is enabled at the specified V-level.
- // For example, commandline flags might be used to set the logging
- // verbosity and disable some info logs.
- Enabled(level int) bool
-
- // Info logs a non-error message with the given key/value pairs as context.
- // The level argument is provided for optional logging. This method will
- // only be called when Enabled(level) is true. See Logger.Info for more
- // details.
- Info(level int, msg string, keysAndValues ...any)
-
- // Error logs an error, with the given message and key/value pairs as
- // context. See Logger.Error for more details.
- Error(err error, msg string, keysAndValues ...any)
-
- // WithValues returns a new LogSink with additional key/value pairs. See
- // Logger.WithValues for more details.
- WithValues(keysAndValues ...any) LogSink
-
- // WithName returns a new LogSink with the specified name appended. See
- // Logger.WithName for more details.
- WithName(name string) LogSink
-}
-
-// CallDepthLogSink represents a LogSink that knows how to climb the call stack
-// to identify the original call site and can offset the depth by a specified
-// number of frames. This is useful for users who have helper functions
-// between the "real" call site and the actual calls to Logger methods.
-// Implementations that log information about the call site (such as file,
-// function, or line) would otherwise log information about the intermediate
-// helper functions.
-//
-// This is an optional interface and implementations are not required to
-// support it.
-type CallDepthLogSink interface {
- // WithCallDepth returns a LogSink that will offset the call
- // stack by the specified number of frames when logging call
- // site information.
- //
- // If depth is 0, the LogSink should skip exactly the number
- // of call frames defined in RuntimeInfo.CallDepth when Info
- // or Error are called, i.e. the attribution should be to the
- // direct caller of Logger.Info or Logger.Error.
- //
- // If depth is 1 the attribution should skip 1 call frame, and so on.
- // Successive calls to this are additive.
- WithCallDepth(depth int) LogSink
-}
-
-// CallStackHelperLogSink represents a LogSink that knows how to climb
-// the call stack to identify the original call site and can skip
-// intermediate helper functions if they mark themselves as
-// helper. Go's testing package uses that approach.
-//
-// This is useful for users who have helper functions between the
-// "real" call site and the actual calls to Logger methods.
-// Implementations that log information about the call site (such as
-// file, function, or line) would otherwise log information about the
-// intermediate helper functions.
-//
-// This is an optional interface and implementations are not required
-// to support it. Implementations that choose to support this must not
-// simply implement it as WithCallDepth(1), because
-// Logger.WithCallStackHelper will call both methods if they are
-// present. This should only be implemented for LogSinks that actually
-// need it, as with testing.T.
-type CallStackHelperLogSink interface {
- // GetCallStackHelper returns a function that must be called
- // to mark the direct caller as helper function when logging
- // call site information.
- GetCallStackHelper() func()
-}
-
-// Marshaler is an optional interface that logged values may choose to
-// implement. Loggers with structured output, such as JSON, should
-// log the object return by the MarshalLog method instead of the
-// original value.
-type Marshaler interface {
- // MarshalLog can be used to:
- // - ensure that structs are not logged as strings when the original
- // value has a String method: return a different type without a
- // String method
- // - select which fields of a complex type should get logged:
- // return a simpler struct with fewer fields
- // - log unexported fields: return a different struct
- // with exported fields
- //
- // It may return any value of any type.
- MarshalLog() any
-}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/github.com/go-logr/stdr/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
deleted file mode 100644
index 5158667..0000000
--- a/vendor/github.com/go-logr/stdr/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Minimal Go logging using logr and Go's standard library
-
-[](https://pkg.go.dev/github.com/go-logr/stdr)
-
-This package implements the [logr interface](https://github.com/go-logr/logr)
-in terms of Go's standard log package(https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
deleted file mode 100644
index 93a8aab..0000000
--- a/vendor/github.com/go-logr/stdr/stdr.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2019 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package stdr implements github.com/go-logr/logr.Logger in terms of
-// Go's standard log package.
-package stdr
-
-import (
- "log"
- "os"
-
- "github.com/go-logr/logr"
- "github.com/go-logr/logr/funcr"
-)
-
-// The global verbosity level. See SetVerbosity().
-var globalVerbosity int
-
-// SetVerbosity sets the global level against which all info logs will be
-// compared. If this is greater than or equal to the "V" of the logger, the
-// message will be logged. A higher value here means more logs will be written.
-// The previous verbosity value is returned. This is not concurrent-safe -
-// callers must be sure to call it from only one goroutine.
-func SetVerbosity(v int) int {
- old := globalVerbosity
- globalVerbosity = v
- return old
-}
-
-// New returns a logr.Logger which is implemented by Go's standard log package,
-// or something like it. If std is nil, this will use a default logger
-// instead.
-//
-// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
-func New(std StdLogger) logr.Logger {
- return NewWithOptions(std, Options{})
-}
-
-// NewWithOptions returns a logr.Logger which is implemented by Go's standard
-// log package, or something like it. See New for details.
-func NewWithOptions(std StdLogger, opts Options) logr.Logger {
- if std == nil {
- // Go's log.Default() is only available in 1.16 and higher.
- std = log.New(os.Stderr, "", log.LstdFlags)
- }
-
- if opts.Depth < 0 {
- opts.Depth = 0
- }
-
- fopts := funcr.Options{
- LogCaller: funcr.MessageClass(opts.LogCaller),
- }
-
- sl := &logger{
- Formatter: funcr.NewFormatter(fopts),
- std: std,
- }
-
- // For skipping our own logger.Info/Error.
- sl.Formatter.AddCallDepth(1 + opts.Depth)
-
- return logr.New(sl)
-}
-
-// Options carries parameters which influence the way logs are generated.
-type Options struct {
- // Depth biases the assumed number of call frames to the "true" caller.
- // This is useful when the calling code calls a function which then calls
- // stdr (e.g. a logging shim to another API). Values less than zero will
- // be treated as zero.
- Depth int
-
- // LogCaller tells stdr to add a "caller" key to some or all log lines.
- // Go's log package has options to log this natively, too.
- LogCaller MessageClass
-
- // TODO: add an option to log the date/time
-}
-
-// MessageClass indicates which category or categories of messages to consider.
-type MessageClass int
-
-const (
- // None ignores all message classes.
- None MessageClass = iota
- // All considers all message classes.
- All
- // Info only considers info messages.
- Info
- // Error only considers error messages.
- Error
-)
-
-// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
-// this adapter.
-type StdLogger interface {
- // Output is the same as log.Output and log.Logger.Output.
- Output(calldepth int, logline string) error
-}
-
-type logger struct {
- funcr.Formatter
- std StdLogger
-}
-
-var _ logr.LogSink = &logger{}
-var _ logr.CallDepthLogSink = &logger{}
-
-func (l logger) Enabled(level int) bool {
- return globalVerbosity >= level
-}
-
-func (l logger) Info(level int, msg string, kvList ...interface{}) {
- prefix, args := l.FormatInfo(level, msg, kvList)
- if prefix != "" {
- args = prefix + ": " + args
- }
- _ = l.std.Output(l.Formatter.GetDepth()+1, args)
-}
-
-func (l logger) Error(err error, msg string, kvList ...interface{}) {
- prefix, args := l.FormatError(err, msg, kvList)
- if prefix != "" {
- args = prefix + ": " + args
- }
- _ = l.std.Output(l.Formatter.GetDepth()+1, args)
-}
-
-func (l logger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
- return &l
-}
-
-func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
- l.Formatter.AddValues(kvList)
- return &l
-}
-
-func (l logger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
- return &l
-}
-
-// Underlier exposes access to the underlying logging implementation. Since
-// callers only have a logr.Logger, they have to know which implementation is
-// in use, so this interface is less of an abstraction and more of way to test
-// type conversion.
-type Underlier interface {
- GetUnderlying() StdLogger
-}
-
-// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
-// is itself an interface, the result may or may not be a Go log.Logger.
-func (l logger) GetUnderlying() StdLogger {
- return l.std
-}
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
index c9fb829..7ed347d 100644
--- a/vendor/github.com/google/uuid/CHANGELOG.md
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -1,12 +1,5 @@
# Changelog
-## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
-
-
-### Features
-
-* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
-
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index c351129..e6ef06c 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,23 +108,12 @@
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
+// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
- var t Time
- switch uuid.Version() {
- case 6:
- time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
- t = Time(time)
- case 7:
- time := binary.BigEndian.Uint64(uuid[:8])
- t = Time((time>>16)*10000 + g1582ns100)
- default: // forward compatible
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- t = Time(time)
- }
- return t
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index 5232b48..dc75f7d 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -186,59 +186,6 @@
return uuid
}
-// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
-// It returns an error if the format is invalid, otherwise nil.
-func Validate(s string) error {
- switch len(s) {
- // Standard UUID format
- case 36:
-
- // UUID with "urn:uuid:" prefix
- case 36 + 9:
- if !strings.EqualFold(s[:9], "urn:uuid:") {
- return fmt.Errorf("invalid urn prefix: %q", s[:9])
- }
- s = s[9:]
-
- // UUID enclosed in braces
- case 36 + 2:
- if s[0] != '{' || s[len(s)-1] != '}' {
- return fmt.Errorf("invalid bracketed UUID format")
- }
- s = s[1 : len(s)-1]
-
- // UUID without hyphens
- case 32:
- for i := 0; i < len(s); i += 2 {
- _, ok := xtob(s[i], s[i+1])
- if !ok {
- return errors.New("invalid UUID format")
- }
- }
-
- default:
- return invalidLengthError{len(s)}
- }
-
- // Check for standard UUID format
- if len(s) == 36 {
- if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
- return errors.New("invalid UUID format")
- }
- for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
- if _, ok := xtob(s[x], s[x+1]); !ok {
- return errors.New("invalid UUID format")
- }
- }
- }
-
- return nil
-}
-
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
deleted file mode 100644
index 339a959..0000000
--- a/vendor/github.com/google/uuid/version6.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2023 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import "encoding/binary"
-
-// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
-// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
-// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
-//
-// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
-//
-// NewV6 returns a Version 6 UUID based on the current NodeID and clock
-// sequence, and the current time. If the NodeID has not been set by SetNodeID
-// or SetNodeInterface then it will be set automatically. If the NodeID cannot
-// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
-// SetClockSequence then it will be set automatically. If GetTime fails to
-// return the current NewV6 returns Nil and an error.
-func NewV6() (UUID, error) {
- var uuid UUID
- now, seq, err := GetTime()
- if err != nil {
- return uuid, err
- }
-
- /*
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | time_high |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | time_mid | time_low_and_version |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |clk_seq_hi_res | clk_seq_low | node (0-1) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | node (2-5) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-
- binary.BigEndian.PutUint64(uuid[0:], uint64(now))
- binary.BigEndian.PutUint16(uuid[8:], seq)
-
- uuid[6] = 0x60 | (uuid[6] & 0x0F)
- uuid[8] = 0x80 | (uuid[8] & 0x3F)
-
- nodeMu.Lock()
- if nodeID == zeroID {
- setNodeInterface("")
- }
- copy(uuid[10:], nodeID[:])
- nodeMu.Unlock()
-
- return uuid, nil
-}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
deleted file mode 100644
index ba9dd5e..0000000
--- a/vendor/github.com/google/uuid/version7.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2023 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "io"
-)
-
-// UUID version 7 features a time-ordered value field derived from the widely
-// implemented and well known Unix Epoch timestamp source,
-// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
-// As well as improved entropy characteristics over versions 1 or 6.
-//
-// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
-//
-// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
-//
-// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch).
-// Uses the randomness pool if it was enabled with EnableRandPool.
-// On error, NewV7 returns Nil and an error
-func NewV7() (UUID, error) {
- uuid, err := NewRandom()
- if err != nil {
- return uuid, err
- }
- makeV7(uuid[:])
- return uuid, nil
-}
-
-// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch).
-// it use NewRandomFromReader fill random bits.
-// On error, NewV7FromReader returns Nil and an error.
-func NewV7FromReader(r io.Reader) (UUID, error) {
- uuid, err := NewRandomFromReader(r)
- if err != nil {
- return uuid, err
- }
-
- makeV7(uuid[:])
- return uuid, nil
-}
-
-// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
-// uuid[8] already has the right version number (Variant is 10)
-// see function NewV7 and NewV7FromReader
-func makeV7(uuid []byte) {
- /*
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms | ver | rand_a |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |var| rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
- _ = uuid[15] // bounds check
-
- t := timeNow().UnixMilli()
-
- uuid[0] = byte(t >> 40)
- uuid[1] = byte(t >> 32)
- uuid[2] = byte(t >> 24)
- uuid[3] = byte(t >> 16)
- uuid[4] = byte(t >> 8)
- uuid[5] = byte(t)
-
- uuid[6] = 0x70 | (uuid[6] & 0x0F)
- // uuid[8] has already has right version
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
deleted file mode 100644
index 67f8d73..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-import (
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-const (
- // ScopeName is the instrumentation scope name.
- ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
- // GRPCStatusCodeKey is convention for numeric status code of a gRPC request.
- GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-// Filter is a predicate used to determine whether a given request in
-// interceptor info should be traced. A Filter must return true if
-// the request should be traced.
-type Filter func(*InterceptorInfo) bool
-
-// config is a group of options for this instrumentation.
-type config struct {
- Filter Filter
- Propagators propagation.TextMapPropagator
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
- SpanStartOptions []trace.SpanStartOption
-
- ReceivedEvent bool
- SentEvent bool
-
- tracer trace.Tracer
- meter metric.Meter
-
- rpcDuration metric.Float64Histogram
- rpcRequestSize metric.Int64Histogram
- rpcResponseSize metric.Int64Histogram
- rpcRequestsPerRPC metric.Int64Histogram
- rpcResponsesPerRPC metric.Int64Histogram
-}
-
-// Option applies an option value for a config.
-type Option interface {
- apply(*config)
-}
-
-// newConfig returns a config configured with all the passed Options.
-func newConfig(opts []Option, role string) *config {
- c := &config{
- Propagators: otel.GetTextMapPropagator(),
- TracerProvider: otel.GetTracerProvider(),
- MeterProvider: otel.GetMeterProvider(),
- }
- for _, o := range opts {
- o.apply(c)
- }
-
- c.tracer = c.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(SemVersion()),
- )
-
- c.meter = c.MeterProvider.Meter(
- ScopeName,
- metric.WithInstrumentationVersion(Version()),
- metric.WithSchemaURL(semconv.SchemaURL),
- )
-
- var err error
- c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
- metric.WithDescription("Measures the duration of inbound RPC."),
- metric.WithUnit("ms"))
- if err != nil {
- otel.Handle(err)
- }
-
- c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size",
- metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
- metric.WithUnit("By"))
- if err != nil {
- otel.Handle(err)
- }
-
- c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size",
- metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
- metric.WithUnit("By"))
- if err != nil {
- otel.Handle(err)
- }
-
- c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
- metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
- metric.WithUnit("{count}"))
- if err != nil {
- otel.Handle(err)
- }
-
- c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
- metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
- metric.WithUnit("{count}"))
- if err != nil {
- otel.Handle(err)
- }
-
- return c
-}
-
-type propagatorsOption struct{ p propagation.TextMapPropagator }
-
-func (o propagatorsOption) apply(c *config) {
- if o.p != nil {
- c.Propagators = o.p
- }
-}
-
-// WithPropagators returns an Option to use the Propagators when extracting
-// and injecting trace context from requests.
-func WithPropagators(p propagation.TextMapPropagator) Option {
- return propagatorsOption{p: p}
-}
-
-type tracerProviderOption struct{ tp trace.TracerProvider }
-
-func (o tracerProviderOption) apply(c *config) {
- if o.tp != nil {
- c.TracerProvider = o.tp
- }
-}
-
-// WithInterceptorFilter returns an Option to use the request filter.
-//
-// Deprecated: Use stats handlers instead.
-func WithInterceptorFilter(f Filter) Option {
- return interceptorFilterOption{f: f}
-}
-
-type interceptorFilterOption struct {
- f Filter
-}
-
-func (o interceptorFilterOption) apply(c *config) {
- if o.f != nil {
- c.Filter = o.f
- }
-}
-
-// WithTracerProvider returns an Option to use the TracerProvider when
-// creating a Tracer.
-func WithTracerProvider(tp trace.TracerProvider) Option {
- return tracerProviderOption{tp: tp}
-}
-
-type meterProviderOption struct{ mp metric.MeterProvider }
-
-func (o meterProviderOption) apply(c *config) {
- if o.mp != nil {
- c.MeterProvider = o.mp
- }
-}
-
-// WithMeterProvider returns an Option to use the MeterProvider when
-// creating a Meter. If this option is not provide the global MeterProvider will be used.
-func WithMeterProvider(mp metric.MeterProvider) Option {
- return meterProviderOption{mp: mp}
-}
-
-// Event type that can be recorded, see WithMessageEvents.
-type Event int
-
-// Different types of events that can be recorded, see WithMessageEvents.
-const (
- ReceivedEvents Event = iota
- SentEvents
-)
-
-type messageEventsProviderOption struct {
- events []Event
-}
-
-func (m messageEventsProviderOption) apply(c *config) {
- for _, e := range m.events {
- switch e {
- case ReceivedEvents:
- c.ReceivedEvent = true
- case SentEvents:
- c.SentEvent = true
- }
- }
-}
-
-// WithMessageEvents configures the Handler to record the specified events
-// (span.AddEvent) on spans. By default only summary attributes are added at the
-// end of the request.
-//
-// Valid events are:
-// - ReceivedEvents: Record the number of bytes read after every gRPC read operation.
-// - SentEvents: Record the number of bytes written after every gRPC write operation.
-func WithMessageEvents(events ...Event) Option {
- return messageEventsProviderOption{events: events}
-}
-
-type spanStartOption struct{ opts []trace.SpanStartOption }
-
-func (o spanStartOption) apply(c *config) {
- c.SpanStartOptions = append(c.SpanStartOptions, o.opts...)
-}
-
-// WithSpanOptions configures an additional set of
-// trace.SpanOptions, which are applied to each new span.
-func WithSpanOptions(opts ...trace.SpanStartOption) Option {
- return spanStartOption{opts}
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
deleted file mode 100644
index 958dcd8..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
-
-Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
-
-Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
-*/
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
deleted file mode 100644
index 3b487a9..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-// gRPC tracing middleware
-// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
-import (
- "context"
- "io"
- "net"
- "strconv"
- "time"
-
- "google.golang.org/grpc"
- grpc_codes "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
-
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/metric"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-type messageType attribute.KeyValue
-
-// Event adds an event of the messageType to the span associated with the
-// passed context with a message id.
-func (m messageType) Event(ctx context.Context, id int, _ interface{}) {
- span := trace.SpanFromContext(ctx)
- if !span.IsRecording() {
- return
- }
- span.AddEvent("message", trace.WithAttributes(
- attribute.KeyValue(m),
- RPCMessageIDKey.Int(id),
- ))
-}
-
-var (
- messageSent = messageType(RPCMessageTypeSent)
- messageReceived = messageType(RPCMessageTypeReceived)
-)
-
-// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
-// for use in a grpc.Dial call.
-//
-// Deprecated: Use [NewClientHandler] instead.
-func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
- cfg := newConfig(opts, "client")
- tracer := cfg.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(Version()),
- )
-
- return func(
- ctx context.Context,
- method string,
- req, reply interface{},
- cc *grpc.ClientConn,
- invoker grpc.UnaryInvoker,
- callOpts ...grpc.CallOption,
- ) error {
- i := &InterceptorInfo{
- Method: method,
- Type: UnaryClient,
- }
- if cfg.Filter != nil && !cfg.Filter(i) {
- return invoker(ctx, method, req, reply, cc, callOpts...)
- }
-
- name, attr, _ := telemetryAttributes(method, cc.Target())
-
- startOpts := append([]trace.SpanStartOption{
- trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attr...),
- },
- cfg.SpanStartOptions...,
- )
-
- ctx, span := tracer.Start(
- ctx,
- name,
- startOpts...,
- )
- defer span.End()
-
- ctx = inject(ctx, cfg.Propagators)
-
- if cfg.SentEvent {
- messageSent.Event(ctx, 1, req)
- }
-
- err := invoker(ctx, method, req, reply, cc, callOpts...)
-
- if cfg.ReceivedEvent {
- messageReceived.Event(ctx, 1, reply)
- }
-
- if err != nil {
- s, _ := status.FromError(err)
- span.SetStatus(codes.Error, s.Message())
- span.SetAttributes(statusCodeAttr(s.Code()))
- } else {
- span.SetAttributes(statusCodeAttr(grpc_codes.OK))
- }
-
- return err
- }
-}
-
-// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
-// SendMsg method call.
-type clientStream struct {
- grpc.ClientStream
- desc *grpc.StreamDesc
-
- span trace.Span
-
- receivedEvent bool
- sentEvent bool
-
- receivedMessageID int
- sentMessageID int
-}
-
-var _ = proto.Marshal
-
-func (w *clientStream) RecvMsg(m interface{}) error {
- err := w.ClientStream.RecvMsg(m)
-
- if err == nil && !w.desc.ServerStreams {
- w.endSpan(nil)
- } else if err == io.EOF {
- w.endSpan(nil)
- } else if err != nil {
- w.endSpan(err)
- } else {
- w.receivedMessageID++
-
- if w.receivedEvent {
- messageReceived.Event(w.Context(), w.receivedMessageID, m)
- }
- }
-
- return err
-}
-
-func (w *clientStream) SendMsg(m interface{}) error {
- err := w.ClientStream.SendMsg(m)
-
- w.sentMessageID++
-
- if w.sentEvent {
- messageSent.Event(w.Context(), w.sentMessageID, m)
- }
-
- if err != nil {
- w.endSpan(err)
- }
-
- return err
-}
-
-func (w *clientStream) Header() (metadata.MD, error) {
- md, err := w.ClientStream.Header()
- if err != nil {
- w.endSpan(err)
- }
-
- return md, err
-}
-
-func (w *clientStream) CloseSend() error {
- err := w.ClientStream.CloseSend()
- if err != nil {
- w.endSpan(err)
- }
-
- return err
-}
-
-func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
- return &clientStream{
- ClientStream: s,
- span: span,
- desc: desc,
- receivedEvent: cfg.ReceivedEvent,
- sentEvent: cfg.SentEvent,
- }
-}
-
-func (w *clientStream) endSpan(err error) {
- if err != nil {
- s, _ := status.FromError(err)
- w.span.SetStatus(codes.Error, s.Message())
- w.span.SetAttributes(statusCodeAttr(s.Code()))
- } else {
- w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
- }
-
- w.span.End()
-}
-
-// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
-// for use in a grpc.Dial call.
-//
-// Deprecated: Use [NewClientHandler] instead.
-func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
- cfg := newConfig(opts, "client")
- tracer := cfg.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(Version()),
- )
-
- return func(
- ctx context.Context,
- desc *grpc.StreamDesc,
- cc *grpc.ClientConn,
- method string,
- streamer grpc.Streamer,
- callOpts ...grpc.CallOption,
- ) (grpc.ClientStream, error) {
- i := &InterceptorInfo{
- Method: method,
- Type: StreamClient,
- }
- if cfg.Filter != nil && !cfg.Filter(i) {
- return streamer(ctx, desc, cc, method, callOpts...)
- }
-
- name, attr, _ := telemetryAttributes(method, cc.Target())
-
- startOpts := append([]trace.SpanStartOption{
- trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attr...),
- },
- cfg.SpanStartOptions...,
- )
-
- ctx, span := tracer.Start(
- ctx,
- name,
- startOpts...,
- )
-
- ctx = inject(ctx, cfg.Propagators)
-
- s, err := streamer(ctx, desc, cc, method, callOpts...)
- if err != nil {
- grpcStatus, _ := status.FromError(err)
- span.SetStatus(codes.Error, grpcStatus.Message())
- span.SetAttributes(statusCodeAttr(grpcStatus.Code()))
- span.End()
- return s, err
- }
- stream := wrapClientStream(ctx, s, desc, span, cfg)
- return stream, nil
- }
-}
-
-// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
-// for use in a grpc.NewServer call.
-//
-// Deprecated: Use [NewServerHandler] instead.
-func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
- cfg := newConfig(opts, "server")
- tracer := cfg.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(Version()),
- )
-
- return func(
- ctx context.Context,
- req interface{},
- info *grpc.UnaryServerInfo,
- handler grpc.UnaryHandler,
- ) (interface{}, error) {
- i := &InterceptorInfo{
- UnaryServerInfo: info,
- Type: UnaryServer,
- }
- if cfg.Filter != nil && !cfg.Filter(i) {
- return handler(ctx, req)
- }
-
- ctx = extract(ctx, cfg.Propagators)
- name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
-
- startOpts := append([]trace.SpanStartOption{
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attr...),
- },
- cfg.SpanStartOptions...,
- )
-
- ctx, span := tracer.Start(
- trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
- name,
- startOpts...,
- )
- defer span.End()
-
- if cfg.ReceivedEvent {
- messageReceived.Event(ctx, 1, req)
- }
-
- before := time.Now()
-
- resp, err := handler(ctx, req)
-
- s, _ := status.FromError(err)
- if err != nil {
- statusCode, msg := serverStatus(s)
- span.SetStatus(statusCode, msg)
- if cfg.SentEvent {
- messageSent.Event(ctx, 1, s.Proto())
- }
- } else {
- if cfg.SentEvent {
- messageSent.Event(ctx, 1, resp)
- }
- }
- grpcStatusCodeAttr := statusCodeAttr(s.Code())
- span.SetAttributes(grpcStatusCodeAttr)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
-
- metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
- cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...))
-
- return resp, err
- }
-}
-
-// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
-// SendMsg method call.
-type serverStream struct {
- grpc.ServerStream
- ctx context.Context
-
- receivedMessageID int
- sentMessageID int
-
- receivedEvent bool
- sentEvent bool
-}
-
-func (w *serverStream) Context() context.Context {
- return w.ctx
-}
-
-func (w *serverStream) RecvMsg(m interface{}) error {
- err := w.ServerStream.RecvMsg(m)
-
- if err == nil {
- w.receivedMessageID++
- if w.receivedEvent {
- messageReceived.Event(w.Context(), w.receivedMessageID, m)
- }
- }
-
- return err
-}
-
-func (w *serverStream) SendMsg(m interface{}) error {
- err := w.ServerStream.SendMsg(m)
-
- w.sentMessageID++
- if w.sentEvent {
- messageSent.Event(w.Context(), w.sentMessageID, m)
- }
-
- return err
-}
-
-func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream {
- return &serverStream{
- ServerStream: ss,
- ctx: ctx,
- receivedEvent: cfg.ReceivedEvent,
- sentEvent: cfg.SentEvent,
- }
-}
-
-// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
-// for use in a grpc.NewServer call.
-//
-// Deprecated: Use [NewServerHandler] instead.
-func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
- cfg := newConfig(opts, "server")
- tracer := cfg.TracerProvider.Tracer(
- ScopeName,
- trace.WithInstrumentationVersion(Version()),
- )
-
- return func(
- srv interface{},
- ss grpc.ServerStream,
- info *grpc.StreamServerInfo,
- handler grpc.StreamHandler,
- ) error {
- ctx := ss.Context()
- i := &InterceptorInfo{
- StreamServerInfo: info,
- Type: StreamServer,
- }
- if cfg.Filter != nil && !cfg.Filter(i) {
- return handler(srv, wrapServerStream(ctx, ss, cfg))
- }
-
- ctx = extract(ctx, cfg.Propagators)
- name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
-
- startOpts := append([]trace.SpanStartOption{
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attr...),
- },
- cfg.SpanStartOptions...,
- )
-
- ctx, span := tracer.Start(
- trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
- name,
- startOpts...,
- )
- defer span.End()
-
- err := handler(srv, wrapServerStream(ctx, ss, cfg))
- if err != nil {
- s, _ := status.FromError(err)
- statusCode, msg := serverStatus(s)
- span.SetStatus(statusCode, msg)
- span.SetAttributes(statusCodeAttr(s.Code()))
- } else {
- span.SetAttributes(statusCodeAttr(grpc_codes.OK))
- }
-
- return err
- }
-}
-
-// telemetryAttributes returns a span name and span and metric attributes from
-// the gRPC method and peer address.
-func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
- name, methodAttrs := internal.ParseFullMethod(fullMethod)
- peerAttrs := peerAttr(peerAddress)
-
- attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
- attrs = append(attrs, RPCSystemGRPC)
- attrs = append(attrs, methodAttrs...)
- metricAttrs := attrs[:1+len(methodAttrs)]
- attrs = append(attrs, peerAttrs...)
- return name, attrs, metricAttrs
-}
-
-// peerAttr returns attributes about the peer address.
-func peerAttr(addr string) []attribute.KeyValue {
- host, p, err := net.SplitHostPort(addr)
- if err != nil {
- return nil
- }
-
- if host == "" {
- host = "127.0.0.1"
- }
- port, err := strconv.Atoi(p)
- if err != nil {
- return nil
- }
-
- var attr []attribute.KeyValue
- if ip := net.ParseIP(host); ip != nil {
- attr = []attribute.KeyValue{
- semconv.NetSockPeerAddr(host),
- semconv.NetSockPeerPort(port),
- }
- } else {
- attr = []attribute.KeyValue{
- semconv.NetPeerName(host),
- semconv.NetPeerPort(port),
- }
- }
-
- return attr
-}
-
-// peerFromCtx returns a peer address from a context, if one exists.
-func peerFromCtx(ctx context.Context) string {
- p, ok := peer.FromContext(ctx)
- if !ok {
- return ""
- }
- return p.Addr.String()
-}
-
-// statusCodeAttr returns status code attribute based on given gRPC code.
-func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue {
- return GRPCStatusCodeKey.Int64(int64(c))
-}
-
-// serverStatus returns a span status code and message for a given gRPC
-// status code. It maps specific gRPC status codes to a corresponding span
-// status code and message. This function is intended for use on the server
-// side of a gRPC connection.
-//
-// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented,
-// Internal, Unavailable, or DataLoss, it returns a span status code of Error
-// and the message from the gRPC status. Otherwise, it returns a span status
-// code of Unset and an empty message.
-func serverStatus(grpcStatus *status.Status) (codes.Code, string) {
- switch grpcStatus.Code() {
- case grpc_codes.Unknown,
- grpc_codes.DeadlineExceeded,
- grpc_codes.Unimplemented,
- grpc_codes.Internal,
- grpc_codes.Unavailable,
- grpc_codes.DataLoss:
- return codes.Error, grpcStatus.Message()
- default:
- return codes.Unset, ""
- }
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
deleted file mode 100644
index f611694..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-import (
- "google.golang.org/grpc"
-)
-
-// InterceptorType is the flag to define which gRPC interceptor
-// the InterceptorInfo object is.
-type InterceptorType uint8
-
-const (
- // UndefinedInterceptor is the type for the interceptor information that is not
- // well initialized or categorized to other types.
- UndefinedInterceptor InterceptorType = iota
- // UnaryClient is the type for grpc.UnaryClient interceptor.
- UnaryClient
- // StreamClient is the type for grpc.StreamClient interceptor.
- StreamClient
- // UnaryServer is the type for grpc.UnaryServer interceptor.
- UnaryServer
- // StreamServer is the type for grpc.StreamServer interceptor.
- StreamServer
-)
-
-// InterceptorInfo is the union of some arguments to four types of
-// gRPC interceptors.
-type InterceptorInfo struct {
- // Method is method name registered to UnaryClient and StreamClient
- Method string
- // UnaryServerInfo is the metadata for UnaryServer
- UnaryServerInfo *grpc.UnaryServerInfo
- // StreamServerInfo if the metadata for StreamServer
- StreamServerInfo *grpc.StreamServerInfo
- // Type is the type for interceptor
- Type InterceptorType
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
deleted file mode 100644
index cf32a9e..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
-
-import (
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
-)
-
-// ParseFullMethod returns a span name following the OpenTelemetry semantic
-// conventions as well as all applicable span attribute.KeyValue attributes based
-// on a gRPC's FullMethod.
-//
-// Parsing is consistent with grpc-go implementation:
-// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39
-func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
- if !strings.HasPrefix(fullMethod, "/") {
- // Invalid format, does not follow `/package.service/method`.
- return fullMethod, nil
- }
- name := fullMethod[1:]
- pos := strings.LastIndex(name, "/")
- if pos < 0 {
- // Invalid format, does not follow `/package.service/method`.
- return name, nil
- }
- service, method := name[:pos], name[pos+1:]
-
- var attrs []attribute.KeyValue
- if service != "" {
- attrs = append(attrs, semconv.RPCService(service))
- }
- if method != "" {
- attrs = append(attrs, semconv.RPCMethod(method))
- }
- return name, attrs
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
deleted file mode 100644
index f585fb6..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-import (
- "context"
-
- "google.golang.org/grpc/metadata"
-
- "go.opentelemetry.io/otel/baggage"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-type metadataSupplier struct {
- metadata *metadata.MD
-}
-
-// assert that metadataSupplier implements the TextMapCarrier interface.
-var _ propagation.TextMapCarrier = &metadataSupplier{}
-
-func (s *metadataSupplier) Get(key string) string {
- values := s.metadata.Get(key)
- if len(values) == 0 {
- return ""
- }
- return values[0]
-}
-
-func (s *metadataSupplier) Set(key string, value string) {
- s.metadata.Set(key, value)
-}
-
-func (s *metadataSupplier) Keys() []string {
- out := make([]string, 0, len(*s.metadata))
- for key := range *s.metadata {
- out = append(out, key)
- }
- return out
-}
-
-// Inject injects correlation context and span context into the gRPC
-// metadata object. This function is meant to be used on outgoing
-// requests.
-// Deprecated: Unnecessary public func.
-func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
- c := newConfig(opts, "")
- c.Propagators.Inject(ctx, &metadataSupplier{
- metadata: md,
- })
-}
-
-func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- md = metadata.MD{}
- }
- propagators.Inject(ctx, &metadataSupplier{
- metadata: &md,
- })
- return metadata.NewOutgoingContext(ctx, md)
-}
-
-// Extract returns the correlation context and span context that
-// another service encoded in the gRPC metadata object with Inject.
-// This function is meant to be used on incoming requests.
-// Deprecated: Unnecessary public func.
-func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
- c := newConfig(opts, "")
- ctx = c.Propagators.Extract(ctx, &metadataSupplier{
- metadata: md,
- })
-
- return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
-}
-
-func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
- md = metadata.MD{}
- }
-
- return propagators.Extract(ctx, &metadataSupplier{
- metadata: &md,
- })
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
deleted file mode 100644
index b65fab3..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-import (
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
-)
-
-// Semantic conventions for attribute keys for gRPC.
-const (
- // Name of message transmitted or received.
- RPCNameKey = attribute.Key("name")
-
- // Type of message transmitted or received.
- RPCMessageTypeKey = attribute.Key("message.type")
-
- // Identifier of message transmitted or received.
- RPCMessageIDKey = attribute.Key("message.id")
-
- // The compressed size of the message transmitted or received in bytes.
- RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // The uncompressed size of the message transmitted or received in
- // bytes.
- RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-// Semantic conventions for common RPC attributes.
-var (
- // Semantic convention for gRPC as the remoting system.
- RPCSystemGRPC = semconv.RPCSystemGRPC
-
- // Semantic convention for a message named message.
- RPCNameMessage = RPCNameKey.String("message")
-
- // Semantic conventions for RPC message types.
- RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
- RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
deleted file mode 100644
index e41e6df..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-import (
- "context"
- "sync/atomic"
- "time"
-
- grpc_codes "google.golang.org/grpc/codes"
- "google.golang.org/grpc/stats"
- "google.golang.org/grpc/status"
-
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/metric"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-type gRPCContextKey struct{}
-
-type gRPCContext struct {
- messagesReceived int64
- messagesSent int64
- metricAttrs []attribute.KeyValue
-}
-
-type serverHandler struct {
- *config
-}
-
-// NewServerHandler creates a stats.Handler for gRPC server.
-func NewServerHandler(opts ...Option) stats.Handler {
- h := &serverHandler{
- config: newConfig(opts, "server"),
- }
-
- return h
-}
-
-// TagConn can attach some information to the given context.
-func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
- span := trace.SpanFromContext(ctx)
- attrs := peerAttr(peerFromCtx(ctx))
- span.SetAttributes(attrs...)
- return ctx
-}
-
-// HandleConn processes the Conn stats.
-func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
-}
-
-// TagRPC can attach some information to the given context.
-func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- ctx = extract(ctx, h.config.Propagators)
-
- name, attrs := internal.ParseFullMethod(info.FullMethodName)
- attrs = append(attrs, RPCSystemGRPC)
- ctx, _ = h.tracer.Start(
- trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
- name,
- trace.WithSpanKind(trace.SpanKindServer),
- trace.WithAttributes(attrs...),
- )
-
- gctx := gRPCContext{
- metricAttrs: attrs,
- }
- return context.WithValue(ctx, gRPCContextKey{}, &gctx)
-}
-
-// HandleRPC processes the RPC stats.
-func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- h.handleRPC(ctx, rs)
-}
-
-type clientHandler struct {
- *config
-}
-
-// NewClientHandler creates a stats.Handler for gRPC client.
-func NewClientHandler(opts ...Option) stats.Handler {
- h := &clientHandler{
- config: newConfig(opts, "client"),
- }
-
- return h
-}
-
-// TagRPC can attach some information to the given context.
-func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
- name, attrs := internal.ParseFullMethod(info.FullMethodName)
- attrs = append(attrs, RPCSystemGRPC)
- ctx, _ = h.tracer.Start(
- ctx,
- name,
- trace.WithSpanKind(trace.SpanKindClient),
- trace.WithAttributes(attrs...),
- )
-
- gctx := gRPCContext{
- metricAttrs: attrs,
- }
-
- return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
-}
-
-// HandleRPC processes the RPC stats.
-func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
- h.handleRPC(ctx, rs)
-}
-
-// TagConn can attach some information to the given context.
-func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context {
- span := trace.SpanFromContext(ctx)
- attrs := peerAttr(cti.RemoteAddr.String())
- span.SetAttributes(attrs...)
- return ctx
-}
-
-// HandleConn processes the Conn stats.
-func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
- // no-op
-}
-
-func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) {
- span := trace.SpanFromContext(ctx)
- gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
- var messageId int64
- metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
- metricAttrs = append(metricAttrs, gctx.metricAttrs...)
- wctx := withoutCancel(ctx)
-
- switch rs := rs.(type) {
- case *stats.Begin:
- case *stats.InPayload:
- if gctx != nil {
- messageId = atomic.AddInt64(&gctx.messagesReceived, 1)
- c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
- }
-
- if c.ReceivedEvent {
- span.AddEvent("message",
- trace.WithAttributes(
- semconv.MessageTypeReceived,
- semconv.MessageIDKey.Int64(messageId),
- semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
- semconv.MessageUncompressedSizeKey.Int(rs.Length),
- ),
- )
- }
- case *stats.OutPayload:
- if gctx != nil {
- messageId = atomic.AddInt64(&gctx.messagesSent, 1)
- c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...))
- }
-
- if c.SentEvent {
- span.AddEvent("message",
- trace.WithAttributes(
- semconv.MessageTypeSent,
- semconv.MessageIDKey.Int64(messageId),
- semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
- semconv.MessageUncompressedSizeKey.Int(rs.Length),
- ),
- )
- }
- case *stats.OutTrailer:
- case *stats.End:
- var rpcStatusAttr attribute.KeyValue
-
- if rs.Error != nil {
- s, _ := status.FromError(rs.Error)
- span.SetStatus(codes.Error, s.Message())
- rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
- } else {
- rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
- }
- span.SetAttributes(rpcStatusAttr)
- span.End()
-
- metricAttrs = append(metricAttrs, rpcStatusAttr)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
-
- c.rpcDuration.Record(wctx, elapsedTime, metric.WithAttributes(metricAttrs...))
- c.rpcRequestsPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...))
- c.rpcResponsesPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...))
- default:
- return
- }
-}
-
-func withoutCancel(parent context.Context) context.Context {
- if parent == nil {
- panic("cannot create context from nil parent")
- }
- return withoutCancelCtx{parent}
-}
-
-type withoutCancelCtx struct {
- c context.Context
-}
-
-func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) {
- return
-}
-
-func (withoutCancelCtx) Done() <-chan struct{} {
- return nil
-}
-
-func (withoutCancelCtx) Err() error {
- return nil
-}
-
-func (w withoutCancelCtx) Value(key any) any {
- return w.c.Value(key)
-}
-
-func (w withoutCancelCtx) String() string {
- return "withoutCancel"
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
deleted file mode 100644
index f47c8a6..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
-
-// Version is the current release version of the gRPC instrumentation.
-func Version() string {
- return "0.46.1"
- // This string is updated by the pre_release.sh script during release
-}
-
-// SemVersion is the semantic version to be supplied to tracer/meter creation.
-//
-// Deprecated: Use [Version] instead.
-func SemVersion() string {
- return Version()
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
deleted file mode 100644
index 92b8cf7..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
- "strings"
-)
-
-// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
-// Please be careful of intitialization order - for example, if you change
-// the global propagator, the DefaultClient might still be using the old one.
-var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
-
-// Get is a convenient replacement for http.Get that adds a span around the request.
-func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil)
- if err != nil {
- return nil, err
- }
- return DefaultClient.Do(req)
-}
-
-// Head is a convenient replacement for http.Head that adds a span around the request.
-func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil)
- if err != nil {
- return nil, err
- }
- return DefaultClient.Do(req)
-}
-
-// Post is a convenient replacement for http.Post that adds a span around the request.
-func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) {
- req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", contentType)
- return DefaultClient.Do(req)
-}
-
-// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
-func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) {
- return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
deleted file mode 100644
index 9509014..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "net/http"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Attribute keys that can be added to a span.
-const (
- ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read
- ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded)
- WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written
- WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
-)
-
-// Server HTTP metrics.
-const (
- RequestCount = "http.server.request_count" // Incoming request count total
- RequestContentLength = "http.server.request_content_length" // Incoming request bytes total
- ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total
- ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds
-)
-
-// Filter is a predicate used to determine whether a given http.request should
-// be traced. A Filter must return true if the request should be traced.
-type Filter func(*http.Request) bool
-
-func newTracer(tp trace.TracerProvider) trace.Tracer {
- return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version()))
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
deleted file mode 100644
index a1b5b5e..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "net/http"
- "net/http/httptrace"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-// ScopeName is the instrumentation scope name.
-const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-// config represents the configuration options available for the http.Handler
-// and http.Transport types.
-type config struct {
- ServerName string
- Tracer trace.Tracer
- Meter metric.Meter
- Propagators propagation.TextMapPropagator
- SpanStartOptions []trace.SpanStartOption
- PublicEndpoint bool
- PublicEndpointFn func(*http.Request) bool
- ReadEvent bool
- WriteEvent bool
- Filters []Filter
- SpanNameFormatter func(string, *http.Request) string
- ClientTrace func(context.Context) *httptrace.ClientTrace
-
- TracerProvider trace.TracerProvider
- MeterProvider metric.MeterProvider
-}
-
-// Option interface used for setting optional config properties.
-type Option interface {
- apply(*config)
-}
-
-type optionFunc func(*config)
-
-func (o optionFunc) apply(c *config) {
- o(c)
-}
-
-// newConfig creates a new config struct and applies opts to it.
-func newConfig(opts ...Option) *config {
- c := &config{
- Propagators: otel.GetTextMapPropagator(),
- MeterProvider: otel.GetMeterProvider(),
- }
- for _, opt := range opts {
- opt.apply(c)
- }
-
- // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context.
- if c.TracerProvider != nil {
- c.Tracer = newTracer(c.TracerProvider)
- }
-
- c.Meter = c.MeterProvider.Meter(
- ScopeName,
- metric.WithInstrumentationVersion(Version()),
- )
-
- return c
-}
-
-// WithTracerProvider specifies a tracer provider to use for creating a tracer.
-// If none is specified, the global provider is used.
-func WithTracerProvider(provider trace.TracerProvider) Option {
- return optionFunc(func(cfg *config) {
- if provider != nil {
- cfg.TracerProvider = provider
- }
- })
-}
-
-// WithMeterProvider specifies a meter provider to use for creating a meter.
-// If none is specified, the global provider is used.
-func WithMeterProvider(provider metric.MeterProvider) Option {
- return optionFunc(func(cfg *config) {
- if provider != nil {
- cfg.MeterProvider = provider
- }
- })
-}
-
-// WithPublicEndpoint configures the Handler to link the span with an incoming
-// span context. If this option is not provided, then the association is a child
-// association instead of a link.
-func WithPublicEndpoint() Option {
- return optionFunc(func(c *config) {
- c.PublicEndpoint = true
- })
-}
-
-// WithPublicEndpointFn runs with every request, and allows conditionnally
-// configuring the Handler to link the span with an incoming span context. If
-// this option is not provided or returns false, then the association is a
-// child association instead of a link.
-// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
-func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
- return optionFunc(func(c *config) {
- c.PublicEndpointFn = fn
- })
-}
-
-// WithPropagators configures specific propagators. If this
-// option isn't specified, then the global TextMapPropagator is used.
-func WithPropagators(ps propagation.TextMapPropagator) Option {
- return optionFunc(func(c *config) {
- if ps != nil {
- c.Propagators = ps
- }
- })
-}
-
-// WithSpanOptions configures an additional set of
-// trace.SpanOptions, which are applied to each new span.
-func WithSpanOptions(opts ...trace.SpanStartOption) Option {
- return optionFunc(func(c *config) {
- c.SpanStartOptions = append(c.SpanStartOptions, opts...)
- })
-}
-
-// WithFilter adds a filter to the list of filters used by the handler.
-// If any filter indicates to exclude a request then the request will not be
-// traced. All filters must allow a request to be traced for a Span to be created.
-// If no filters are provided then all requests are traced.
-// Filters will be invoked for each processed request, it is advised to make them
-// simple and fast.
-func WithFilter(f Filter) Option {
- return optionFunc(func(c *config) {
- c.Filters = append(c.Filters, f)
- })
-}
-
-type event int
-
-// Different types of events that can be recorded, see WithMessageEvents.
-const (
- ReadEvents event = iota
- WriteEvents
-)
-
-// WithMessageEvents configures the Handler to record the specified events
-// (span.AddEvent) on spans. By default only summary attributes are added at the
-// end of the request.
-//
-// Valid events are:
-// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
-// using the ReadBytesKey
-// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write
-// using the WriteBytesKey
-func WithMessageEvents(events ...event) Option {
- return optionFunc(func(c *config) {
- for _, e := range events {
- switch e {
- case ReadEvents:
- c.ReadEvent = true
- case WriteEvents:
- c.WriteEvent = true
- }
- }
- })
-}
-
-// WithSpanNameFormatter takes a function that will be called on every
-// request and the returned string will become the Span Name.
-func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
- return optionFunc(func(c *config) {
- c.SpanNameFormatter = f
- })
-}
-
-// WithClientTrace takes a function that returns client trace instance that will be
-// applied to the requests sent through the otelhttp Transport.
-func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
- return optionFunc(func(c *config) {
- c.ClientTrace = f
- })
-}
-
-// WithServerName returns an Option that sets the name of the (virtual) server
-// handling requests.
-func WithServerName(server string) Option {
- return optionFunc(func(c *config) {
- c.ServerName = server
- })
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
deleted file mode 100644
index 38c7f01..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otelhttp provides an http.Handler and functions that are intended
-// to be used to add tracing by wrapping existing handlers (with Handler) and
-// routes WithRouteTag.
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
deleted file mode 100644
index 9a82600..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "io"
- "net/http"
- "time"
-
- "github.com/felixge/httpsnoop"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/propagation"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
- "go.opentelemetry.io/otel/trace"
-)
-
-// middleware is an http middleware which wraps the next handler in a span.
-type middleware struct {
- operation string
- server string
-
- tracer trace.Tracer
- meter metric.Meter
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- readEvent bool
- writeEvent bool
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- counters map[string]metric.Int64Counter
- valueRecorders map[string]metric.Float64Histogram
- publicEndpoint bool
- publicEndpointFn func(*http.Request) bool
-}
-
-func defaultHandlerFormatter(operation string, _ *http.Request) string {
- return operation
-}
-
-// NewHandler wraps the passed handler in a span named after the operation and
-// enriches it with metrics.
-func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
- return NewMiddleware(operation, opts...)(handler)
-}
-
-// NewMiddleware returns a tracing and metrics instrumentation middleware.
-// The handler returned by the middleware wraps a handler
-// in a span named after the operation and enriches it with metrics.
-func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
- h := middleware{
- operation: operation,
- }
-
- defaultOpts := []Option{
- WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
- WithSpanNameFormatter(defaultHandlerFormatter),
- }
-
- c := newConfig(append(defaultOpts, opts...)...)
- h.configure(c)
- h.createMeasures()
-
- return func(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- h.serveHTTP(w, r, next)
- })
- }
-}
-
-func (h *middleware) configure(c *config) {
- h.tracer = c.Tracer
- h.meter = c.Meter
- h.propagators = c.Propagators
- h.spanStartOptions = c.SpanStartOptions
- h.readEvent = c.ReadEvent
- h.writeEvent = c.WriteEvent
- h.filters = c.Filters
- h.spanNameFormatter = c.SpanNameFormatter
- h.publicEndpoint = c.PublicEndpoint
- h.publicEndpointFn = c.PublicEndpointFn
- h.server = c.ServerName
-}
-
-func handleErr(err error) {
- if err != nil {
- otel.Handle(err)
- }
-}
-
-func (h *middleware) createMeasures() {
- h.counters = make(map[string]metric.Int64Counter)
- h.valueRecorders = make(map[string]metric.Float64Histogram)
-
- requestBytesCounter, err := h.meter.Int64Counter(
- RequestContentLength,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"),
- )
- handleErr(err)
-
- responseBytesCounter, err := h.meter.Int64Counter(
- ResponseContentLength,
- metric.WithUnit("By"),
- metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"),
- )
- handleErr(err)
-
- serverLatencyMeasure, err := h.meter.Float64Histogram(
- ServerLatency,
- metric.WithUnit("ms"),
- metric.WithDescription("Measures the duration of HTTP request handling"),
- )
- handleErr(err)
-
- h.counters[RequestContentLength] = requestBytesCounter
- h.counters[ResponseContentLength] = responseBytesCounter
- h.valueRecorders[ServerLatency] = serverLatencyMeasure
-}
-
-// serveHTTP sets up tracing and calls the given next http.Handler with the span
-// context injected into the request context.
-func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
- requestStartTime := time.Now()
- for _, f := range h.filters {
- if !f(r) {
- // Simply pass through to the handler if a filter rejects the request
- next.ServeHTTP(w, r)
- return
- }
- }
-
- ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
- opts := []trace.SpanStartOption{
- trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...),
- }
- if h.server != "" {
- hostAttr := semconv.NetHostName(h.server)
- opts = append(opts, trace.WithAttributes(hostAttr))
- }
- opts = append(opts, h.spanStartOptions...)
- if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) {
- opts = append(opts, trace.WithNewRoot())
- // Linking incoming span context if any for public endpoint.
- if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() {
- opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s}))
- }
- }
-
- tracer := h.tracer
-
- if tracer == nil {
- if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
- tracer = newTracer(span.TracerProvider())
- } else {
- tracer = newTracer(otel.GetTracerProvider())
- }
- }
-
- ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
- defer span.End()
-
- readRecordFunc := func(int64) {}
- if h.readEvent {
- readRecordFunc = func(n int64) {
- span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
- }
- }
-
- var bw bodyWrapper
- // if request body is nil or NoBody, we don't want to mutate the body as it
- // will affect the identity of it in an unforeseeable way because we assert
- // ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
- if r.Body != nil && r.Body != http.NoBody {
- bw.ReadCloser = r.Body
- bw.record = readRecordFunc
- r.Body = &bw
- }
-
- writeRecordFunc := func(int64) {}
- if h.writeEvent {
- writeRecordFunc = func(n int64) {
- span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
- }
- }
-
- rww := &respWriterWrapper{
- ResponseWriter: w,
- record: writeRecordFunc,
- ctx: ctx,
- props: h.propagators,
- statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
- }
-
- // Wrap w to use our ResponseWriter methods while also exposing
- // other interfaces that w may implement (http.CloseNotifier,
- // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
-
- w = httpsnoop.Wrap(w, httpsnoop.Hooks{
- Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
- return rww.Header
- },
- Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
- return rww.Write
- },
- WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
- return rww.WriteHeader
- },
- })
-
- labeler := &Labeler{}
- ctx = injectLabeler(ctx, labeler)
-
- next.ServeHTTP(w, r.WithContext(ctx))
-
- setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
-
- // Add metrics
- attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
- if rww.statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
- }
- o := metric.WithAttributes(attributes...)
- h.counters[RequestContentLength].Add(ctx, bw.read, o)
- h.counters[ResponseContentLength].Add(ctx, rww.written, o)
-
- // Use floating point division here for higher precision (instead of Millisecond method).
- elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
-
- h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o)
-}
-
-func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
- attributes := []attribute.KeyValue{}
-
- // TODO: Consider adding an event after each read and write, possibly as an
- // option (defaulting to off), so as to not create needlessly verbose spans.
- if read > 0 {
- attributes = append(attributes, ReadBytesKey.Int64(read))
- }
- if rerr != nil && rerr != io.EOF {
- attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
- }
- if wrote > 0 {
- attributes = append(attributes, WroteBytesKey.Int64(wrote))
- }
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
- }
- span.SetStatus(semconvutil.HTTPServerStatus(statusCode))
-
- if werr != nil && werr != io.EOF {
- attributes = append(attributes, WriteErrorKey.String(werr.Error()))
- }
- span.SetAttributes(attributes...)
-}
-
-// WithRouteTag annotates spans and metrics with the provided route name
-// with HTTP route attribute.
-func WithRouteTag(route string, h http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- attr := semconv.HTTPRouteKey.String(route)
-
- span := trace.SpanFromContext(r.Context())
- span.SetAttributes(attr)
-
- labeler, _ := LabelerFromContext(r.Context())
- labeler.Add(attr)
-
- h.ServeHTTP(w, r)
- })
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
deleted file mode 100644
index edf4ce3..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-// Generate semconvutil package:
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go
-//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
deleted file mode 100644
index d3dede9..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
+++ /dev/null
@@ -1,552 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/httpconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
-)
-
-// HTTPClientResponse returns trace attributes for an HTTP response received by a
-// client from a server. It will return the following attributes if the related
-// values are defined in resp: "http.status.code",
-// "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...)
-func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
- return hc.ClientResponse(resp)
-}
-
-// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
-// The following attributes are always returned: "http.url", "http.flavor",
-// "http.method", "net.peer.name". The following attributes are returned if the
-// related values are defined in req: "net.peer.port", "http.user_agent",
-// "http.request_content_length", "enduser.id".
-func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
- return hc.ClientRequest(req)
-}
-
-// HTTPClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func HTTPClientStatus(code int) (codes.Code, string) {
- return hc.ClientStatus(code)
-}
-
-// HTTPServerRequest returns trace attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.flavor", "http.target", "net.host.name". The following attributes are
-// returned if they related values are defined in req: "net.host.port",
-// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
-// "http.client_ip".
-func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequest(server, req)
-}
-
-// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
-// server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.flavor", "net.host.name". The following attributes are
-// returned if they related values are defined in req: "net.host.port".
-func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- return hc.ServerRequestMetrics(server, req)
-}
-
-// HTTPServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func HTTPServerStatus(code int) (codes.Code, string) {
- return hc.ServerStatus(code)
-}
-
-// HTTPRequestHeader returns the contents of h as attributes.
-//
-// Instrumentation should require an explicit configuration of which headers to
-// captured and then prune what they pass here. Including all headers can be a
-// security risk - explicit configuration helps avoid leaking sensitive
-// information.
-//
-// The User-Agent header is already captured in the http.user_agent attribute
-// from ClientRequest and ServerRequest. Instrumentation may provide an option
-// to capture that header here even though it is not recommended. Otherwise,
-// instrumentation should filter that out of what is passed.
-func HTTPRequestHeader(h http.Header) []attribute.KeyValue {
- return hc.RequestHeader(h)
-}
-
-// HTTPResponseHeader returns the contents of h as attributes.
-//
-// Instrumentation should require an explicit configuration of which headers to
-// captured and then prune what they pass here. Including all headers can be a
-// security risk - explicit configuration helps avoid leaking sensitive
-// information.
-//
-// The User-Agent header is already captured in the http.user_agent attribute
-// from ClientRequest and ServerRequest. Instrumentation may provide an option
-// to capture that header here even though it is not recommended. Otherwise,
-// instrumentation should filter that out of what is passed.
-func HTTPResponseHeader(h http.Header) []attribute.KeyValue {
- return hc.ResponseHeader(h)
-}
-
-// httpConv are the HTTP semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type httpConv struct {
- NetConv *netConv
-
- EnduserIDKey attribute.Key
- HTTPClientIPKey attribute.Key
- HTTPFlavorKey attribute.Key
- HTTPMethodKey attribute.Key
- HTTPRequestContentLengthKey attribute.Key
- HTTPResponseContentLengthKey attribute.Key
- HTTPRouteKey attribute.Key
- HTTPSchemeHTTP attribute.KeyValue
- HTTPSchemeHTTPS attribute.KeyValue
- HTTPStatusCodeKey attribute.Key
- HTTPTargetKey attribute.Key
- HTTPURLKey attribute.Key
- HTTPUserAgentKey attribute.Key
-}
-
-var hc = &httpConv{
- NetConv: nc,
-
- EnduserIDKey: semconv.EnduserIDKey,
- HTTPClientIPKey: semconv.HTTPClientIPKey,
- HTTPFlavorKey: semconv.HTTPFlavorKey,
- HTTPMethodKey: semconv.HTTPMethodKey,
- HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
- HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
- HTTPRouteKey: semconv.HTTPRouteKey,
- HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
- HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
- HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
- HTTPTargetKey: semconv.HTTPTargetKey,
- HTTPURLKey: semconv.HTTPURLKey,
- HTTPUserAgentKey: semconv.HTTPUserAgentKey,
-}
-
-// ClientResponse returns attributes for an HTTP response received by a client
-// from a server. The following attributes are returned if the related values
-// are defined in resp: "http.status.code", "http.response_content_length".
-//
-// This does not add all OpenTelemetry required attributes for an HTTP event,
-// it assumes ClientRequest was used to create the span with a complete set of
-// attributes. If a complete set of attributes can be generated using the
-// request contained in resp. For example:
-//
-// append(ClientResponse(resp), ClientRequest(resp.Request)...)
-func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
- var n int
- if resp.StatusCode > 0 {
- n++
- }
- if resp.ContentLength > 0 {
- n++
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- if resp.StatusCode > 0 {
- attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
- }
- if resp.ContentLength > 0 {
- attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
- }
- return attrs
-}
-
-// ClientRequest returns attributes for an HTTP request made by a client. The
-// following attributes are always returned: "http.url", "http.flavor",
-// "http.method", "net.peer.name". The following attributes are returned if the
-// related values are defined in req: "net.peer.port", "http.user_agent",
-// "http.request_content_length", "enduser.id".
-func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
- n := 3 // URL, peer name, proto, and method.
- var h string
- if req.URL != nil {
- h = req.URL.Host
- }
- peer, p := firstHostPort(h, req.Header.Get("Host"))
- port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
- if port > 0 {
- n++
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- if req.ContentLength > 0 {
- n++
- }
- userID, _, hasUserID := req.BasicAuth()
- if hasUserID {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.flavor(req.Proto))
-
- var u string
- if req.URL != nil {
- // Remove any username/password info that may be in the URL.
- userinfo := req.URL.User
- req.URL.User = nil
- u = req.URL.String()
- // Restore any username/password info that was removed.
- req.URL.User = userinfo
- }
- attrs = append(attrs, c.HTTPURLKey.String(u))
-
- attrs = append(attrs, c.NetConv.PeerName(peer))
- if port > 0 {
- attrs = append(attrs, c.NetConv.PeerPort(port))
- }
-
- if useragent != "" {
- attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
- }
-
- if l := req.ContentLength; l > 0 {
- attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
- }
-
- if hasUserID {
- attrs = append(attrs, c.EnduserIDKey.String(userID))
- }
-
- return attrs
-}
-
-// ServerRequest returns attributes for an HTTP request received by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.flavor", "http.target", "net.host.name". The following attributes are
-// returned if they related values are defined in req: "net.host.port",
-// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
-// "http.client_ip".
-func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
- // TODO: This currently does not add the specification required
- // `http.target` attribute. It has too high of a cardinality to safely be
- // added. An alternate should be added, or this comment removed, when it is
- // addressed by the specification. If it is ultimately decided to continue
- // not including the attribute, the HTTPTargetKey field of the httpConv
- // should be removed as well.
-
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- peer, peerPort := splitHostPort(req.RemoteAddr)
- if peer != "" {
- n++
- if peerPort > 0 {
- n++
- }
- }
- useragent := req.UserAgent()
- if useragent != "" {
- n++
- }
- userID, _, hasUserID := req.BasicAuth()
- if hasUserID {
- n++
- }
- clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
- if clientIP != "" {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.method(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.flavor(req.Proto))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- if peer != "" {
- // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
- // file-path that would be interpreted with a sock family.
- attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
- if peerPort > 0 {
- attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
- }
- }
-
- if useragent != "" {
- attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
- }
-
- if hasUserID {
- attrs = append(attrs, c.EnduserIDKey.String(userID))
- }
-
- if clientIP != "" {
- attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
- }
-
- return attrs
-}
-
-// ServerRequestMetrics returns metric attributes for an HTTP request received
-// by a server.
-//
-// The server must be the primary server name if it is known. For example this
-// would be the ServerName directive
-// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
-// server, and the server_name directive
-// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
-// nginx server. More generically, the primary server name would be the host
-// header value that matches the default virtual host of an HTTP server. It
-// should include the host identifier and if a port is used to route to the
-// server that port identifier should be included as an appropriate port
-// suffix.
-//
-// If the primary server name is not known, server should be an empty string.
-// The req Host will be used to determine the server instead.
-//
-// The following attributes are always returned: "http.method", "http.scheme",
-// "http.flavor", "net.host.name". The following attributes are
-// returned if they related values are defined in req: "net.host.port".
-func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
- // TODO: This currently does not add the specification required
- // `http.target` attribute. It has too high of a cardinality to safely be
- // added. An alternate should be added, or this comment removed, when it is
- // addressed by the specification. If it is ultimately decided to continue
- // not including the attribute, the HTTPTargetKey field of the httpConv
- // should be removed as well.
-
- n := 4 // Method, scheme, proto, and host name.
- var host string
- var p int
- if server == "" {
- host, p = splitHostPort(req.Host)
- } else {
- // Prioritize the primary server name.
- host, p = splitHostPort(server)
- if p < 0 {
- _, p = splitHostPort(req.Host)
- }
- }
- hostPort := requiredHTTPPort(req.TLS != nil, p)
- if hostPort > 0 {
- n++
- }
- attrs := make([]attribute.KeyValue, 0, n)
-
- attrs = append(attrs, c.methodMetric(req.Method))
- attrs = append(attrs, c.scheme(req.TLS != nil))
- attrs = append(attrs, c.flavor(req.Proto))
- attrs = append(attrs, c.NetConv.HostName(host))
-
- if hostPort > 0 {
- attrs = append(attrs, c.NetConv.HostPort(hostPort))
- }
-
- return attrs
-}
-
-func (c *httpConv) method(method string) attribute.KeyValue {
- if method == "" {
- return c.HTTPMethodKey.String(http.MethodGet)
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) methodMetric(method string) attribute.KeyValue {
- method = strings.ToUpper(method)
- switch method {
- case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
- default:
- method = "_OTHER"
- }
- return c.HTTPMethodKey.String(method)
-}
-
-func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
- if https {
- return c.HTTPSchemeHTTPS
- }
- return c.HTTPSchemeHTTP
-}
-
-func (c *httpConv) flavor(proto string) attribute.KeyValue {
- switch proto {
- case "HTTP/1.0":
- return c.HTTPFlavorKey.String("1.0")
- case "HTTP/1.1":
- return c.HTTPFlavorKey.String("1.1")
- case "HTTP/2":
- return c.HTTPFlavorKey.String("2.0")
- case "HTTP/3":
- return c.HTTPFlavorKey.String("3.0")
- default:
- return c.HTTPFlavorKey.String(proto)
- }
-}
-
-func serverClientIP(xForwardedFor string) string {
- if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
- xForwardedFor = xForwardedFor[:idx]
- }
- return xForwardedFor
-}
-
-func requiredHTTPPort(https bool, port int) int { // nolint:revive
- if https {
- if port > 0 && port != 443 {
- return port
- }
- } else {
- if port > 0 && port != 80 {
- return port
- }
- }
- return -1
-}
-
-// Return the request host and port from the first non-empty source.
-func firstHostPort(source ...string) (host string, port int) {
- for _, hostport := range source {
- host, port = splitHostPort(hostport)
- if host != "" || port > 0 {
- break
- }
- }
- return
-}
-
-// RequestHeader returns the contents of h as OpenTelemetry attributes.
-func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue {
- return c.header("http.request.header", h)
-}
-
-// ResponseHeader returns the contents of h as OpenTelemetry attributes.
-func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue {
- return c.header("http.response.header", h)
-}
-
-func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue {
- key := func(k string) attribute.Key {
- k = strings.ToLower(k)
- k = strings.ReplaceAll(k, "-", "_")
- k = fmt.Sprintf("%s.%s", prefix, k)
- return attribute.Key(k)
- }
-
- attrs := make([]attribute.KeyValue, 0, len(h))
- for k, v := range h {
- attrs = append(attrs, key(k).StringSlice(v))
- }
- return attrs
-}
-
-// ClientStatus returns a span status code and message for an HTTP status code
-// value received by a client.
-func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 400 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
-
-// ServerStatus returns a span status code and message for an HTTP status code
-// value returned by a server. Status codes in the 400-499 range are not
-// returned as errors.
-func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
- if code < 100 || code >= 600 {
- return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
- }
- if code >= 500 {
- return codes.Error, ""
- }
- return codes.Unset, ""
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
deleted file mode 100644
index bde8893..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
+++ /dev/null
@@ -1,368 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/semconvutil/netconv.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
-
-import (
- "net"
- "strconv"
- "strings"
-
- "go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
-)
-
-// NetTransport returns a trace attribute describing the transport protocol of the
-// passed network. See the net.Dial for information about acceptable network
-// values.
-func NetTransport(network string) attribute.KeyValue {
- return nc.Transport(network)
-}
-
-// NetClient returns trace attributes for a client network connection to address.
-// See net.Dial for information about acceptable address values, address should
-// be the same as the one used to create conn. If conn is nil, only network
-// peer attributes will be returned that describe address. Otherwise, the
-// socket level information about conn will also be included.
-func NetClient(address string, conn net.Conn) []attribute.KeyValue {
- return nc.Client(address, conn)
-}
-
-// NetServer returns trace attributes for a network listener listening at address.
-// See net.Listen for information about acceptable address values, address
-// should be the same as the one used to create ln. If ln is nil, only network
-// host attributes will be returned that describe address. Otherwise, the
-// socket level information about ln will also be included.
-func NetServer(address string, ln net.Listener) []attribute.KeyValue {
- return nc.Server(address, ln)
-}
-
-// netConv are the network semantic convention attributes defined for a version
-// of the OpenTelemetry specification.
-type netConv struct {
- NetHostNameKey attribute.Key
- NetHostPortKey attribute.Key
- NetPeerNameKey attribute.Key
- NetPeerPortKey attribute.Key
- NetSockFamilyKey attribute.Key
- NetSockPeerAddrKey attribute.Key
- NetSockPeerPortKey attribute.Key
- NetSockHostAddrKey attribute.Key
- NetSockHostPortKey attribute.Key
- NetTransportOther attribute.KeyValue
- NetTransportTCP attribute.KeyValue
- NetTransportUDP attribute.KeyValue
- NetTransportInProc attribute.KeyValue
-}
-
-var nc = &netConv{
- NetHostNameKey: semconv.NetHostNameKey,
- NetHostPortKey: semconv.NetHostPortKey,
- NetPeerNameKey: semconv.NetPeerNameKey,
- NetPeerPortKey: semconv.NetPeerPortKey,
- NetSockFamilyKey: semconv.NetSockFamilyKey,
- NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
- NetSockPeerPortKey: semconv.NetSockPeerPortKey,
- NetSockHostAddrKey: semconv.NetSockHostAddrKey,
- NetSockHostPortKey: semconv.NetSockHostPortKey,
- NetTransportOther: semconv.NetTransportOther,
- NetTransportTCP: semconv.NetTransportTCP,
- NetTransportUDP: semconv.NetTransportUDP,
- NetTransportInProc: semconv.NetTransportInProc,
-}
-
-func (c *netConv) Transport(network string) attribute.KeyValue {
- switch network {
- case "tcp", "tcp4", "tcp6":
- return c.NetTransportTCP
- case "udp", "udp4", "udp6":
- return c.NetTransportUDP
- case "unix", "unixgram", "unixpacket":
- return c.NetTransportInProc
- default:
- // "ip:*", "ip4:*", and "ip6:*" all are considered other.
- return c.NetTransportOther
- }
-}
-
-// Host returns attributes for a network host address.
-func (c *netConv) Host(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.HostName(h))
- if p > 0 {
- attrs = append(attrs, c.HostPort(int(p)))
- }
- return attrs
-}
-
-// Server returns attributes for a network listener listening at address. See
-// net.Listen for information about acceptable address values, address should
-// be the same as the one used to create ln. If ln is nil, only network host
-// attributes will be returned that describe address. Otherwise, the socket
-// level information about ln will also be included.
-func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue {
- if ln == nil {
- return c.Host(address)
- }
-
- lAddr := ln.Addr()
- if lAddr == nil {
- return c.Host(address)
- }
-
- hostName, hostPort := splitHostPort(address)
- sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
- network := lAddr.Network()
- sockFamily := family(network, sockHostAddr)
-
- n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
- n += positiveInt(hostPort, sockHostPort)
- attr := make([]attribute.KeyValue, 0, n)
- if hostName != "" {
- attr = append(attr, c.HostName(hostName))
- if hostPort > 0 {
- // Only if net.host.name is set should net.host.port be.
- attr = append(attr, c.HostPort(hostPort))
- }
- }
- if network != "" {
- attr = append(attr, c.Transport(network))
- }
- if sockFamily != "" {
- attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
- }
- if sockHostAddr != "" {
- attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
- if sockHostPort > 0 {
- // Only if net.sock.host.addr is set should net.sock.host.port be.
- attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
- }
- }
- return attr
-}
-
-func (c *netConv) HostName(name string) attribute.KeyValue {
- return c.NetHostNameKey.String(name)
-}
-
-func (c *netConv) HostPort(port int) attribute.KeyValue {
- return c.NetHostPortKey.Int(port)
-}
-
-// Client returns attributes for a client network connection to address. See
-// net.Dial for information about acceptable address values, address should be
-// the same as the one used to create conn. If conn is nil, only network peer
-// attributes will be returned that describe address. Otherwise, the socket
-// level information about conn will also be included.
-func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue {
- if conn == nil {
- return c.Peer(address)
- }
-
- lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
-
- var network string
- switch {
- case lAddr != nil:
- network = lAddr.Network()
- case rAddr != nil:
- network = rAddr.Network()
- default:
- return c.Peer(address)
- }
-
- peerName, peerPort := splitHostPort(address)
- var (
- sockFamily string
- sockPeerAddr string
- sockPeerPort int
- sockHostAddr string
- sockHostPort int
- )
-
- if lAddr != nil {
- sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
- }
-
- if rAddr != nil {
- sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
- }
-
- switch {
- case sockHostAddr != "":
- sockFamily = family(network, sockHostAddr)
- case sockPeerAddr != "":
- sockFamily = family(network, sockPeerAddr)
- }
-
- n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
- n += positiveInt(peerPort, sockPeerPort, sockHostPort)
- attr := make([]attribute.KeyValue, 0, n)
- if peerName != "" {
- attr = append(attr, c.PeerName(peerName))
- if peerPort > 0 {
- // Only if net.peer.name is set should net.peer.port be.
- attr = append(attr, c.PeerPort(peerPort))
- }
- }
- if network != "" {
- attr = append(attr, c.Transport(network))
- }
- if sockFamily != "" {
- attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
- }
- if sockPeerAddr != "" {
- attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
- if sockPeerPort > 0 {
- // Only if net.sock.peer.addr is set should net.sock.peer.port be.
- attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
- }
- }
- if sockHostAddr != "" {
- attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
- if sockHostPort > 0 {
- // Only if net.sock.host.addr is set should net.sock.host.port be.
- attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
- }
- }
- return attr
-}
-
-func family(network, address string) string {
- switch network {
- case "unix", "unixgram", "unixpacket":
- return "unix"
- default:
- if ip := net.ParseIP(address); ip != nil {
- if ip.To4() == nil {
- return "inet6"
- }
- return "inet"
- }
- }
- return ""
-}
-
-func nonZeroStr(strs ...string) int {
- var n int
- for _, str := range strs {
- if str != "" {
- n++
- }
- }
- return n
-}
-
-func positiveInt(ints ...int) int {
- var n int
- for _, i := range ints {
- if i > 0 {
- n++
- }
- }
- return n
-}
-
-// Peer returns attributes for a network peer address.
-func (c *netConv) Peer(address string) []attribute.KeyValue {
- h, p := splitHostPort(address)
- var n int
- if h != "" {
- n++
- if p > 0 {
- n++
- }
- }
-
- if n == 0 {
- return nil
- }
-
- attrs := make([]attribute.KeyValue, 0, n)
- attrs = append(attrs, c.PeerName(h))
- if p > 0 {
- attrs = append(attrs, c.PeerPort(int(p)))
- }
- return attrs
-}
-
-func (c *netConv) PeerName(name string) attribute.KeyValue {
- return c.NetPeerNameKey.String(name)
-}
-
-func (c *netConv) PeerPort(port int) attribute.KeyValue {
- return c.NetPeerPortKey.Int(port)
-}
-
-func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
- return c.NetSockPeerAddrKey.String(addr)
-}
-
-func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
- return c.NetSockPeerPortKey.Int(port)
-}
-
-// splitHostPort splits a network address hostport of the form "host",
-// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
-// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
-// port.
-//
-// An empty host is returned if it is not provided or unparsable. A negative
-// port is returned if it is not provided or unparsable.
-func splitHostPort(hostport string) (host string, port int) {
- port = -1
-
- if strings.HasPrefix(hostport, "[") {
- addrEnd := strings.LastIndex(hostport, "]")
- if addrEnd < 0 {
- // Invalid hostport.
- return
- }
- if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
- host = hostport[1:addrEnd]
- return
- }
- } else {
- if i := strings.LastIndex(hostport, ":"); i < 0 {
- host = hostport
- return
- }
- }
-
- host, pStr, err := net.SplitHostPort(hostport)
- if err != nil {
- return
- }
-
- p, err := strconv.ParseUint(pStr, 10, 16)
- if err != nil {
- return
- }
- return host, int(p)
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
deleted file mode 100644
index 26a51a1..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
-// the metrics recorded by the net/http instrumentation.
-type Labeler struct {
- mu sync.Mutex
- attributes []attribute.KeyValue
-}
-
-// Add attributes to a Labeler.
-func (l *Labeler) Add(ls ...attribute.KeyValue) {
- l.mu.Lock()
- defer l.mu.Unlock()
- l.attributes = append(l.attributes, ls...)
-}
-
-// Get returns a copy of the attributes added to the Labeler.
-func (l *Labeler) Get() []attribute.KeyValue {
- l.mu.Lock()
- defer l.mu.Unlock()
- ret := make([]attribute.KeyValue, len(l.attributes))
- copy(ret, l.attributes)
- return ret
-}
-
-type labelerContextKeyType int
-
-const lablelerContextKey labelerContextKeyType = 0
-
-func injectLabeler(ctx context.Context, l *Labeler) context.Context {
- return context.WithValue(ctx, lablelerContextKey, l)
-}
-
-// LabelerFromContext retrieves a Labeler instance from the provided context if
-// one is available. If no Labeler was found in the provided context a new, empty
-// Labeler is returned and the second return value is false. In this case it is
-// safe to use the Labeler but any attributes added to it will not be used.
-func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
- l, ok := ctx.Value(lablelerContextKey).(*Labeler)
- if !ok {
- l = &Labeler{}
- }
- return l, ok
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
deleted file mode 100644
index e835cac..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/http/httptrace"
-
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Transport implements the http.RoundTripper interface and wraps
-// outbound HTTP(S) requests with a span.
-type Transport struct {
- rt http.RoundTripper
-
- tracer trace.Tracer
- propagators propagation.TextMapPropagator
- spanStartOptions []trace.SpanStartOption
- filters []Filter
- spanNameFormatter func(string, *http.Request) string
- clientTrace func(context.Context) *httptrace.ClientTrace
-}
-
-var _ http.RoundTripper = &Transport{}
-
-// NewTransport wraps the provided http.RoundTripper with one that
-// starts a span and injects the span context into the outbound request headers.
-//
-// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
-// as the base http.RoundTripper.
-func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
- if base == nil {
- base = http.DefaultTransport
- }
-
- t := Transport{
- rt: base,
- }
-
- defaultOpts := []Option{
- WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
- WithSpanNameFormatter(defaultTransportFormatter),
- }
-
- c := newConfig(append(defaultOpts, opts...)...)
- t.applyConfig(c)
-
- return &t
-}
-
-func (t *Transport) applyConfig(c *config) {
- t.tracer = c.Tracer
- t.propagators = c.Propagators
- t.spanStartOptions = c.SpanStartOptions
- t.filters = c.Filters
- t.spanNameFormatter = c.SpanNameFormatter
- t.clientTrace = c.ClientTrace
-}
-
-func defaultTransportFormatter(_ string, r *http.Request) string {
- return "HTTP " + r.Method
-}
-
-// RoundTrip creates a Span and propagates its context via the provided request's headers
-// before handing the request to the configured base RoundTripper. The created span will
-// end when the response body is closed or when a read from the body returns io.EOF.
-func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
- for _, f := range t.filters {
- if !f(r) {
- // Simply pass through to the base RoundTripper if a filter rejects the request
- return t.rt.RoundTrip(r)
- }
- }
-
- tracer := t.tracer
-
- if tracer == nil {
- if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
- tracer = newTracer(span.TracerProvider())
- } else {
- tracer = newTracer(otel.GetTracerProvider())
- }
- }
-
- opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
-
- ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
-
- if t.clientTrace != nil {
- ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
- }
-
- r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
- span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
- t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
-
- res, err := t.rt.RoundTrip(r)
- if err != nil {
- span.RecordError(err)
- span.SetStatus(codes.Error, err.Error())
- span.End()
- return res, err
- }
-
- span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
- span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
- res.Body = newWrappedBody(span, res.Body)
-
- return res, err
-}
-
-// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
-// io.ReadCloser. If the passed body implements io.Writer, the returned value
-// will implement io.ReadWriteCloser.
-func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
- // The successful protocol switch responses will have a body that
- // implement an io.ReadWriteCloser. Ensure this interface type continues
- // to be satisfied if that is the case.
- if _, ok := body.(io.ReadWriteCloser); ok {
- return &wrappedBody{span: span, body: body}
- }
-
- // Remove the implementation of the io.ReadWriteCloser and only implement
- // the io.ReadCloser.
- return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
-}
-
-// wrappedBody is the response body type returned by the transport
-// instrumentation to complete a span. Errors encountered when using the
-// response body are recorded in span tracking the response.
-//
-// The span tracking the response is ended when this body is closed.
-//
-// If the response body implements the io.Writer interface (i.e. for
-// successful protocol switches), the wrapped body also will.
-type wrappedBody struct {
- span trace.Span
- body io.ReadCloser
-}
-
-var _ io.ReadWriteCloser = &wrappedBody{}
-
-func (wb *wrappedBody) Write(p []byte) (int, error) {
- // This will not panic given the guard in newWrappedBody.
- n, err := wb.body.(io.Writer).Write(p)
- if err != nil {
- wb.span.RecordError(err)
- wb.span.SetStatus(codes.Error, err.Error())
- }
- return n, err
-}
-
-func (wb *wrappedBody) Read(b []byte) (int, error) {
- n, err := wb.body.Read(b)
-
- switch err {
- case nil:
- // nothing to do here but fall through to the return
- case io.EOF:
- wb.span.End()
- default:
- wb.span.RecordError(err)
- wb.span.SetStatus(codes.Error, err.Error())
- }
- return n, err
-}
-
-func (wb *wrappedBody) Close() error {
- wb.span.End()
- if wb.body != nil {
- return wb.body.Close()
- }
- return nil
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
deleted file mode 100644
index bd41c18..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-// Version is the current release version of the otelhttp instrumentation.
-func Version() string {
- return "0.46.1"
- // This string is updated by the pre_release.sh script during release
-}
-
-// SemVersion is the semantic version to be supplied to tracer/meter creation.
-//
-// Deprecated: Use [Version] instead.
-func SemVersion() string {
- return Version()
-}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
deleted file mode 100644
index 11a35ed..0000000
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
-
-import (
- "context"
- "io"
- "net/http"
-
- "go.opentelemetry.io/otel/propagation"
-)
-
-var _ io.ReadCloser = &bodyWrapper{}
-
-// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
-// of bytes read and the last error.
-type bodyWrapper struct {
- io.ReadCloser
- record func(n int64) // must not be nil
-
- read int64
- err error
-}
-
-func (w *bodyWrapper) Read(b []byte) (int, error) {
- n, err := w.ReadCloser.Read(b)
- n1 := int64(n)
- w.read += n1
- w.err = err
- w.record(n1)
- return n, err
-}
-
-func (w *bodyWrapper) Close() error {
- return w.ReadCloser.Close()
-}
-
-var _ http.ResponseWriter = &respWriterWrapper{}
-
-// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
-// bytes written, the last error, and to catch the first written statusCode.
-// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
-// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
-// that may be useful when using it in real life situations.
-type respWriterWrapper struct {
- http.ResponseWriter
- record func(n int64) // must not be nil
-
- // used to inject the header
- ctx context.Context
-
- props propagation.TextMapPropagator
-
- written int64
- statusCode int
- err error
- wroteHeader bool
-}
-
-func (w *respWriterWrapper) Header() http.Header {
- return w.ResponseWriter.Header()
-}
-
-func (w *respWriterWrapper) Write(p []byte) (int, error) {
- if !w.wroteHeader {
- w.WriteHeader(http.StatusOK)
- }
- n, err := w.ResponseWriter.Write(p)
- n1 := int64(n)
- w.record(n1)
- w.written += n1
- w.err = err
- return n, err
-}
-
-// WriteHeader persists initial statusCode for span attribution.
-// All calls to WriteHeader will be propagated to the underlying ResponseWriter
-// and will persist the statusCode from the first call.
-// Blocking consecutive calls to WriteHeader alters expected behavior and will
-// remove warning logs from net/http where developers will notice incorrect handler implementations.
-func (w *respWriterWrapper) WriteHeader(statusCode int) {
- if !w.wroteHeader {
- w.wroteHeader = true
- w.statusCode = statusCode
- }
- w.ResponseWriter.WriteHeader(statusCode)
-}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
deleted file mode 100644
index ae6a3bc..0000000
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ /dev/null
@@ -1,5 +0,0 @@
-ot
-fo
-te
-collison
-consequentially
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
deleted file mode 100644
index 4afbb1f..0000000
--- a/vendor/go.opentelemetry.io/otel/.codespellrc
+++ /dev/null
@@ -1,10 +0,0 @@
-# https://github.com/codespell-project/codespell
-[codespell]
-builtin = clear,rare,informal
-check-filenames =
-check-hidden =
-ignore-words = .codespellignore
-interactive = 1
-skip = .git,go.mod,go.sum,semconv,venv,.tools
-uri-ignore-words-list = *
-write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
deleted file mode 100644
index 314766e..0000000
--- a/vendor/go.opentelemetry.io/otel/.gitattributes
+++ /dev/null
@@ -1,3 +0,0 @@
-* text=auto eol=lf
-*.{cmd,[cC][mM][dD]} text eol=crlf
-*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
deleted file mode 100644
index 895c766..0000000
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-.DS_Store
-Thumbs.db
-
-.tools/
-venv/
-.idea/
-.vscode/
-*.iml
-*.so
-coverage.*
-go.work
-go.work.sum
-
-gen/
-
-/example/dice/dice
-/example/namedtracer/namedtracer
-/example/otel-collector/otel-collector
-/example/opencensus/opencensus
-/example/passthrough/passthrough
-/example/prometheus/prometheus
-/example/zipkin/zipkin
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
deleted file mode 100644
index 38a1f56..0000000
--- a/vendor/go.opentelemetry.io/otel/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "opentelemetry-proto"]
- path = exporters/otlp/internal/opentelemetry-proto
- url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
deleted file mode 100644
index a62511f..0000000
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ /dev/null
@@ -1,296 +0,0 @@
-# See https://github.com/golangci/golangci-lint#config-file
-run:
- issues-exit-code: 1 #Default
- tests: true #Default
-
-linters:
- # Disable everything by default so upgrades to not include new "default
- # enabled" linters.
- disable-all: true
- # Specifically enable linters we want to use.
- enable:
- - depguard
- - errcheck
- - godot
- - gofumpt
- - goimports
- - gosec
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - typecheck
- - unused
-
-issues:
- # Maximum issues count per one linter.
- # Set to 0 to disable.
- # Default: 50
- # Setting to unlimited so the linter only is run once to debug all issues.
- max-issues-per-linter: 0
- # Maximum count of issues with the same text.
- # Set to 0 to disable.
- # Default: 3
- # Setting to unlimited so the linter only is run once to debug all issues.
- max-same-issues: 0
- # Excluding configuration per-path, per-linter, per-text and per-source.
- exclude-rules:
- # TODO: Having appropriate comments for exported objects helps development,
- # even for objects in internal packages. Appropriate comments for all
- # exported objects should be added and this exclusion removed.
- - path: '.*internal/.*'
- text: "exported (method|function|type|const) (.+) should have comment or be unexported"
- linters:
- - revive
- # Yes, they are, but it's okay in a test.
- - path: _test\.go
- text: "exported func.*returns unexported type.*which can be annoying to use"
- linters:
- - revive
- # Example test functions should be treated like main.
- - path: example.*_test\.go
- text: "calls to (.+) only in main[(][)] or init[(][)] functions"
- linters:
- - revive
- # It's okay to not run gosec in a test.
- - path: _test\.go
- linters:
- - gosec
- # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
- # as we commonly use it in tests and examples.
- - text: "G404:"
- linters:
- - gosec
- # Igonoring gosec G402: TLS MinVersion too low
- # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- - text: "G402: TLS MinVersion too low."
- linters:
- - gosec
- include:
- # revive exported should have comment or be unexported.
- - EXC0012
- # revive package comment should be of the form ...
- - EXC0013
-
-linters-settings:
- depguard:
- rules:
- non-tests:
- files:
- - "!$test"
- - "!**/*test/*.go"
- - "!**/internal/matchers/*.go"
- deny:
- - pkg: "testing"
- - pkg: "github.com/stretchr/testify"
- - pkg: "crypto/md5"
- - pkg: "crypto/sha1"
- - pkg: "crypto/**/pkix"
- otlp-internal:
- files:
- - "!**/exporters/otlp/internal/**/*.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
- desc: Do not use cross-module internal packages.
- otlptrace-internal:
- files:
- - "!**/exporters/otlp/otlptrace/*.go"
- - "!**/exporters/otlp/otlptrace/internal/**.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
- desc: Do not use cross-module internal packages.
- otlpmetric-internal:
- files:
- - "!**/exporters/otlp/otlpmetric/internal/*.go"
- - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
- desc: Do not use cross-module internal packages.
- otel-internal:
- files:
- - "**/sdk/*.go"
- - "**/sdk/**/*.go"
- - "**/exporters/*.go"
- - "**/exporters/**/*.go"
- - "**/schema/*.go"
- - "**/schema/**/*.go"
- - "**/metric/*.go"
- - "**/metric/**/*.go"
- - "**/bridge/*.go"
- - "**/bridge/**/*.go"
- - "**/example/*.go"
- - "**/example/**/*.go"
- - "**/trace/*.go"
- - "**/trace/**/*.go"
- deny:
- - pkg: "go.opentelemetry.io/otel/internal$"
- desc: Do not use cross-module internal packages.
- - pkg: "go.opentelemetry.io/otel/internal/attribute"
- desc: Do not use cross-module internal packages.
- - pkg: "go.opentelemetry.io/otel/internal/internaltest"
- desc: Do not use cross-module internal packages.
- - pkg: "go.opentelemetry.io/otel/internal/matchers"
- desc: Do not use cross-module internal packages.
- godot:
- exclude:
- # Exclude links.
- - '^ *\[[^]]+\]:'
- # Exclude sentence fragments for lists.
- - '^[ ]*[-•]'
- # Exclude sentences prefixing a list.
- - ':$'
- goimports:
- local-prefixes: go.opentelemetry.io
- misspell:
- locale: US
- ignore-words:
- - cancelled
- revive:
- # Sets the default failure confidence.
- # This means that linting errors with less than 0.8 confidence will be ignored.
- # Default: 0.8
- confidence: 0.01
- rules:
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports
- - name: blank-imports
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr
- - name: bool-literal-in-expr
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr
- - name: constant-logical-expr
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
- # TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- - name: context-as-argument
- disabled: true
- arguments:
- allowTypesBefore: "*testing.T"
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
- - name: context-keys-type
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit
- - name: deep-exit
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer
- - name: defer
- disabled: false
- arguments:
- - ["call-chain", "loop"]
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports
- - name: dot-imports
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports
- - name: duplicated-imports
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return
- - name: early-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block
- - name: empty-block
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines
- - name: empty-lines
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming
- - name: error-naming
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return
- - name: error-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings
- - name: error-strings
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf
- - name: errorf
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported
- - name: exported
- disabled: false
- arguments:
- - "sayRepetitiveInsteadOfStutters"
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter
- - name: flag-parameter
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches
- - name: identical-branches
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return
- - name: if-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement
- - name: increment-decrement
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow
- - name: indent-error-flow
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing
- - name: import-shadowing
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments
- - name: package-comments
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range
- - name: range
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure
- - name: range-val-in-closure
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address
- - name: range-val-address
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id
- - name: redefines-builtin-id
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format
- - name: string-format
- disabled: false
- arguments:
- - - panic
- - '/^[^\n]*$/'
- - must not contain line breaks
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag
- - name: struct-tag
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else
- - name: superfluous-else
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal
- - name: time-equal
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming
- - name: var-naming
- disabled: false
- arguments:
- - ["ID"] # AllowList
- - ["Otel", "Aws", "Gcp"] # DenyList
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration
- - name: var-declaration
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion
- - name: unconditional-recursion
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return
- - name: unexported-return
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error
- - name: unhandled-error
- disabled: false
- arguments:
- - "fmt.Fprint"
- - "fmt.Fprintf"
- - "fmt.Fprintln"
- - "fmt.Print"
- - "fmt.Printf"
- - "fmt.Println"
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt
- - name: unnecessary-stmt
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break
- - name: useless-break
- disabled: false
- # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
- - name: waitgroup-by-value
- disabled: false
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
deleted file mode 100644
index 40d62fa..0000000
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ /dev/null
@@ -1,6 +0,0 @@
-http://localhost
-http://jaeger-collector
-https://github.com/open-telemetry/opentelemetry-go/milestone/
-https://github.com/open-telemetry/opentelemetry-go/projects
-file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
-file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
deleted file mode 100644
index 3202496..0000000
--- a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-# Default state for all rules
-default: true
-
-# ul-style
-MD004: false
-
-# hard-tabs
-MD010: false
-
-# line-length
-MD013: false
-
-# no-duplicate-header
-MD024:
- siblings_only: true
-
-#single-title
-MD025: false
-
-# ol-prefix
-MD029:
- style: ordered
-
-# no-inline-html
-MD033: false
-
-# fenced-code-language
-MD040: false
-
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
deleted file mode 100644
index 24874f8..0000000
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ /dev/null
@@ -1,2818 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
-
-This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
-## [1.21.0/0.44.0] 2023-11-16
-
-### Removed
-
-- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
-- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
-- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
-- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
-
-### Fixed
-
-- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
-- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
-
-## [1.20.0/0.43.0] 2023-11-10
-
-This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
-
-### Added
-
-- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
-- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
-- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
-- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
-- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
-- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
-- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
-- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
-- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
-- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622)
-- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
-- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
-- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
-
-### Deprecated
-
-- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
-- Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
-- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
- Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
-- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
-- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
-
-### Changed
-
-- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
-- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
- This extends the `TracerProvider` interface and is is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
- See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
-- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
- This extends the `Tracer` interface and is is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
- See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
-- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
- This extends the `Span` interface and is is a breaking change for any existing implementation.
- Implementors need to update their implementations based on what they want the default behavior of the interface to be.
- See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
-- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
-- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
-- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
-- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
-- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
-- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
-- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
-
-### Fixed
-
-- Fix improper parsing of characters such us `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667)
-- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699)
-- Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699)
-- In `go.opentelemetry.op/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
-- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
-- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
-
-## [1.19.0/0.42.0/0.0.7] 2023-09-28
-
-This release contains the first stable release of the OpenTelemetry Go [metric SDK].
-Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
-- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
-
-### Changed
-
-- Allow '/' characters in metric instrument names. (#4501)
-- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
-- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
-
-### Fixed
-
-- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
-
-### Removed
-
-- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
-
-## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
-
-This is a release candidate for the v1.19.0/v0.42.0 release.
-That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Changed
-
-- Allow '/' characters in metric instrument names. (#4501)
-
-### Fixed
-
-- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
-
-## [1.18.0/0.41.0/0.0.6] 2023-09-12
-
-This release drops the compatibility guarantee of [Go 1.19].
-
-### Added
-
-- Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473)
-- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
-
-### Changed
-
-- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
-
-### Deprecated
-
-- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
- The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
-
-### Removed
-
-- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
-- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
-- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
-- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
-- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
-
-## [1.17.0/0.40.0/0.0.5] 2023-08-28
-
-### Added
-
-- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
-- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
-- Add support for exponential histogram aggregations.
- A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
-- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
-- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
-- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
-- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
-- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
-- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
- The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
-- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
-- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381)
-- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374)
-- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
-- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
-- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
-- Support Go 1.21. (#4463)
-
-### Changed
-
-- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
-- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
-- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
-- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
-- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
-- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
-- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
-- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
-- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
-- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
-- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
-- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
-- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
-- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
-- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
-- Add `go.opentelemetry.op/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
-
-### Removed
-
-- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
- Use the added `WithProducer` option instead. (#4346)
-- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
- Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
-
-### Fixed
-
-- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
-- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
-- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
-- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
-- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
-- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
-- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
-- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
-- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
-- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
-- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
-- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
-- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
-- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
-- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
- OpenTelemetry dropped support for Jaeger exporter in July 2023.
- Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
- or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
-- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
-- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
-- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
-- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
- Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
-
-## [1.16.0/0.39.0] 2023-05-18
-
-This release contains the first stable release of the OpenTelemetry Go [metric API].
-Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
- The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
-- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
- The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
-- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
-- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
-- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
-
-### Changed
-
-- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049)
-- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
- Use `go.opentelemetry.io/otel/metric` instead. (#4055)
-
-### Fixed
-
-- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
-
-## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
-
-This is a release candidate for the v1.16.0/v0.39.0 release.
-That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
- - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
- - Use `GetMeterProvider` for a global `metric.MeterProvider`.
- - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
-
-### Changed
-
-- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
- This stages the metric API to be released as a stable module. (#4038)
-
-### Removed
-
-- The `go.opentelemetry.io/otel/metric/global` package is removed.
- Use `go.opentelemetry.io/otel` instead. (#4039)
-
-## [1.15.1/0.38.1] 2023-05-02
-
-### Fixed
-
-- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
-
-## [1.15.0/0.38.0] 2023-04-27
-
-### Added
-
-- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
-- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
-- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
-- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
- - The `AddConfig` used to hold configuration for addition measurements
- - `NewAddConfig` used to create a new `AddConfig`
- - `AddOption` used to configure an `AddConfig`
- - The `RecordConfig` used to hold configuration for recorded measurements
- - `NewRecordConfig` used to create a new `RecordConfig`
- - `RecordOption` used to configure a `RecordConfig`
- - The `ObserveConfig` used to hold configuration for observed measurements
- - `NewObserveConfig` used to create a new `ObserveConfig`
- - `ObserveOption` used to configure an `ObserveConfig`
-- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
- They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971)
-- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
-- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
-
-### Changed
-
-- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
-- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
- This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
-- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
- - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
-- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
-- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974)
-- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
- - The `Int64Counter.Add` method now accepts `...AddOption`
- - The `Float64Counter.Add` method now accepts `...AddOption`
- - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
- - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
- - The `Int64Histogram.Record` method now accepts `...RecordOption`
- - The `Float64Histogram.Record` method now accepts `...RecordOption`
- - The `Int64Observer.Observe` method now accepts `...ObserveOption`
- - The `Float64Observer.Observe` method now accepts `...ObserveOption`
-- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
- - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
- - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
-- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
-
-### Fixed
-
-- `TracerProvider` allows calling `Tracer()` while it's shutting down.
- It used to deadlock. (#3924)
-- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
-- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
-- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
- Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
-
-## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
-
-This is a release candidate for the v1.15.0/v0.38.0 release.
-That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-### Added
-
-- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
-- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
-- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
- Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
-- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
-- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
-- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
-
-### Changed
-
-- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
-- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
-- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
-- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
-- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
-- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
-- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
-- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
-- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
-- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
-
-### Fixed
-
-- `TracerProvider` consistently doesn't allow to register a `SpanProcessor` after shutdown. (#3845)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
-- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
-- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
- Use the added `float64` instrument configuration instead. (#3895)
-- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
- Use the added `int64` instrument configuration instead. (#3895)
-- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)
-
-## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
-
-This is a release candidate for the v1.15.0/v0.38.0 release.
-That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
-See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
-
-This release drops the compatibility guarantee of [Go 1.18].
-
-### Added
-
-- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
- - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
- - Use `GetMeterProvider` for a global `metric.MeterProvider`.
- - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
-
-### Changed
-
-- Dropped compatibility testing for [Go 1.18].
- The project no longer guarantees support for this version of Go. (#3813)
-
-### Fixed
-
-- Handle empty environment variables as if they were not set. (#3764)
-- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
-- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
-- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
- Use `go.opentelemetry.io/otel` instead. (#3818)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
-
-## [1.14.0/0.37.0/0.0.4] 2023-02-27
-
-This release is the last to support [Go 1.18].
-The next release will require at least [Go 1.19].
-
-### Added
-
-- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
-- Support [Go 1.20]. (#3693)
-- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
- The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
- - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
- - `OtelScopeNameKey` -> `OTelScopeNameKey`
- - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
- - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
- - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
- - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
- - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
- - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
- - `OtelStatusCodeError` -> `OTelStatusCodeError`
- - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
- - `OtelScopeName` -> `OTelScopeName`
- - `OtelScopeVersion` -> `OTelScopeVersion`
- - `OtelLibraryName` -> `OTelLibraryName`
- - `OtelLibraryVersion` -> `OTelLibraryVersion`
- - `OtelStatusDescription` -> `OTelStatusDescription`
-- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
- See the [README](./bridge/opentracing/README.md) for more information. (#3570)
-- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
-- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
-- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
- - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
- - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted.
-
-### Changed
-
-- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
-- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
- This change is made to enable memory reuse by SDK users. (#3732)
-- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
-
-### Fixed
-
-- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
-- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
-- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
-- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
-- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
-- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
-- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
- Use the equivalent unit string instead. (#3776)
- - Use `"1"` instead of `unit.Dimensionless`
- - Use `"By"` instead of `unit.Bytes`
- - Use `"ms"` instead of `unit.Milliseconds`
-
-## [1.13.0/0.36.0] 2023-02-07
-
-### Added
-
-- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
- These functions ensure semantic convention type correctness. (#3675)
-
-### Fixed
-
-- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
- - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
- - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
-- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
-
-## [1.12.0/0.35.0] 2023-01-28
-
-### Added
-
-- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
- This option is used to configure `int64` Observer callbacks during their creation. (#3507)
-- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
- This option is used to configure `float64` Observer callbacks during their creation. (#3507)
-- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
- These additions are used to enable external metric Producers. (#3524)
-- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
- This new named function type is registered with a `Meter`. (#3564)
-- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
- The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
- - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
- - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
- - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
-- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
- The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
-- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
- The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
-- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
- The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
-- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
- These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
- - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
- - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
- - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
- - `Int64ObservableCounter` replaces the `asyncint64.Counter`
- - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
- - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
- - `Float64Counter` replaces the `syncfloat64.Counter`
- - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
- - `Float64Histogram` replaces the `syncfloat64.Histogram`
- - `Int64Counter` replaces the `syncint64.Counter`
- - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
- - `Int64Histogram` replaces the `syncint64.Histogram`
-- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
- This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
-- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
- This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
-- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
- The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
-
-### Changed
-
-- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
-- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
- - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
- - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
- - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
- - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
-- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
- This `Registration` can be used to unregister callbacks. (#3522)
-- Global error handler uses an atomic value instead of a mutex. (#3543)
-- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
-- Global logger uses an atomic value instead of a mutex. (#3545)
-- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
-- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
- This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557)
-- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
- Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
-- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
-- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
- - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
- - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
- - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
- - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
- - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
- - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
-- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
- - The named `Callback` replaces the inline function parameter. (#3564)
- - `Callback` is required to return an error. (#3576)
- - `Callback` accepts the added `Observer` parameter.
- This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
- - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
-- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
- This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
- Instead it uses the `net.sock.peer` attributes. (#3581)
-- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
-
-### Fixed
-
-- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
-- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
- Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
-
-### Deprecated
-
-- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
- Use `NewMetricProducer` instead. (#3541)
-- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
- Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
-- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
- Use `NewTracerProvider` instead. (#3116)
-
-### Removed
-
-- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
- - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
- - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
- - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
- - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Int64Counter`
- - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
- - The `Histogram` method is replaced by `Meter.Int64Histogram`
-- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
- Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
- - The `Counter` method is replaced by `Meter.Float64Counter`
- - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
- - The `Histogram` method is replaced by `Meter.Float64Histogram`
-
-## [1.11.2/0.34.0] 2022-12-05
-
-### Added
-
-- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
- This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
-- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
- This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
-- OTLP exporters now recognize: (#3363)
- - `OTEL_EXPORTER_OTLP_INSECURE`
- - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
- - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
- - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
- - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
- - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
- - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
-- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
- These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
-- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
- These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
-- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
-- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
-
-### Changed
-
-- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
- Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
- The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
-- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
-- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
-- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
-- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
-
-### Fixed
-
-- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
-- Remove comparable requirement for `Reader`s. (#3387)
-- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
-- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
-- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
-- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
-- Re-enabled Attribute Filters in the Metric SDK. (#3396)
-- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408)
-- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
-- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
-- Prevent duplicate Prometheus description, unit, and type. (#3469)
-- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
-
-### Removed
-
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
-- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
- Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
-
-## [1.11.1/0.33.0] 2022-10-19
-
-### Added
-
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
- By default, it will register with the default Prometheus registerer.
- A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
-- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
-
-### Changed
-
-- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
- It will return an error if the exporter fails to register with Prometheus. (#3239)
-
-### Fixed
-
-- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
-- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
- This fixes the implementation to be compliant with the W3C specification. (#3226)
-- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252)
-- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
-- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
-- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
-- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
-- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
- Instead the exporter is defined as an "unchecked" collector for Prometheus.
- This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342)
-- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
-- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
- This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
-
-## [1.11.0/0.32.3] 2022-10-12
-
-### Added
-
-- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)
-
-### Changed
-
-- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
-- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
- This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
-
-## [0.32.2] Metric SDK (Alpha) - 2022-10-11
-
-### Added
-
-- Added an example of using metric views to customize instruments. (#3177)
-- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
-
-### Changed
-
-- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
-- Update histogram default bounds to match the requirements of the latest specification. (#3222)
-- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
-
-### Fixed
-
-- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
-- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
-- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
-- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
-- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
-
-## [0.32.1] Metric SDK (Alpha) - 2022-09-22
-
-### Changed
-
-- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
- Invalid characters are replaced with `_`. (#3212)
-
-### Added
-
-- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
-- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
-
-### Fixed
-
-- Updated go.mods to point to valid versions of the sdk. (#3216)
-- Set the `MeterProvider` resource on all exported metric data. (#3218)
-
-## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
-
-### Changed
-
-- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
- Please see the package documentation for how the new SDK is initialized and configured. (#3175)
-- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179)
-
-### Removed
-
-- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
- A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
- A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
-- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175)
-- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175)
-- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
-
-## [1.10.0] - 2022-09-09
-
-### Added
-
-- Support Go 1.19. (#3077)
- Include compatibility testing and document support. (#3077)
-- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
-- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107)
-
-### Changed
-
-- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
-- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
-- All exporters will be shutdown even if one reports an error (#3091)
-- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
-
-## [1.9.0/0.0.3] - 2022-08-01
-
-### Added
-
-- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
-- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
- The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
-- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
- The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
-- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
-
-### Fixed
-
-- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
-
-## [1.8.0/0.31.0] - 2022-07-08
-
-### Added
-
-- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
-of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
-
-### Changed
-
-- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
-- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976)
-- Move metric no-op implementation form `nonrecording` to `metric` package. (#2866)
-
-### Removed
-
-- Support for go1.16. Support is now only for go1.17 and go1.18 (#2917)
-
-### Deprecated
-
-- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
- Use the equivalent `Scope` struct instead. (#2977)
-- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
- Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
-
-## [1.7.0/0.30.0] - 2022-04-28
-
-### Added
-
-- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
- The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
-- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
- The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
-- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
- The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
-- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
-
-### Fixed
-
-- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
-- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
-
-### Changed
-
-- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
-- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
- The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
-- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
- Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790)
-
-### Deprecated
-
-- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
- Use the equivalent `Iterator.Attribute` method instead. (#2790)
-- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
- Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
-- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
- Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
-
-### Removed
-
-- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
-- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
-
-## [0.29.0] - 2022-04-11
-
-### Added
-
-- The metrics global package was added back into several test files. (#2764)
-- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
- This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
-
-### Removed
-
-- Removed module the `go.opentelemetry.io/otel/sdk/export/metric`.
- Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
-
-### Changed
-
-- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
-- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
- This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
-
-## [1.6.3] - 2022-04-07
-
-### Fixed
-
-- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
-
-## [1.6.2] - 2022-04-06
-
-### Changed
-
-- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
-- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
- This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
-
-## [1.6.1] - 2022-03-28
-
-### Fixed
-
-- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
- Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
-
-### Security
-
-- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
- This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
-
-## [1.6.0/0.28.0] - 2022-03-23
-
-### ⚠️ Notice ⚠️
-
-This update is a breaking change of the unstable Metrics API.
-Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified.
-
-### Added
-
-- Add metrics exponential histogram support.
- New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
-- Add Go 1.18 to our compatibility tests. (#2679)
-- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
-- Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660)
-
-### Changed
-
-- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
- High-level changes include:
-
- - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
- These `InstrumentProvider`s are managed with a `Meter`.
- - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
- - Asynchronous callbacks can now be registered with a `Meter`.
-
- Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
-
-### Fixed
-
-- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
-
-## [1.5.0] - 2022-03-16
-
-### Added
-
-- Log the Exporters configuration in the TracerProviders message. (#2578)
-- Added support to configure the span limits with environment variables.
- The following environment variables are supported. (#2606, #2637)
- - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
- - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
- - `OTEL_SPAN_EVENT_COUNT_LIMIT`
- - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
- - `OTEL_SPAN_LINK_COUNT_LIMIT`
- - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
-
- If the provided environment variables are invalid (negative), the default values would be used.
-- Rename the `gc` runtime name to `go` (#2560)
-- Add resource container ID detection. (#2418)
-- Add span attribute value length limit.
- The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
- The default limit for this resource is "unlimited". (#2637)
-- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
- This option replaces the `WithSpanLimits` option.
- Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
- Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
- Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
-
-### Changed
-
-- Drop oldest tracestate `Member` when capacity is reached. (#2592)
-- Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601)
-- Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639)
-- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
-- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
-- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
-
-### Fixed
-
-- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
-- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
-- Unlimited span limits are now supported (negative values). (#2636, #2637)
-
-### Deprecated
-
-- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
- Use `WithRawSpanLimits` instead.
- That option allows setting unlimited and zero limits, this option does not.
- This option will be kept until the next major version incremented release. (#2637)
-
-## [1.4.1] - 2022-02-16
-
-### Fixed
-
-- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
-
-## [1.4.0] - 2022-02-11
-
-### Added
-
-- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
-- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging.
- To enable use a logger with Verbosity (V level) `>=1`. (#2500)
-- Added support to configure the batch span-processor with environment variables.
- The following environment variables are used. (#2515)
- - `OTEL_BSP_SCHEDULE_DELAY`
- - `OTEL_BSP_EXPORT_TIMEOUT`
- - `OTEL_BSP_MAX_QUEUE_SIZE`.
- - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
-
-### Changed
-
-- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
-
-### Deprecated
-
-- Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`.
- Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
-- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
-
-### Fixed
-
-- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
-- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
-- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
-- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
-- W3C baggage will now decode urlescaped values. (#2529)
-- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
-- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
- Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
- This drop order still only applies to attributes with unique keys not already contained in the span.
- If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
-
-### Removed
-
-- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
- - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
- - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
- - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
-
-## [1.3.0] - 2021-12-10
-
-### ⚠️ Notice ⚠️
-
-We have updated the project minimum supported Go version to 1.16
-
-### Added
-
-- Added an internal Logger.
- This can be used by the SDK and API to provide users with feedback of the internal state.
- To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343)
-- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
-- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
-
-### Changed
-
-- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
-- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
-- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
-- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
-- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
-
-### Fixed
-
-- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
- Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
- When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
-- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381)
-- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
-
-### Deprecated
-
-- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
-- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
-
-### Removed
-
-- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
-- Remove the metric Bound Instruments interface and implementations. (#2399)
-- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
-- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
-
-## [1.2.0] - 2021-11-12
-
-### Changed
-
-- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
-- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
-- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
- - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
- - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
- - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
-- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
-
-### Added
-
-- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
-- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
-- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
-
-## [1.1.0] - 2021-10-27
-
-### Added
-
-- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
-- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
- The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
-- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
- The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
-- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
- The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
- - When upgrading from the `semconv/v1.4.0` package note the following name changes:
- - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
- - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
- - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
- - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
- - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
- - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
-
-### Changed
-
-- Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275).
-
-### Fixed
-
-- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
-- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
-- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
-
-## [1.0.1] - 2021-10-01
-
-### Fixed
-
-- json stdout exporter no longer crashes due to concurrency bug. (#2265)
-
-## [Metrics 0.24.0] - 2021-10-01
-
-### Changed
-
-- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
-- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
- - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
- - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
-
-## [1.0.0] - 2021-09-20
-
-This is the first stable release for the project.
-This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md).
-
-### Added
-
-- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
-
-### Fixed
-
-- Slice-valued attributes can correctly be used as map keys. (#2223)
-
-### Removed
-
-- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
-- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
-- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
-- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
- Use the typed functions and methods added to the package instead. (#2235)
- - The `Key.Array` method is removed.
- - The `Array` function is removed.
- - The `Any` function is removed.
- - The `ArrayValue` function is removed.
- - The `AsArray` function is removed.
-
-## [1.0.0-RC3] - 2021-09-02
-
-### Added
-
-- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
-- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
-- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
- - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
-- Added the `go.opentelemetry.io/otel/example/fib` example package.
- Included is an example application that computes Fibonacci numbers. (#2203)
-
-### Changed
-
-- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
- - ValueRecorder becomes Histogram
- - ValueObserver becomes Gauge
- - SumObserver becomes CounterObserver
- - UpDownSumObserver becomes UpDownCounterObserver
- The API exported from this project is still considered experimental. (#2202)
-- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
-- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
-- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
-- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` function in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
- All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
- The functions from that package should be used instead. (#2166)
-- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
- Use the typed `*Slice` functions and types added to the package instead. (#2162)
-- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
- Use the typed functions instead. (#2181)
-- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
- The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
-
-### Removed
-
-- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
-
-### Fixed
-
-- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
-- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
-- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
-- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
-- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
-- Fixed typos in resources.go. (#2201)
-
-## [1.0.0-RC2] - 2021-07-26
-
-### Added
-
-- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
-- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
-- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
- This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
-- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
-- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
- This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
- For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
-- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
- This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
-
-### Changed
-
-- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
-- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
-
-### Deprecated
-
-- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
-- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
-- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
- Use the `trace.ParseTraceState` function instead. (#2122)
-
-### Removed
-
-- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
-- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
-- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
- The explicit `With*` options for every built-in detector should be used instead. (#2026 #2097)
-- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
- The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
-- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
-
-### Fixed
-
-- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
-- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
-- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
-- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
- This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099 #2102)
-- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
-- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
-
-## [Experimental Metrics v0.22.0] - 2021-07-19
-
-### Added
-
-- Adds HTTP support for OTLP metrics exporter. (#2022)
-
-### Removed
-
-- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
-
-## [1.0.0-RC1] / 0.21.0 - 2021-06-18
-
-With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
-while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
-with major version 0.
-
-### Added
-
-- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
- - The following status codes are defined as transient errors:
- | gRPC Status Code | Description |
- | ---------------- | ----------- |
- | 1 | Cancelled |
- | 4 | Deadline Exceeded |
- | 8 | Resource Exhausted |
- | 10 | Aborted |
- | 11 | Out of Range |
- | 14 | Unavailable |
- | 15 | Data Loss |
-- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
-- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
- This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
-- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
-- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
-- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
-- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
- It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
-- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
- This method returns the number of list-members the `TraceState` holds. (#1937)
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1922)
-- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
-- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
- These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
-- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
-- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
-- Several builtin resource detectors now correctly populate the schema URL. (#1938)
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
-- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
-- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
-- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
-- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
-
-### Changed
-
-- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
- `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
-- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
-- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
-- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
-- BatchSpanProcessor now report export failures when calling `ForceFlush()` method. (#1860)
-- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
-- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
-- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
- This method returns the status of a span using the new `Status` type. (#1874)
-- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
- This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
-- Unembed `SpanContext` in `Link`. (#1877)
-- Generate Semantic conventions from the specification YAML. (#1891)
-- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
-- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
-- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
-- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Changed `span.End()` now only accepts Options that are allowed at `End()`. (#1921)
-- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
-- Refactored option types according to the contribution style guide. (#1882)
-- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
- This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
- The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
-- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
-- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
-- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
-- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
-- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
-- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
-- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
-- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
-- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
-
-### Deprecated
-
-- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
-- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
-- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
-
-### Removed
-
-- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
-- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`, Use the corresponding `With*()` to use individually. (#1810)
-- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
- The `Tracer` method is not a required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
- The `IsRecording` method returns if the span is recording or not.
- A read-only span value does not need to know if updates to it will be recorded or not.
- By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
-- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
- The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
- When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
-- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
- Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
- The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library specific `Tracer` instead. (#1900)
- - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
-- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
-- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
-- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
- Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
-- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
- These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
-- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
-- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
-
-### Fixed
-
-- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
-- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
-- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
-- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
-- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
-- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
-- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
-
-### Security
-
-## [0.20.0] - 2021-04-23
-
-### Added
-
-- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373)
-- Adds semantic conventions for exceptions. (#1492)
-- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`
- These environment variables can be used to override Jaeger agent hostname and port (#1752)
-- Option `ExportTimeout` was added to batch span processor. (#1755)
-- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
-- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
-- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
-- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
-- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
-- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
-- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
- - `process.pid`
- - `process.executable.name`
- - `process.executable.path`
- - `process.command_args`
- - `process.owner`
- - `process.runtime.name`
- - `process.runtime.version`
- - `process.runtime.description`
-- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
-- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
- - `OTEL_EXPORTER_OTLP_ENDPOINT`
- - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
- - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
- - `OTEL_EXPORTER_OTLP_HEADERS`
- - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
- - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
- - `OTEL_EXPORTER_OTLP_COMPRESSION`
- - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
- - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
- - `OTEL_EXPORTER_OTLP_TIMEOUT`
- - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
- - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
- - `OTEL_EXPORTER_OTLP_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
- - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
-- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
-- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
-
-### Fixed
-
-- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
-- The Jaeger exporter now correctly sets tags for the Span status code and message.
- This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
-- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
- Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
-- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
-- Fixed typo for default service name in Jaeger Exporter. (#1797)
-- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
-- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
- Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
-
-### Changed
-
-- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
-- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
-- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
-- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
-- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
- The Span's SpanContext can now self-identify as being remote or not.
- This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
-- Improve OTLP/gRPC exporter connection errors. (#1737)
-- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
- The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
-- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
- This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
-- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
- to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
-- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
-- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
- It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
-- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
-- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
-- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
-- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796)
-- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
-- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
- This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
-- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
-- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
- The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
-- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
-- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
-
-### Removed
-
-- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`
- These environment variables will no longer be used to override values of the Jaeger exporter (#1752)
-- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
- This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
- To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
-- Setting error status while recording error with Span from oteltest package. (#1729)
-- The concept of a remote and local Span stored in a context is unified to just the current Span.
- Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
- Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
- If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
-- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
- This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
-- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
-- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
- The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
-- Remove the `WithDisabled` option from the Jaeger exporter.
- To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
-- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
- These functions for retrieving specific environment variable values are redundant of other internal functions and
- are not intended for end user use. (#1824)
-- Removed the Jaeger exporter `WithSDKOptions` `Option`.
- This option was used to set SDK options for the exporter creation convenience functions.
- These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases.
- If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
-- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
- The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
-- The Jaeger exporter `Option` type is removed.
- The type is no longer used by the exporter to configure anything.
- All the previous configurations these options provided were duplicates of SDK configuration.
- They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830)
-
-## [0.19.0] - 2021-03-18
-
-### Added
-
-- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586)
-- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
-- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
-- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
-- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
-
-### Changed
-
-- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
- - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
-- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
-- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608)
-- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
-- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656)
-- Added non-empty string check for trace `Attribute` keys. (#1659)
-- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
-- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
-- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
-- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
-- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
-- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
-
-### Removed
-
-- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
-- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
-- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
- These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
-- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
-- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
-- Removed `jaeger.WithProcess` configuration option. (#1673)
-- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
-
-### Fixed
-
-- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
-- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
-- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
-- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
-- Synchronization issues in global trace delegate implementation. (#1686)
-- Reduced excess memory usage by global `TracerProvider`. (#1687)
-
-## [0.18.0] - 2021-03-03
-
-### Added
-
-- Added `resource.Default()` for use with meter and tracer providers. (#1507)
-- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
-- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
-- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
-- Compatibility testing suite in the CI system for the following systems. (#1567)
- | OS | Go Version | Architecture |
- | ------- | ---------- | ------------ |
- | Ubuntu | 1.15 | amd64 |
- | Ubuntu | 1.14 | amd64 |
- | Ubuntu | 1.15 | 386 |
- | Ubuntu | 1.14 | 386 |
- | MacOS | 1.15 | amd64 |
- | MacOS | 1.14 | amd64 |
- | Windows | 1.15 | amd64 |
- | Windows | 1.14 | amd64 |
- | Windows | 1.15 | 386 |
- | Windows | 1.14 | 386 |
-
-### Changed
-
-- Replaced interface `oteltest.SpanRecorder` with its existing implementation
- `StandardSpanRecorder`. (#1542)
-- Default span limit values to 128. (#1535)
-- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
-- Renamed the `otel/label` package to `otel/attribute`. (#1541)
-- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
-- Parallelize the CI linting and testing. (#1567)
-- Stagger timestamps in exact aggregator tests. (#1569)
-- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
-- Prevent end-users from implementing some interfaces (#1575)
-
- ```
- "otel/exporters/otlp/otlphttp".Option
- "otel/exporters/stdout".Option
- "otel/oteltest".Option
- "otel/trace".TracerOption
- "otel/trace".SpanOption
- "otel/trace".EventOption
- "otel/trace".LifeCycleOption
- "otel/trace".InstrumentationOption
- "otel/sdk/resource".Option
- "otel/sdk/trace".ParentBasedSamplerOption
- "otel/sdk/trace".ReadOnlySpan
- "otel/sdk/trace".ReadWriteSpan
- ```
-
-### Removed
-
-- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
-- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
-- Removed the `test-386` make target.
- This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
-
-### Fixed
-
-- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572)
-- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577)
-- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579)
-- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
-- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
-
-## [0.17.0] - 2021-02-12
-
-### Changed
-
-- Rename project default branch from `master` to `main`. (#1505)
-- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
-- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
-- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
-- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
-
-## Fixed
-
-- Fixed otlpgrpc reconnection issue.
-- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513)
-- The otel-collector example now uses the default OTLP receiver port of the collector.
-
-## [0.16.0] - 2021-01-13
-
-### Added
-
-- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
-- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
-- Added documentation about the project's versioning policy. (#1388)
-- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
-- Added codeql workflow to GitHub Actions (#1428)
-- Added Gosec workflow to GitHub Actions (#1429)
-- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
-- Add an OpenCensus exporter bridge. (#1444)
-
-### Changed
-
-- Rename `internal/testing` to `internal/internaltest`. (#1449)
-- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
-- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
-- Improve span duration accuracy. (#1360)
-- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
-- Remove duplicate checkout from GitHub Actions workflow (#1407)
-- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
-- Metric `exact` aggregator includes per-point timestamps (#1412)
-- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
-- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
-- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
-- Unify endpoint API that related to OTel exporter. (#1401)
-- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
-- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430)
-- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
-- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
-- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
-- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
-- Metric Push and Pull Controller components are combined into a single "basic" Controller:
- - `WithExporter()` and `Start()` to configure Push behavior
- - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
- - `Start()` and `Stop()` accept Context. (#1378)
-- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
-
-### Removed
-
-- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
-- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
-- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
-
-### Fixed
-
-- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)
-
-## [0.15.0] - 2020-12-10
-
-### Added
-
-- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
-
-### Changed
-
-- The Zipkin exporter now uses the Span status code to determine. (#1328)
-- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
-- Move the OpenCensus example into `example` directory. (#1359)
-- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
-- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
-- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
-
-### Fixed
-
-- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
-
-## [0.14.0] - 2020-11-19
-
-### Added
-
-- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
-- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
-- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
-- `TraceState` has been added to `SpanContext`. (#1340)
-- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
-- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
-- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
-- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
-
-### Changed
-
-- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
- - `ID` has been renamed to `TraceID`.
- - `IDFromHex` has been renamed to `TraceIDFromHex`.
- - `EmptySpanContext` is removed.
-- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
-- OTLP Exporter updates:
- - supports OTLP v0.6.0 (#1230, #1354)
- - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
-- The Sampler is now called on local child spans. (#1233)
-- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
-- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
- This matches the returned type and fixes misuse of the term metric. (#1240)
-- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
-- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252)
-- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
-- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
-- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
-- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
-- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
-- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
-- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
-- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
-- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
-- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
- `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
-- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
-- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
-- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
-- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
-
-### Removed
-
-- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243)
-- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
- It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
-- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
- `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
-- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
-- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
-
-### Fixed
-
-- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
-- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
-- Fix condition in `label.Any`. (#1299)
-- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
-- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
-
-## [0.13.0] - 2020-10-08
-
-### Added
-
-- OTLP Metric exporter supports Histogram aggregation. (#1209)
-- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
-- A Baggage API to implement the OpenTelemetry specification. (#1217)
-- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227)
-
-### Changed
-
-- Set default propagator to no-op propagator. (#1184)
-- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
-- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
-- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
- They now are `Unset`, `Error`, and `Ok`.
- They no longer track the gRPC codes. (#1214)
-- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
-- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
-- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
-
-### Fixed
-
-- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
-
-### Removed
-
-- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
-- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
- The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
-- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
-- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
-- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
-- Nested array/slice support has been removed. (#1226)
-
-## [0.12.0] - 2020-09-24
-
-### Added
-
-- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
-- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
- This addition was made to conform with our project option conventions. (#1155)
-- Instrumentation library information was added to the Zipkin exporter. (#1119)
-- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
-- More semantic conventions for k8s as resource attributes. (#1167)
-
-### Changed
-
-- Add reconnecting udp connection type to Jaeger exporter.
- This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record.
- It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
-- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
- This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
-- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
- This is be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
-- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
-- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
- This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
-- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
-- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
-- Move `tools` package under `internal`. (#1141)
-- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
- The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
-- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
-- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
-- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
-- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
- recommend the use of `newConfig()` instead of `configure()`. (#1163)
-- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
-- Ensure exported interface types include parameter names and update the
- Style Guide to reflect this styling rule. (#1172)
-- Don't consider unset environment variable for resource detection to be an error. (#1170)
-- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
- `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
-- ValueObserver instruments use LastValue aggregator by default. (#1165)
-- OTLP Metric exporter supports LastValue aggregation. (#1165)
-- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metri/registryc` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
-- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
-- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
-- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
-- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
-- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
-
-### Removed
-
-- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
- `go.opentelemetry.io/contrib/propagators/` module. (#1191)
-- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
-
-### Fixed
-
-- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
-- Fix missing shutdown processor in otel-collector example. (#1186)
-- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
-
-## [0.11.0] - 2020-08-24
-
-### Added
-
-- Support for exporting array-valued attributes via OTLP. (#992)
-- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
-- Support for filtering metric label sets. (#1047)
-- A dimensionality-reducing metric Processor. (#1057)
-- Integration tests for more OTel Collector Attribute types. (#1062)
-- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
-
-### Changed
-
-- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
-- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
-- Rename `api/testharness` to `api/apitest`. (#1049)
-- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
-- Change Metric Processor to merge multiple observations. (#1024)
-- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
- This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
-- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
-- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
-- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
-- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
-- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
-- Unify Callback Function Naming.
- Rename `*Callback` with `*Func`. (#1061)
-- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
-- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
- This interface still supports the export of `SpanData`, but only as a slice.
- Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078)
-- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
- If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
-- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
- This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
-
-### Removed
-
-- Duplicate, unused API sampler interface. (#999)
- Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
-- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
- This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
-- The `WithSpan` method of the `Tracer` interface.
- The functionality this method provided was limited compared to what a user can provide themselves.
- It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
-- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
- These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
-- The `oterror` package. (#1026)
-- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
-
-### Fixed
-
-- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
-- Correct instrumentation version tag in Jaeger exporter. (#1037)
-- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
-- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
-- The `otel-collector` example referenced outdated collector processors. (#1006)
-
-## [0.10.0] - 2020-07-29
-
-This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
-
-### Added
-
-- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
- These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
-- Add propagator option for gRPC instrumentation. (#986)
-- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
-
-### Changed
-
-- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
- This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
-- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
- This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
-- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
-- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
- - `value.Bool` was replaced with `kv.BoolValue`.
- - `value.Int64` was replaced with `kv.Int64Value`.
- - `value.Uint64` was replaced with `kv.Uint64Value`.
- - `value.Float64` was replaced with `kv.Float64Value`.
- - `value.Int32` was replaced with `kv.Int32Value`.
- - `value.Uint32` was replaced with `kv.Uint32Value`.
- - `value.Float32` was replaced with `kv.Float32Value`.
- - `value.String` was replaced with `kv.StringValue`.
- - `value.Int` was replaced with `kv.IntValue`.
- - `value.Uint` was replaced with `kv.UintValue`.
- - `value.Array` was replaced with `kv.ArrayValue`.
-- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
-- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
-- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
-- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
-- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
-
-### Removed
-
-- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
-
-### Fixed
-
-- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
-- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
-- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
-- Correct Go language formatting in the README documentation. (#961)
-- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
-- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
-- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
-
-## [0.9.0] - 2020-07-20
-
-### Added
-
-- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
-- A Detector to automatically detect resources from an environment variable. (#939)
-- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
-- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
- References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
-
-### Changed
-
-- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
-
-### Removed
-
-- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
-
-## [0.8.0] - 2020-07-09
-
-### Added
-
-- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
- Values for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
-- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
-- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
-- Add `peer.service` semantic attribute. (#898)
-- Add database-specific semantic attributes. (#899)
-- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
-- Add http content size semantic conventions. (#905)
-- Include `http.request_content_length` in HTTP request basic attributes. (#905)
-- Add semantic conventions for operating system process resource attribute keys. (#919)
-- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
-
-### Changed
-
-- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
-- Use lowercase header names for B3 Multiple Headers. (#881)
-- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
- This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
- If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
-- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
- Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
- This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
-- Extend semantic conventions for RPC. (#900)
-- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
- - `"api/standard".FaaSName` -> `FaaSNameKey`
- - `"api/standard".FaaSID` -> `FaaSIDKey`
- - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
- - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
-
-### Removed
-
-- The `FlagsUnused` trace flag is removed.
- The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882)
-- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
- If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
-
-### Fixed
-
-- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
-- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
-- The B3 propagator now propagates the debug flag.
- This removes the behavior of changing the debug flag into a set sampling bit.
- Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
-- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
-- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
-- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
-- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
-- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
-- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
-- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
-- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
-- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-collector example to use the v0.5.0 collector. (#915)
-- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
-- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
-- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
- This is in accordance with OpenTelemetry semantic conventions. (#922)
-- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
-- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
-- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
-- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
-
-## [0.7.0] - 2020-06-26
-
-This release implements the v0.5.0 version of the OpenTelemetry specification.
-
-### Added
-
-- The othttp instrumentation now includes default metrics. (#861)
-- This CHANGELOG file to track all changes in the project going forward.
-- Support for array type attributes. (#798)
-- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
-- Timestamps are now passed to exporters for each export. (#835)
-- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
- This replaces the prior `Record` `struct` use for this purpose. (#835)
-- New dependabot integration to automate package upgrades. (#814)
-- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument.
- This instrumentation version is passed on to exporters. (#811) (#805) (#802)
-- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
-- Environment variables for Jaeger exporter are supported. (#796)
-- New `aggregation.Kind` in the export metric API. (#808)
-- New example that uses OTLP and the collector. (#790)
-- Handle errors in the span `SetName` during span initialization. (#791)
-- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
-- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
-- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to an user defined `Handler`.
- There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`(#778)
-- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
-- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
-- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769
-
-### Changed
-
-- Rename `Integrator` to `Processor` in the metric SDK. (#863)
-- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
-- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
-- Rename `simple` integrator to `basic` integrator. (#857)
-- Merge otlp collector examples. (#841)
-- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
- With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
-- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
-- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
- All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
- Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
-- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
-- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
-- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808
-- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
-- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791)
-- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
-- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781)
-
-### Removed
-
-- `Uint64NumberKind` and related functions from the API. (#864)
-- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
-- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
-
-### Fixed
-
-- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
-- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
-- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
-- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
-- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
-- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
-- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
-- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
-- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
-- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
-- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
-- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
-- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
-- Set span status from HTTP status code in the othttp instrumentation. (#832)
-- Fixed typo in push controller comment. (#834)
-- The `Aggregator` testing has been updated and cleaned. (#812)
-- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
-- Fixed `global` `handler_test.go` test failure. #804
-- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
-- Fixed OTLP example's accidental early close of exporter. (#807)
-- Ensure zipkin exporter reads and closes response body. (#788)
-- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
-- Clean up tools and RELEASING documentation. (#762)
-
-## [0.6.0] - 2020-05-21
-
-### Added
-
-- Support for `Resource`s in the prometheus exporter. (#757)
-- New pull controller. (#751)
-- New `UpDownSumObserver` instrument. (#750)
-- OpenTelemetry collector demo. (#711)
-- New `SumObserver` instrument. (#747)
-- New `UpDownCounter` instrument. (#745)
-- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
-- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
-
-### Changed
-
-- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
-- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
-- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
-- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
-- The prometheus exporter now uses the new pull controller. (#751)
-- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752)
-- Support use of synchronous instruments in asynchronous callbacks (#725)
-- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
-- Rename `Observer` instrument to `ValueObserver`. (#734)
-- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
-- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
-- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
-
-### Fixed
-
-- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
-- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
-- Fix `string` case in `kv` `Infer` function. (#746)
-- Fix panic in grpctrace client interceptors. (#740)
-- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
-- Rewrite span batch process queue batching logic. (#719)
-- Remove the push controller named Meter map. (#738)
-- Fix Histogram aggregator initial state (fix #735). (#736)
-- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
-- Added test for grpctrace `UnaryInterceptorClient`. (#695)
-- Rearrange `api/metric` code layout. (#724)
-
-## [0.5.0] - 2020-05-13
-
-### Added
-
-- Batch `Observer` callback support. (#717)
-- Alias `api` types to root package of project. (#696)
-- Create basic `othttp.Transport` for simple client instrumentation. (#678)
-- `SetAttribute(string, interface{})` to the trace API. (#674)
-- Jaeger exporter option that allows user to specify custom http client. (#671)
-- `Stringer` and `Infer` methods to `key`s. (#662)
-
-### Changed
-
-- Rename `NewKey` in the `kv` package to just `Key`. (#721)
-- Move `core` and `key` to `kv` package. (#720)
-- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
-- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
-- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
-- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
-- Move `Number` from `core` to `api/metric` package. (#706)
-- Move `SpanContext` from `core` to `trace` package. (#692)
-- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
-
-### Fixed
-
-- Update tooling to run generators in all submodules. (#705)
-- gRPC interceptor regexp to match methods without a service name. (#683)
-- Use a `const` for padding 64-bit B3 trace IDs. (#701)
-- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
-- Left-pad 64-bit B3 trace IDs with zero. (#698)
-- Propagate at least the first W3C tracestate header. (#694)
-- Remove internal `StateLocker` implementation. (#688)
-- Increase instance size CI system uses. (#690)
-- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
-- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
-- Reimplement histogram using mutex instead of `StateLocker`. (#669)
-- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
-- Update documentation to not include any references to `WithKeys`. (#672)
-- Correct misspelling. (#668)
-- Fix clobbering of the span context if extraction fails. (#656)
-- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
-
-## [0.4.3] - 2020-04-24
-
-### Added
-
-- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
-- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
-- New `api/label` package, providing common label set implementation. (#651)
-- Support for JSON marshaling of `Resources`. (#654)
-- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
-- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
-- `WithSpanFormatter` option to the othttp plugin. (#617)
-- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
-- The prometheus exporter now supports exporting histograms. (#601)
-- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
-- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
-- An `Equal` method to the `Resource` test the equivalence of resources. (#613)
-- An iterable structure (`AttributeIterator`) for `Resource` attributes.
-
-### Changed
-
-- zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
-- Pass `Resources` through the metrics export pipeline. (#659)
-
-### Removed
-
-- `WithKeys` option from the metric API. (#639)
-
-### Fixed
-
-- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
-- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
-- Use type names for return values in jaeger exporter. (#648)
-- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
-- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
-- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
-- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
-- Add error wrapping to the prometheus exporter. (#631)
-- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
-- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
-- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
-- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
-- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
-- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610
-- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
-
-## [0.4.2] - 2020-03-31
-
-### Fixed
-
-- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
-- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
-
-## [0.4.1] - 2020-03-31
-
-### Fixed
-
-- Update `tag.sh` to create signed tags. (#604)
-
-## [0.4.0] - 2020-03-30
-
-### Added
-
-- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
-- Script to verify examples after a new release. (#579)
-
-### Removed
-
-- The dogstatsd exporter due to lack of support.
- This additionally removes support for statsd. (#591)
-- `LabelSet` from the metric API.
- This is replaced by a `[]core.KeyValue` slice. (#595)
-- `Labels` from the metric API's `Meter` interface. (#595)
-
-### Changed
-
-- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
-- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
-- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
-
-### Fixed
-
-- Corrected missing return in mock span. (#582)
-- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
-- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
-- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
-- Add a `RecordBatch` benchmark. (#594)
-- Moved span transforms of the OTLP exporter to the internal package. (#593)
-- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
-- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
-- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
-- Update project documentation godoc.org links to pkg.go.dev. (#602)
-
-## [0.3.0] - 2020-03-21
-
-This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
-There is still a possibility of breaking changes.
-
-### Added
-
-- Add `Observer` metric instrument. (#474)
-- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
-- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
-- The zipkin trace exporter. (#495)
-- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
-- Add `StatusMessage` field to the trace `Span`. (#524)
-- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
-- The `Resource` type was added to the SDK. (#528)
-- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
-- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
- Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
-- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
-- Scripts to better automate the release process. (#576)
-
-### Changed
-
-- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506)
-- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
-- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
-- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
-- Updated the trace `Sampler` interface conform to the OpenTelemetry specification. (#531)
-- Rename metric API `Options` to `Config`. (#541)
-- Rename metric `Counter` aggregator to be `Sum`. (#541)
-- Unify metric options into `Option` from instrument specific options. (#541)
-- The trace API's `TraceProvider` now supports `Resource`s. (#545)
-- Correct error in zipkin module name. (#548)
-- The jaeger trace exporter now supports `Resource`s. (#551)
-- Metric SDK now supports `Resource`s.
- The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
-- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
-- The stdout trace exporter now supports `Resource`s. (#558)
-- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
-- Replace `Ordered` with an iterator in `export.Labels`. (#567)
-
-### Removed
-
-- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452)
-- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
-- `GetDescriptor` from the metric SDK. (#575)
-- The `Gauge` instrument from the metric API. (#537)
-
-### Fixed
-
-- Make histogram aggregator checkpoint consistent. (#438)
-- Update README with import instructions and how to build and test. (#505)
-- The default label encoding was updated to be unique. (#508)
-- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
-- Fix data race in `BatchedSpanProcessor`. (#518)
-- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521
-- Use a variable-size array to represent ordered labels in maps. (#523)
-- Update the OTLP protobuf and update changed import path. (#532)
-- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
-- Eliminate goroutine leak in histogram stress test. (#547)
-- Update OTLP exporter with latest protobuf. (#550)
-- Add filters to the othttp plugin. (#556)
-- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
-- Encode labels once during checkpoint.
- The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
- This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
-- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
-
-## [0.2.3] - 2020-03-04
-
-### Added
-
-- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
-- Configurable push frequency for exporters setup pipeline. (#504)
-
-### Changed
-
-- Rename the `exporter` directory to `exporters`.
- The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
- This resulted in all subsequent releases not becoming the default latest.
- A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
- Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
- Consequentially, this action also renames *all* exporter packages. (#502)
-
-### Removed
-
-- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
-
-## [0.2.2] - 2020-02-27
-
-### Added
-
-- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
-- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
-- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467)
-- `Config` and configuring `Option` to the propagator API. (#467)
-- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
-- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467)
-- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
-- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
-- Histogram aggregator. (#433)
-- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456)
-- `AlwaysParentSample` sampler to the trace API. (#455)
-- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
-
-### Changed
-
-- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
-- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
-- Move correlation context propagation to correlation package. (#479)
-- Do not default to putting remote span context into links. (#480)
-- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
-- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
-- Renamed the `export` package to `metric` to match directory structure. (#432)
-- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
-- Rename the `api/propagators` package to `api/propagation`. (#444)
-- Move the propagators from the `propagators` package into the `trace` API package. (#444)
-- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
-- Moved all dependencies of tools package to a tools directory. (#466)
-
-### Removed
-
-- Binary propagators. (#467)
-- NOOP propagator. (#467)
-
-### Fixed
-
-- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
-- Fix a possible nil-dereference crash (#478)
-- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
-- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
-- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
-- Initialize `onError` based on `Config` in prometheus exporter. (#486)
-- Correct module name in prometheus exporter README. (#475)
-- Removed tracer name prefix from span names. (#430)
-- Fix `aggregator_test.go` import package comment. (#431)
-- Improved detail in stdout exporter. (#436)
-- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
-- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
-- Reword function documentation in gRPC plugin. (#446)
-- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
-- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
-- Upgraded to Go 1.13 in CI. (#465)
-- Correct opentelemetry.io URL in trace SDK documentation. (#464)
-- Refactored reference counting logic in SDK determination of stale records. (#468)
-- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
-
-## [0.2.1.1] - 2020-01-13
-
-### Fixed
-
-- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
-
-## [0.2.1] - 2020-01-08
-
-### Added
-
-- Global meter forwarding implementation.
- This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
-- Global trace forwarding implementation.
- This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
-- Standardize export pipeline creation in all exporters. (#395)
-- A testing, organization, and comments for 64-bit field alignment. (#418)
-- Script to tag all modules in the project. (#414)
-
-### Changed
-
-- Renamed `propagation` package to `propagators`. (#362)
-- Renamed `B3Propagator` propagator to `B3`. (#362)
-- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
-- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
-- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
-- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
-- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
-- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
-- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
-- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
-- `Number` now has a pointer receiver for its methods. (#375)
-- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
-- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
-- Renamed `Message` in Event to `Name` in the trace API. (#389)
-- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
-- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
-- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
-- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
-- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
-- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
-- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
-- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
-- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
-- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
-- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
-- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
-
-### Fixed
-
-- Aggregator import path corrected. (#421)
-- Correct links in README. (#368)
-- The README was updated to match latest code changes in its examples. (#374)
-- Don't capitalize error statements. (#375)
-- Fix ignored errors. (#375)
-- Fix ambiguous variable naming. (#375)
-- Removed unnecessary type casting. (#375)
-- Use named parameters. (#375)
-- Updated release schedule. (#378)
-- Correct http-stackdriver example module name. (#394)
-- Removed the `http.request` span in `httptrace` package. (#397)
-- Add comments in the metrics SDK (#399)
-- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into a empty one. (#402) (#403)
-- Add documentation of compatible exporters in the README. (#405)
-- Typo fix. (#408)
-- Simplify span check logic in SDK tracer implementation. (#419)
-
-## [0.2.0] - 2019-12-03
-
-### Added
-
-- Unary gRPC tracing example. (#351)
-- Prometheus exporter. (#334)
-- Dogstatsd metrics exporter. (#326)
-
-### Changed
-
-- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
-- Rename `GetMeter` to `Meter`. (#357)
-- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
-- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
-- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
-- Move `/global` package to `/api/global`. (#356)
-- Rename `GetTracer` to `Tracer`. (#347)
-
-### Removed
-
-- `SetAttribute` from the `Span` interface in the trace API. (#361)
-- `AddLink` from the `Span` interface in the trace API. (#349)
-- `Link` from the `Span` interface in the trace API. (#349)
-
-### Fixed
-
-- Exclude example directories from coverage report. (#365)
-- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
-- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this is environment variable is not needed for that version of Go. (#359)
-- Run the race checker for all test. (#354)
-- Redundant commands in the Makefile are removed. (#354)
-- Split the `generate` and `lint` targets of the Makefile. (#354)
-- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
-- Add example Prometheus binary to gitignore. (#358)
-- Support negative numbers with the `MaxSumCount`. (#335)
-- Resolve race conditions in `push_test.go` identified in #339. (#340)
-- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
-- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
- Previously it was testing `AlwaysSample` twice. (#325)
-- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
-- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes` (#325)
-- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
- This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
- This was corrected. (#333)
-
-## [0.1.2] - 2019-11-18
-
-### Fixed
-
-- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
-- Removed unnecessary unslicing of parameters that are already a slice. (#324)
-
-## [0.1.1] - 2019-11-18
-
-This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch.
-
-### Added
-
-- Metrics stdout export pipeline. (#265)
-- Array aggregation for raw measure metrics. (#282)
-- The core.Value now have a `MarshalJSON` method. (#281)
-
-### Removed
-
-- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
-- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
-
-### Changed
-
-- Allocation in LabelSet construction to reduce GC overhead. (#318)
-- `trace.WithAttributes` to append values instead of replacing (#315)
-- Use a formula for tolerance in sampling tests. (#298)
-- Move export types into trace and metric-specific sub-directories. (#289)
-- `SpanKind` back to being based on an `int` type. (#288)
-
-### Fixed
-
-- URL to OpenTelemetry website in README. (#323)
-- Name of othttp default tracer. (#321)
-- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
-- CI modules cache to correctly restore/save from/to the cache. (#316)
-- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
-- README now reflects the new code structure introduced with these changes. (#291)
-- Make the basic example work. (#279)
-
-## [0.1.0] - 2019-11-04
-
-This is the first release of open-telemetry go library.
-It contains api and sdk for trace and meter.
-
-### Added
-
-- Initial OpenTelemetry trace and metric API prototypes.
-- Initial OpenTelemetry trace, metric, and export SDK packages.
-- A wireframe bridge to support compatibility with OpenTracing.
-- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
-- Exporters for Jaeger, Stackdriver, and stdout.
-- Propagators for binary, B3, and trace-context protocols.
-- Project information and guidelines in the form of a README and CONTRIBUTING.
-- Tools to build the project and a Makefile to automate the process.
-- Apache-2.0 license.
-- CircleCI build CI manifest files.
-- CODEOWNERS file to track owners of this project.
-
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
-[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
-[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
-[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
-[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
-[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
-[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
-[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
-[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
-[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
-[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
-[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
-[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
-[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
-[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
-[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
-[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
-[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
-[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
-[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
-[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
-[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
-[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
-[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
-[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
-[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
-[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
-[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
-[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
-[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
-[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
-[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
-[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
-[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
-[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
-[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
-[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
-[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
-[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
-[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
-[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
-[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
-[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
-[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
-[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
-[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
-[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
-[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
-[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
-[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
-[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
-[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
-[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
-[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
-[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
-[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
-[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
-[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
-[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
-[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
-[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
-[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
-[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
-[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
-[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
-[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
-[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
-[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
-[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
-[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
-[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
-[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
-[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
-
-[Go 1.20]: https://go.dev/doc/go1.20
-[Go 1.19]: https://go.dev/doc/go1.19
-[Go 1.18]: https://go.dev/doc/go1.18
-
-[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
-[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
-[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
deleted file mode 100644
index 6237400..0000000
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ /dev/null
@@ -1,17 +0,0 @@
-#####################################################
-#
-# List of approvers for this repository
-#
-#####################################################
-#
-# Learn about membership in OpenTelemetry community:
-# https://github.com/open-telemetry/community/blob/main/community-membership.md
-#
-#
-# Learn about CODEOWNERS file format:
-# https://help.github.com/en/articles/about-code-owners
-#
-
-* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
-
-CODEOWNERS @MrAlias @MadVikingGod @pellared
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
deleted file mode 100644
index 850606a..0000000
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ /dev/null
@@ -1,624 +0,0 @@
-# Contributing to opentelemetry-go
-
-The Go special interest group (SIG) meets regularly. See the
-OpenTelemetry
-[community](https://github.com/open-telemetry/community#golang-sdk)
-repo for information on this and other language SIGs.
-
-See the [public meeting
-notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
-for a summary description of past meetings. To request edit access,
-join the meeting or get in touch on
-[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
-
-## Development
-
-You can view and edit the source code by cloning this repository:
-
-```sh
-git clone https://github.com/open-telemetry/opentelemetry-go.git
-```
-
-Run `make test` to run the tests instead of `go test`.
-
-There are some generated files checked into the repo. To make sure
-that the generated files are up-to-date, run `make` (or `make
-precommit` - the `precommit` target is the default).
-
-The `precommit` target also fixes the formatting of the code and
-checks the status of the go module files.
-
-Additionally, there is a `codespell` target that checks for common
-typos in the code. It is not run by default, but you can run it
-manually with `make codespell`. It will set up a virtual environment
-in `venv` and install `codespell` there.
-
-If after running `make precommit` the output of `git status` contains
-`nothing to commit, working tree clean` then it means that everything
-is up-to-date and properly formatted.
-
-## Pull Requests
-
-### How to Send Pull Requests
-
-Everyone is welcome to contribute code to `opentelemetry-go` via
-GitHub pull requests (PRs).
-
-To create a new PR, fork the project in GitHub and clone the upstream
-repo:
-
-```sh
-go get -d go.opentelemetry.io/otel
-```
-
-(This may print some warning about "build constraints exclude all Go
-files", just ignore it.)
-
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
-can alternatively use `git` directly with:
-
-```sh
-git clone https://github.com/open-telemetry/opentelemetry-go
-```
-
-(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
-that name is a kind of a redirector to GitHub that `go get` can
-understand, but `git` does not.)
-
-This would put the project in the `opentelemetry-go` directory in
-current working directory.
-
-Enter the newly created directory and add your fork as a new remote:
-
-```sh
-git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
-```
-
-Check out a new branch, make modifications, run linters and tests, update
-`CHANGELOG.md`, and push the branch to your fork:
-
-```sh
-git checkout -b <YOUR_BRANCH_NAME>
-# edit files
-# update changelog
-make precommit
-git add -p
-git commit
-git push <YOUR_FORK> <YOUR_BRANCH_NAME>
-```
-
-Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
-request ID to the entry you added to `CHANGELOG.md`.
-
-Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
-Rewriting Git history makes it difficult to keep track of iterations during code review.
-All pull requests are squashed to a single commit upon merge to `main`.
-
-### How to Receive Comments
-
-* If the PR is not ready for review, please put `[WIP]` in the title,
- tag it as `work-in-progress`, or mark it as
- [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
-* Make sure CLA is signed and CI is clear.
-
-### How to Get PRs Merged
-
-A PR is considered **ready to merge** when:
-
-* It has received two qualified approvals[^1].
-
- This is not enforced through automation, but needs to be validated by the
- maintainer merging.
- * The qualified approvals need to be from [Approver]s/[Maintainer]s
- affiliated with different companies. Two qualified approvals from
- [Approver]s or [Maintainer]s affiliated with the same company counts as a
- single qualified approval.
- * PRs introducing changes that have already been discussed and consensus
- reached only need one qualified approval. The discussion and resolution
- needs to be linked to the PR.
- * Trivial changes[^2] only need one qualified approval.
-
-* All feedback has been addressed.
- * All PR comments and suggestions are resolved.
- * All GitHub Pull Request reviews with a status of "Request changes" have
- been addressed. Another review by the objecting reviewer with a different
- status can be submitted to clear the original review, or the review can be
- dismissed by a [Maintainer] when the issues from the original review have
- been addressed.
- * Any comments or reviews that cannot be resolved between the PR author and
- reviewers can be submitted to the community [Approver]s and [Maintainer]s
- during the weekly SIG meeting. If consensus is reached among the
- [Approver]s and [Maintainer]s during the SIG meeting the objections to the
- PR may be dismissed or resolved or the PR closed by a [Maintainer].
- * Any substantive changes to the PR require existing Approval reviews be
- cleared unless the approver explicitly states that their approval persists
- across changes. This includes changes resulting from other feedback.
- [Approver]s and [Maintainer]s can help in clearing reviews and they should
- be consulted if there are any questions.
-
-* The PR branch is up to date with the base branch it is merging into.
- * To ensure this does not block the PR, it should be configured to allow
- maintainers to update it.
-
-* It has been open for review for at least one working day. This gives people
- reasonable time to review.
- * Trivial changes[^2] do not have to wait for one day and may be merged with
- a single [Maintainer]'s approval.
-
-* All required GitHub workflows have succeeded.
-* Urgent fix can take exception as long as it has been actively communicated
- among [Maintainer]s.
-
-Any [Maintainer] can merge the PR once the above criteria have been met.
-
-[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
- status from an OpenTelemetry Go [Approver] or [Maintainer].
-[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
- changes, documentation corrections or updates, dependency updates, etc.
-
-## Design Choices
-
-As with other OpenTelemetry clients, opentelemetry-go follows the
-[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
-
-It's especially valuable to read through the [library
-guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
-
-### Focus on Capabilities, Not Structure Compliance
-
-OpenTelemetry is an evolving specification, one where the desires and
-use cases are clear, but the method to satisfy those uses cases are
-not.
-
-As such, Contributions should provide functionality and behavior that
-conforms to the specification, but the interface and structure is
-flexible.
-
-It is preferable to have contributions follow the idioms of the
-language rather than conform to specific API names or argument
-patterns in the spec.
-
-For a deeper discussion, see
-[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
-
-## Documentation
-
-Each (non-internal, non-test) package must be documented using
-[Go Doc Comments](https://go.dev/doc/comment),
-preferably in a `doc.go` file.
-
-Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
-instead of putting code snippets in Go doc comments.
-In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
-
-You can install and run a "local Go Doc site" in the following way:
-
- ```sh
- go install golang.org/x/pkgsite/cmd/pkgsite@latest
- pkgsite
- ```
-
-[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
-is an example of a very well-documented package.
-
-## Style Guide
-
-One of the primary goals of this project is that it is actually used by
-developers. With this goal in mind the project strives to build
-user-friendly and idiomatic Go code adhering to the Go community's best
-practices.
-
-For a non-comprehensive but foundational overview of these best practices
-the [Effective Go](https://golang.org/doc/effective_go.html) documentation
-is an excellent starting place.
-
-As a convenience for developers building this project the `make precommit`
-will format, lint, validate, and in some cases fix the changes you plan to
-submit. This check will need to pass for your changes to be able to be
-merged.
-
-In addition to idiomatic Go, the project has adopted certain standards for
-implementations of common patterns. These standards should be followed as a
-default, and if they are not followed documentation needs to be included as
-to the reasons why.
-
-### Configuration
-
-When creating an instantiation function for a complex `type T struct`, it is
-useful to allow variable number of options to be applied. However, the strong
-type system of Go restricts the function design options. There are a few ways
-to solve this problem, but we have landed on the following design.
-
-#### `config`
-
-Configuration should be held in a `struct` named `config`, or prefixed with
-specific type name this Configuration applies to if there are multiple
-`config` in the package. This type must contain configuration options.
-
-```go
-// config contains configuration options for a thing.
-type config struct {
- // options ...
-}
-```
-
-In general the `config` type will not need to be used externally to the
-package and should be unexported. If, however, it is expected that the user
-will likely want to build custom options for the configuration, the `config`
-should be exported. Please, include in the documentation for the `config`
-how the user can extend the configuration.
-
-It is important that internal `config` are not shared across package boundaries.
-Meaning a `config` from one package should not be directly used by another. The
-one exception is the API packages. The configs from the base API, eg.
-`go.opentelemetry.io/otel/trace.TracerConfig` and
-`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
-by the SDK therefore it is expected that these are exported.
-
-When a config is exported we want to maintain forward and backward
-compatibility, to achieve this no fields should be exported but should
-instead be accessed by methods.
-
-Optionally, it is common to include a `newConfig` function (with the same
-naming scheme). This function wraps any defaults setting and looping over
-all options to create a configured `config`.
-
-```go
-// newConfig returns an appropriately configured config.
-func newConfig(options ...Option) config {
- // Set default values for config.
- config := config{/* […] */}
- for _, option := range options {
- config = option.apply(config)
- }
- // Perform any validation here.
- return config
-}
-```
-
-If validation of the `config` options is also performed this can return an
-error as well that is expected to be handled by the instantiation function
-or propagated to the user.
-
-Given the design goal of not having the user need to work with the `config`,
-the `newConfig` function should also be unexported.
-
-#### `Option`
-
-To set the value of the options a `config` contains, a corresponding
-`Option` interface type should be used.
-
-```go
-type Option interface {
- apply(config) config
-}
-```
-
-Having `apply` unexported makes sure that it will not be used externally.
-Moreover, the interface becomes sealed so the user cannot easily implement
-the interface on its own.
-
-The `apply` method should return a modified version of the passed config.
-This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
-
-The name of the interface should be prefixed in the same way the
-corresponding `config` is (if at all).
-
-#### Options
-
-All user configurable options for a `config` must have a related unexported
-implementation of the `Option` interface and an exported configuration
-function that wraps this implementation.
-
-The wrapping function name should be prefixed with `With*` (or in the
-special case of a boolean options `Without*`) and should have the following
-function signature.
-
-```go
-func With*(…) Option { … }
-```
-
-##### `bool` Options
-
-```go
-type defaultFalseOption bool
-
-func (o defaultFalseOption) apply(c config) config {
- c.Bool = bool(o)
- return c
-}
-
-// WithOption sets a T to have an option included.
-func WithOption() Option {
- return defaultFalseOption(true)
-}
-```
-
-```go
-type defaultTrueOption bool
-
-func (o defaultTrueOption) apply(c config) config {
- c.Bool = bool(o)
- return c
-}
-
-// WithoutOption sets a T to have Bool option excluded.
-func WithoutOption() Option {
- return defaultTrueOption(false)
-}
-```
-
-##### Declared Type Options
-
-```go
-type myTypeOption struct {
- MyType MyType
-}
-
-func (o myTypeOption) apply(c config) config {
- c.MyType = o.MyType
- return c
-}
-
-// WithMyType sets T to have include MyType.
-func WithMyType(t MyType) Option {
- return myTypeOption{t}
-}
-```
-
-##### Functional Options
-
-```go
-type optionFunc func(config) config
-
-func (fn optionFunc) apply(c config) config {
- return fn(c)
-}
-
-// WithMyType sets t as MyType.
-func WithMyType(t MyType) Option {
- return optionFunc(func(c config) config {
- c.MyType = t
- return c
- })
-}
-```
-
-#### Instantiation
-
-Using this configuration pattern to configure instantiation with a `NewT`
-function.
-
-```go
-func NewT(options ...Option) T {…}
-```
-
-Any required parameters can be declared before the variadic `options`.
-
-#### Dealing with Overlap
-
-Sometimes there are multiple complex `struct` that share common
-configuration and also have distinct configuration. To avoid repeated
-portions of `config`s, a common `config` can be used with the union of
-options being handled with the `Option` interface.
-
-For example.
-
-```go
-// config holds options for all animals.
-type config struct {
- Weight float64
- Color string
- MaxAltitude float64
-}
-
-// DogOption apply Dog specific options.
-type DogOption interface {
- applyDog(config) config
-}
-
-// BirdOption apply Bird specific options.
-type BirdOption interface {
- applyBird(config) config
-}
-
-// Option apply options for all animals.
-type Option interface {
- BirdOption
- DogOption
-}
-
-type weightOption float64
-
-func (o weightOption) applyDog(c config) config {
- c.Weight = float64(o)
- return c
-}
-
-func (o weightOption) applyBird(c config) config {
- c.Weight = float64(o)
- return c
-}
-
-func WithWeight(w float64) Option { return weightOption(w) }
-
-type furColorOption string
-
-func (o furColorOption) applyDog(c config) config {
- c.Color = string(o)
- return c
-}
-
-func WithFurColor(c string) DogOption { return furColorOption(c) }
-
-type maxAltitudeOption float64
-
-func (o maxAltitudeOption) applyBird(c config) config {
- c.MaxAltitude = float64(o)
- return c
-}
-
-func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
-
-func NewDog(name string, o ...DogOption) Dog {…}
-func NewBird(name string, o ...BirdOption) Bird {…}
-```
-
-### Interfaces
-
-To allow other developers to better comprehend the code, it is important
-to ensure it is sufficiently documented. One simple measure that contributes
-to this aim is self-documenting by naming method parameters. Therefore,
-where appropriate, methods of every exported interface type should have
-their parameters appropriately named.
-
-#### Interface Stability
-
-All exported stable interfaces that include the following warning in their
-documentation are allowed to be extended with additional methods.
-
-> Warning: methods may be added to this interface in minor releases.
-
-These interfaces are defined by the OpenTelemetry specification and will be
-updated as the specification evolves.
-
-Otherwise, stable interfaces MUST NOT be modified.
-
-#### How to Change Specification Interfaces
-
-When an API change must be made, we will update the SDK with the new method one
-release before the API change. This will allow the SDK one version before the
-API change to work seamlessly with the new API.
-
-If an incompatible version of the SDK is used with the new API the application
-will fail to compile.
-
-#### How Not to Change Specification Interfaces
-
-We have explored using a v2 of the API to change interfaces and found that there
-was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
-Problems happened with libraries that upgraded to v2 when an application did not,
-and would not produce any telemetry.
-
-More detail of the approaches considered and their limitations can be found in
-the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
-issue.
-
-#### How to Change Other Interfaces
-
-If new functionality is needed for an interface that cannot be changed it MUST
-be added by including an additional interface. That added interface can be a
-simple interface for the specific functionality that you want to add or it can
-be a super-set of the original interface. For example, if you wanted to a
-`Close` method to the `Exporter` interface:
-
-```go
-type Exporter interface {
- Export()
-}
-```
-
-A new interface, `Closer`, can be added:
-
-```go
-type Closer interface {
- Close()
-}
-```
-
-Code that is passed the `Exporter` interface can now check to see if the passed
-value also satisfies the new interface. E.g.
-
-```go
-func caller(e Exporter) {
- /* ... */
- if c, ok := e.(Closer); ok {
- c.Close()
- }
- /* ... */
-}
-```
-
-Alternatively, a new type that is the super-set of an `Exporter` can be created.
-
-```go
-type ClosingExporter struct {
- Exporter
- Close()
-}
-```
-
-This new type can be used similar to the simple interface above in that a
-passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
-and the `Close` method called.
-
-This super-set approach can be useful if there is explicit behavior that needs
-to be coupled with the original type and passed as a unified type to a new
-function, but, because of this coupling, it also limits the applicability of
-the added functionality. If there exist other interfaces where this
-functionality should be added, each one will need their own super-set
-interfaces and will duplicate the pattern. For this reason, the simple targeted
-interface that defines the specific functionality should be preferred.
-
-### Testing
-
-The tests should never leak goroutines.
-
-Use the term `ConcurrentSafe` in the test name when it aims to verify the
-absence of race conditions.
-
-### Internal packages
-
-The use of internal packages should be scoped to a single module. A sub-module
-should never import from a parent internal package. This creates a coupling
-between the two modules where a user can upgrade the parent without the child
-and if the internal package API has changed it will fail to upgrade[^3].
-
-There are two known exceptions to this rule:
-
-- `go.opentelemetry.io/otel/internal/global`
- - This package manages global state for all of opentelemetry-go. It needs to
- be a single package in order to ensure the uniqueness of the global state.
-- `go.opentelemetry.io/otel/internal/baggage`
- - This package provides values in a `context.Context` that need to be
- recognized by `go.opentelemetry.io/otel/baggage` and
- `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
-
-If you have duplicate code in multiple modules, make that code into a Go
-template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
-to render the templates in the desired locations. See [#4404] for an example of
-this.
-
-[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
-
-## Approvers and Maintainers
-
-### Approvers
-
-- [Evan Torrie](https://github.com/evantorrie), Verizon Media
-- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
-- [David Ashpole](https://github.com/dashpole), Google
-- [Chester Cheung](https://github.com/hanyuancheung), Tencent
-- [Damien Mathieu](https://github.com/dmathieu), Elastic
-- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
-
-### Maintainers
-
-- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
-- [Robert Pająk](https://github.com/pellared), Splunk
-- [Tyler Yahn](https://github.com/MrAlias), Splunk
-
-### Emeritus
-
-- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
-- [Josh MacDonald](https://github.com/jmacd), LightStep
-
-### Become an Approver or a Maintainer
-
-See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
-
-[Approver]: #approvers
-[Maintainer]: #maintainers
-[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
-[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/go.opentelemetry.io/otel/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
deleted file mode 100644
index 35fc189..0000000
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-TOOLS_MOD_DIR := ./internal/tools
-
-ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
-ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
-OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
-
-GO = go
-TIMEOUT = 60
-
-.DEFAULT_GOAL := precommit
-
-.PHONY: precommit ci
-precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
-ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
-
-# Tools
-
-TOOLS = $(CURDIR)/.tools
-
-$(TOOLS):
- @mkdir -p $@
-$(TOOLS)/%: | $(TOOLS)
- cd $(TOOLS_MOD_DIR) && \
- $(GO) build -o $@ $(PACKAGE)
-
-MULTIMOD = $(TOOLS)/multimod
-$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
-
-SEMCONVGEN = $(TOOLS)/semconvgen
-$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
-
-CROSSLINK = $(TOOLS)/crosslink
-$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
-
-SEMCONVKIT = $(TOOLS)/semconvkit
-$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
-
-DBOTCONF = $(TOOLS)/dbotconf
-$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
-
-GOLANGCI_LINT = $(TOOLS)/golangci-lint
-$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
-
-MISSPELL = $(TOOLS)/misspell
-$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
-
-GOCOVMERGE = $(TOOLS)/gocovmerge
-$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
-
-STRINGER = $(TOOLS)/stringer
-$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
-
-PORTO = $(TOOLS)/porto
-$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
-
-GOJQ = $(TOOLS)/gojq
-$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
-
-GOTMPL = $(TOOLS)/gotmpl
-$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
-
-GORELEASE = $(TOOLS)/gorelease
-$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
-
-GOVULNCHECK = $(TOOLS)/govulncheck
-$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
-
-.PHONY: tools
-tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
-
-# Virtualized python tools via docker
-
-# The directory where the virtual environment is created.
-VENVDIR := venv
-
-# The directory where the python tools are installed.
-PYTOOLS := $(VENVDIR)/bin
-
-# The pip executable in the virtual environment.
-PIP := $(PYTOOLS)/pip
-
-# The directory in the docker image where the current directory is mounted.
-WORKDIR := /workdir
-
-# The python image to use for the virtual environment.
-PYTHONIMAGE := python:3.11.3-slim-bullseye
-
-# Run the python image with the current directory mounted.
-DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
-
-# Create a virtual environment for Python tools.
-$(PYTOOLS):
-# The `--upgrade` flag is needed to ensure that the virtual environment is
-# created with the latest pip version.
- @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
-
-# Install python packages into the virtual environment.
-$(PYTOOLS)/%: | $(PYTOOLS)
- @$(DOCKERPY) $(PIP) install -r requirements.txt
-
-CODESPELL = $(PYTOOLS)/codespell
-$(CODESPELL): PACKAGE=codespell
-
-# Generate
-
-.PHONY: generate
-generate: go-generate vanity-import-fix
-
-.PHONY: go-generate
-go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
-go-generate/%: DIR=$*
-go-generate/%: | $(STRINGER) $(GOTMPL)
- @echo "$(GO) generate $(DIR)/..." \
- && cd $(DIR) \
- && PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
-
-.PHONY: vanity-import-fix
-vanity-import-fix: | $(PORTO)
- @$(PORTO) --include-internal -w .
-
-# Generate go.work file for local development.
-.PHONY: go-work
-go-work: | $(CROSSLINK)
- $(CROSSLINK) work --root=$(shell pwd)
-
-# Build
-
-.PHONY: build
-
-build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
-build/%: DIR=$*
-build/%:
- @echo "$(GO) build $(DIR)/..." \
- && cd $(DIR) \
- && $(GO) build ./...
-
-build-tests/%: DIR=$*
-build-tests/%:
- @echo "$(GO) build tests $(DIR)/..." \
- && cd $(DIR) \
- && $(GO) list ./... \
- | grep -v third_party \
- | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
-
-# Tests
-
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race
-.PHONY: $(TEST_TARGETS) test
-test-default test-race: ARGS=-race
-test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
-test-short: ARGS=-short
-test-verbose: ARGS=-v -race
-$(TEST_TARGETS): test
-test: $(OTEL_GO_MOD_DIRS:%=test/%)
-test/%: DIR=$*
-test/%:
- @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
- && cd $(DIR) \
- && $(GO) list ./... \
- | grep -v third_party \
- | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
-
-COVERAGE_MODE = atomic
-COVERAGE_PROFILE = coverage.out
-.PHONY: test-coverage
-test-coverage: | $(GOCOVMERGE)
- @set -e; \
- printf "" > coverage.txt; \
- for dir in $(ALL_COVERAGE_MOD_DIRS); do \
- echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
- (cd "$${dir}" && \
- $(GO) list ./... \
- | grep -v third_party \
- | grep -v 'semconv/v.*' \
- | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
- $(GO) tool cover -html=coverage.out -o coverage.html); \
- done; \
- $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
-
-# Adding a directory will include all benchmarks in that direcotry if a filter is not specified.
-BENCHMARK_TARGETS := sdk/trace
-.PHONY: benchmark
-benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
-BENCHMARK_FILTER = .
-# You can override the filter for a particular directory by adding a rule here.
-benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
-benchmark/%:
- @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
- && cd $* \
- $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
-
-.PHONY: golangci-lint golangci-lint-fix
-golangci-lint-fix: ARGS=--fix
-golangci-lint-fix: golangci-lint
-golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
-golangci-lint/%: DIR=$*
-golangci-lint/%: | $(GOLANGCI_LINT)
- @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
- && cd $(DIR) \
- && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
-
-.PHONY: crosslink
-crosslink: | $(CROSSLINK)
- @echo "Updating intra-repository dependencies in all go modules" \
- && $(CROSSLINK) --root=$(shell pwd) --prune
-
-.PHONY: go-mod-tidy
-go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
-go-mod-tidy/%: DIR=$*
-go-mod-tidy/%: | crosslink
- @echo "$(GO) mod tidy in $(DIR)" \
- && cd $(DIR) \
- && $(GO) mod tidy -compat=1.20
-
-.PHONY: lint-modules
-lint-modules: go-mod-tidy
-
-.PHONY: lint
-lint: misspell lint-modules golangci-lint govulncheck
-
-.PHONY: vanity-import-check
-vanity-import-check: | $(PORTO)
- @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
-
-.PHONY: misspell
-misspell: | $(MISSPELL)
- @$(MISSPELL) -w $(ALL_DOCS)
-
-.PHONY: govulncheck
-govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
-govulncheck/%: DIR=$*
-govulncheck/%: | $(GOVULNCHECK)
- @echo "govulncheck ./... in $(DIR)" \
- && cd $(DIR) \
- && $(GOVULNCHECK) ./...
-
-.PHONY: codespell
-codespell: | $(CODESPELL)
- @$(DOCKERPY) $(CODESPELL)
-
-.PHONY: license-check
-license-check:
- @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
- awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
- done); \
- if [ -n "$${licRes}" ]; then \
- echo "license header checking failed:"; echo "$${licRes}"; \
- exit 1; \
- fi
-
-DEPENDABOT_CONFIG = .github/dependabot.yml
-.PHONY: dependabot-check
-dependabot-check: | $(DBOTCONF)
- @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
-
-.PHONY: dependabot-generate
-dependabot-generate: | $(DBOTCONF)
- @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
-
-.PHONY: check-clean-work-tree
-check-clean-work-tree:
- @if ! git diff --quiet; then \
- echo; \
- echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
- echo; \
- git status; \
- exit 1; \
- fi
-
-SEMCONVPKG ?= "semconv/"
-.PHONY: semconv-generate
-semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
- [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
- [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
- $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
-
-.PHONY: gorelease
-gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
-gorelease/%: DIR=$*
-gorelease/%:| $(GORELEASE)
- @echo "gorelease in $(DIR):" \
- && cd $(DIR) \
- && $(GORELEASE) \
- || echo ""
-
-.PHONY: prerelease
-prerelease: | $(MULTIMOD)
- @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
-
-COMMIT ?= "HEAD"
-.PHONY: add-tags
-add-tags: | $(MULTIMOD)
- @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
- $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
-
-.PHONY: lint-markdown
-lint-markdown:
- docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
deleted file mode 100644
index 2c5b0cc..0000000
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-# OpenTelemetry-Go
-
-[](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
-[](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
-[](https://pkg.go.dev/go.opentelemetry.io/otel)
-[](https://goreportcard.com/report/go.opentelemetry.io/otel)
-[](https://cloud-native.slack.com/archives/C01NPAXACKT)
-
-OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
-It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
-
-## Project Status
-
-| Signal | Status |
-|---------|------------|
-| Traces | Stable |
-| Metrics | Stable |
-| Logs | Design [1] |
-
-- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
- No Logs Pull Requests are currently being accepted.
-
-Progress and status specific to this repository is tracked in our
-[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
-and
-[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
-
-Project versioning information and stability guarantees can be found in the
-[versioning documentation](VERSIONING.md).
-
-### Compatibility
-
-OpenTelemetry-Go ensures compatibility with the current supported versions of
-the [Go language](https://golang.org/doc/devel/release#policy):
-
-> Each major Go release is supported until there are two newer major releases.
-> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
-
-For versions of Go that are no longer supported upstream, opentelemetry-go will
-stop ensuring compatibility with these versions in the following manner:
-
-- A minor release of opentelemetry-go will be made to add support for the new
- supported release of Go.
-- The following minor release of opentelemetry-go will remove compatibility
- testing for the oldest (now archived upstream) version of Go. This, and
- future, releases of opentelemetry-go may include features only supported by
- the currently supported versions of Go.
-
-Currently, this project supports the following environments.
-
-| OS | Go Version | Architecture |
-|---------|------------|--------------|
-| Ubuntu | 1.21 | amd64 |
-| Ubuntu | 1.20 | amd64 |
-| Ubuntu | 1.21 | 386 |
-| Ubuntu | 1.20 | 386 |
-| MacOS | 1.21 | amd64 |
-| MacOS | 1.20 | amd64 |
-| Windows | 1.21 | amd64 |
-| Windows | 1.20 | amd64 |
-| Windows | 1.21 | 386 |
-| Windows | 1.20 | 386 |
-
-While this project should work for other systems, no compatibility guarantees
-are made for those systems currently.
-
-## Getting Started
-
-You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
-
-OpenTelemetry's goal is to provide a single set of APIs to capture distributed
-traces and metrics from your application and send them to an observability
-platform. This project allows you to do just that for applications written in
-Go. There are two steps to this process: instrument your application, and
-configure an exporter.
-
-### Instrumentation
-
-To start capturing distributed traces and metric events from your application
-it first needs to be instrumented. The easiest way to do this is by using an
-instrumentation library for your code. Be sure to check out [the officially
-supported instrumentation
-libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
-
-If you need to extend the telemetry an instrumentation library provides or want
-to build your own instrumentation for your application directly you will need
-to use the
-[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
-package. The included [examples](./example/) are a good way to see some
-practical uses of this process.
-
-### Export
-
-Now that your application is instrumented to collect telemetry, it needs an
-export pipeline to send that telemetry to an observability platform.
-
-All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
-
-| Exporter | Metrics | Traces |
-|---------------------------------------|:-------:|:------:|
-| [OTLP](./exporters/otlp/) | ✓ | ✓ |
-| [Prometheus](./exporters/prometheus/) | ✓ | |
-| [stdout](./exporters/stdout/) | ✓ | ✓ |
-| [Zipkin](./exporters/zipkin/) | | ✓ |
-
-## Contributing
-
-See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
deleted file mode 100644
index 82ce3ee..0000000
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ /dev/null
@@ -1,139 +0,0 @@
-# Release Process
-
-## Semantic Convention Generation
-
-New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
-The `semconv-generate` make target is used for this.
-
-1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
-2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
-3. Run the `make semconv-generate ...` target from this repository.
-
-For example,
-
-```sh
-export TAG="v1.21.0" # Change to the release version you are generating.
-export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
-docker pull otel/semconvgen:latest
-make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
-```
-
-This should create a new sub-package of [`semconv`](./semconv).
-Ensure things look correct before submitting a pull request to include the addition.
-
-## Breaking changes validation
-
-You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
-
-You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
-
-## Pre-Release
-
-First, decide which module sets will be released and update their versions
-in `versions.yaml`. Commit this change to a new branch.
-
-Update go.mod for submodules to depend on the new release which will happen in the next step.
-
-1. Run the `prerelease` make target. It creates a branch
- `prerelease_<module set>_<new tag>` that will contain all release changes.
-
- ```
- make prerelease MODSET=<module set>
- ```
-
-2. Verify the changes.
-
- ```
- git diff ...prerelease_<module set>_<new tag>
- ```
-
- This should have changed the version for all modules to be `<new tag>`.
- If these changes look correct, merge them into your pre-release branch:
-
- ```go
- git merge prerelease_<module set>_<new tag>
- ```
-
-3. Update the [Changelog](./CHANGELOG.md).
- - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
- To verify this, you can look directly at the commits since the `<last tag>`.
-
- ```
- git --no-pager log --pretty=oneline "<last tag>..HEAD"
- ```
-
- - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
- - Update all the appropriate links at the bottom.
-
-4. Push the changes to upstream and create a Pull Request on GitHub.
- Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
-
-## Tag
-
-Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
-
-***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
-Failure to do so will leave things in a broken state. As long as you do not
-change `versions.yaml` between pre-release and this step, things should be fine.
-
-***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
-It is critical you make sure the version you push upstream is correct.
-[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
-
-1. For each module set that will be released, run the `add-tags` make target
- using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
-
- ```
- make add-tags MODSET=<module set> COMMIT=<commit hash>
- ```
-
- It should only be necessary to provide an explicit `COMMIT` value if the
- current `HEAD` of your working directory is not the correct commit.
-
-2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
- Make sure you push all sub-modules as well.
-
- ```
- git push upstream <new tag>
- git push upstream <submodules-path/new tag>
- ...
- ```
-
-## Release
-
-Finally create a Release for the new `<new tag>` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
-
-## Verify Examples
-
-After releasing verify that examples build outside of the repository.
-
-```
-./verify_examples.sh
-```
-
-The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
-This ensures they build with the published release, not the local copy.
-
-## Post-Release
-
-### Contrib Repository
-
-Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
-
-### Website Documentation
-
-Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
-Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
-
-[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
-[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
-[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
-
-### Demo Repository
-
-Bump the dependencies in the following Go services:
-
-- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
deleted file mode 100644
index 412f1e3..0000000
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ /dev/null
@@ -1,224 +0,0 @@
-# Versioning
-
-This document describes the versioning policy for this repository. This policy
-is designed so the following goals can be achieved.
-
-**Users are provided a codebase of value that is stable and secure.**
-
-## Policy
-
-* Versioning of this project will be idiomatic of a Go project using [Go
- modules](https://github.com/golang/go/wiki/Modules).
- * [Semantic import
- versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
- will be used.
- * Versions will comply with [semver
- 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
- * New methods may be added to exported API interfaces. All exported
- interfaces that fall within this exception will include the following
- paragraph in their public documentation.
-
- > Warning: methods may be added to this interface in minor releases.
-
- * If a module is version `v2` or higher, the major version of the module
- must be included as a `/vN` at the end of the module paths used in
- `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
- go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
- (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
- paths used in `go get` commands (e.g., `go get
- go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
- `@v2.0.1` in that example. One way to think about it is that the module
- name now includes the `/v2`, so include `/v2` whenever you are using the
- module name).
- * If a module is version `v0` or `v1`, do not include the major version in
- either the module path or the import path.
- * Modules will be used to encapsulate signals and components.
- * Experimental modules still under active development will be versioned at
- `v0` to imply the stability guarantee defined by
- [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
-
- > Major version zero (0.y.z) is for initial development. Anything MAY
- > change at any time. The public API SHOULD NOT be considered stable.
-
- * Mature modules for which we guarantee a stable public API will be versioned
- with a major version greater than `v0`.
- * The decision to make a module stable will be made on a case-by-case
- basis by the maintainers of this project.
- * Experimental modules will start their versioning at `v0.0.0` and will
- increment their minor version when backwards incompatible changes are
- released and increment their patch version when backwards compatible
- changes are released.
- * All stable modules that use the same major version number will use the
- same entire version number.
- * Stable modules may be released with an incremented minor or patch
- version even though that module has not been changed, but rather so
- that it will remain at the same version as other stable modules that
- did undergo change.
- * When an experimental module becomes stable a new stable module version
- will be released and will include this now stable module. The new
- stable module version will be an increment of the minor version number
- and will be applied to all existing stable modules as well as the newly
- stable module being released.
-* Versioning of the associated [contrib
- repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
- this project will be idiomatic of a Go project using [Go
- modules](https://github.com/golang/go/wiki/Modules).
- * [Semantic import
- versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
- will be used.
- * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
- * If a module is version `v2` or higher, the
- major version of the module must be included as a `/vN` at the end of the
- module paths used in `go.mod` files (e.g., `module
- go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
- go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
- package import path (e.g., `import
- "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
- the paths used in `go get` commands (e.g., `go get
- go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
- is both a `/v2` and a `@v2.0.1` in that example. One way to think about
- it is that the module name now includes the `/v2`, so include `/v2`
- whenever you are using the module name).
- * If a module is version `v0` or `v1`, do not include the major version
- in either the module path or the import path.
- * In addition to public APIs, telemetry produced by stable instrumentation
- will remain stable and backwards compatible. This is to avoid breaking
- alerts and dashboard.
- * Modules will be used to encapsulate instrumentation, detectors, exporters,
- propagators, and any other independent sets of related components.
- * Experimental modules still under active development will be versioned at
- `v0` to imply the stability guarantee defined by
- [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
-
- > Major version zero (0.y.z) is for initial development. Anything MAY
- > change at any time. The public API SHOULD NOT be considered stable.
-
- * Mature modules for which we guarantee a stable public API and telemetry will
- be versioned with a major version greater than `v0`.
- * Experimental modules will start their versioning at `v0.0.0` and will
- increment their minor version when backwards incompatible changes are
- released and increment their patch version when backwards compatible
- changes are released.
- * Stable contrib modules cannot depend on experimental modules from this
- project.
- * All stable contrib modules of the same major version with this project
- will use the same entire version as this project.
- * Stable modules may be released with an incremented minor or patch
- version even though that module's code has not been changed. Instead
- the only change that will have been included is to have updated that
- modules dependency on this project's stable APIs.
- * When an experimental module in contrib becomes stable a new stable
- module version will be released and will include this now stable
- module. The new stable module version will be an increment of the minor
- version number and will be applied to all existing stable contrib
- modules, this project's modules, and the newly stable module being
- released.
- * Contrib modules will be kept up to date with this project's releases.
- * Due to the dependency contrib modules will implicitly have on this
- project's modules the release of stable contrib modules to match the
- released version number will be staggered after this project's release.
- There is no explicit time guarantee for how long after this projects
- release the contrib release will be. Effort should be made to keep them
- as close in time as possible.
- * No additional stable release in this project can be made until the
- contrib repository has a matching stable release.
- * No release can be made in the contrib repository after this project's
- stable release except for a stable release of the contrib repository.
-* GitHub releases will be made for all releases.
-* Go modules will be made available at Go package mirrors.
-
-## Example Versioning Lifecycle
-
-To better understand the implementation of the above policy the following
-example is provided. This project is simplified to include only the following
-modules and their versions:
-
-* `otel`: `v0.14.0`
-* `otel/trace`: `v0.14.0`
-* `otel/metric`: `v0.14.0`
-* `otel/baggage`: `v0.14.0`
-* `otel/sdk/trace`: `v0.14.0`
-* `otel/sdk/metric`: `v0.14.0`
-
-These modules have been developed to a point where the `otel/trace`,
-`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they
-should be considered for a stable release. The `otel/metric` and
-`otel/sdk/metric` are still under active development and the `otel` module
-depends on both `otel/trace` and `otel/metric`.
-
-The `otel` package is refactored to remove its dependencies on `otel/metric` so
-it can be released as stable as well. With that done the following release
-candidates are made:
-
-* `otel`: `v1.0.0-RC1`
-* `otel/trace`: `v1.0.0-RC1`
-* `otel/baggage`: `v1.0.0-RC1`
-* `otel/sdk/trace`: `v1.0.0-RC1`
-
-The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
-
-A few minor issues are discovered in the `otel/trace` package. These issues are
-resolved with some minor, but backwards incompatible, changes and are released
-as a second release candidate:
-
-* `otel`: `v1.0.0-RC2`
-* `otel/trace`: `v1.0.0-RC2`
-* `otel/baggage`: `v1.0.0-RC2`
-* `otel/sdk/trace`: `v1.0.0-RC2`
-
-Notice that all module version numbers are incremented to adhere to our
-versioning policy.
-
-After these release candidates have been evaluated to satisfaction, they are
-released as version `v1.0.0`.
-
-* `otel`: `v1.0.0`
-* `otel/trace`: `v1.0.0`
-* `otel/baggage`: `v1.0.0`
-* `otel/sdk/trace`: `v1.0.0`
-
-Since both the `go` utility and the Go module system support [the semantic
-versioning definition of
-precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
-will correctly be interpreted as the successor to the previous release
-candidates.
-
-Active development of this project continues. The `otel/metric` module now has
-backwards incompatible changes to its API that need to be released and the
-`otel/baggage` module has a minor bug fix that needs to be released. The
-following release is made:
-
-* `otel`: `v1.0.1`
-* `otel/trace`: `v1.0.1`
-* `otel/metric`: `v0.15.0`
-* `otel/baggage`: `v1.0.1`
-* `otel/sdk/trace`: `v1.0.1`
-* `otel/sdk/metric`: `v0.15.0`
-
-Notice that, again, all stable module versions are incremented in unison and
-the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
-bumped its version. This bump of the `otel/sdk/metric` package makes sense
-given their coupling, though it is not explicitly required by our versioning
-policy.
-
-As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
-point where they should be evaluated for stability. The `otel` module is
-reintegrated with the `otel/metric` package and the following release is made:
-
-* `otel`: `v1.1.0-RC1`
-* `otel/trace`: `v1.1.0-RC1`
-* `otel/metric`: `v1.1.0-RC1`
-* `otel/baggage`: `v1.1.0-RC1`
-* `otel/sdk/trace`: `v1.1.0-RC1`
-* `otel/sdk/metric`: `v1.1.0-RC1`
-
-All the modules are evaluated and determined to a viable stable release. They
-are then released as version `v1.1.0` (the minor version is incremented to
-indicate the addition of new signal).
-
-* `otel`: `v1.1.0`
-* `otel/trace`: `v1.1.0`
-* `otel/metric`: `v1.1.0`
-* `otel/baggage`: `v1.1.0`
-* `otel/sdk/trace`: `v1.1.0`
-* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
deleted file mode 100644
index dafe742..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package attribute provides key and value attributes.
-package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
deleted file mode 100644
index fe2bc57..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "bytes"
- "sync"
- "sync/atomic"
-)
-
-type (
- // Encoder is a mechanism for serializing an attribute set into a specific
- // string representation that supports caching, to avoid repeated
- // serialization. An example could be an exporter encoding the attribute
- // set into a wire representation.
- Encoder interface {
- // Encode returns the serialized encoding of the attribute set using
- // its Iterator. This result may be cached by a attribute.Set.
- Encode(iterator Iterator) string
-
- // ID returns a value that is unique for each class of attribute
- // encoder. Attribute encoders allocate these using `NewEncoderID`.
- ID() EncoderID
- }
-
- // EncoderID is used to identify distinct Encoder
- // implementations, for caching encoded results.
- EncoderID struct {
- value uint64
- }
-
- // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
- // allocations used in encoding attributes. This implementation encodes a
- // comma-separated list of key=value, with '/'-escaping of '=', ',', and
- // '\'.
- defaultAttrEncoder struct {
- // pool is a pool of attribute set builders. The buffers in this pool
- // grow to a size that most attribute encodings will not allocate new
- // memory.
- pool sync.Pool // *bytes.Buffer
- }
-)
-
-// escapeChar is used to ensure uniqueness of the attribute encoding where
-// keys or values contain either '=' or ','. Since there is no parser needed
-// for this encoding and its only requirement is to be unique, this choice is
-// arbitrary. Users will see these in some exporters (e.g., stdout), so the
-// backslash ('\') is used as a conventional choice.
-const escapeChar = '\\'
-
-var (
- _ Encoder = &defaultAttrEncoder{}
-
- // encoderIDCounter is for generating IDs for other attribute encoders.
- encoderIDCounter uint64
-
- defaultEncoderOnce sync.Once
- defaultEncoderID = NewEncoderID()
- defaultEncoderInstance *defaultAttrEncoder
-)
-
-// NewEncoderID returns a unique attribute encoder ID. It should be called
-// once per each type of attribute encoder. Preferably in init() or in var
-// definition.
-func NewEncoderID() EncoderID {
- return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
-}
-
-// DefaultEncoder returns an attribute encoder that encodes attributes in such
-// a way that each escaped attribute's key is followed by an equal sign and
-// then by an escaped attribute's value. All key-value pairs are separated by
-// a comma.
-//
-// Escaping is done by prepending a backslash before either a backslash, equal
-// sign or a comma.
-func DefaultEncoder() Encoder {
- defaultEncoderOnce.Do(func() {
- defaultEncoderInstance = &defaultAttrEncoder{
- pool: sync.Pool{
- New: func() interface{} {
- return &bytes.Buffer{}
- },
- },
- }
- })
- return defaultEncoderInstance
-}
-
-// Encode is a part of an implementation of the AttributeEncoder interface.
-func (d *defaultAttrEncoder) Encode(iter Iterator) string {
- buf := d.pool.Get().(*bytes.Buffer)
- defer d.pool.Put(buf)
- buf.Reset()
-
- for iter.Next() {
- i, keyValue := iter.IndexedAttribute()
- if i > 0 {
- _, _ = buf.WriteRune(',')
- }
- copyAndEscape(buf, string(keyValue.Key))
-
- _, _ = buf.WriteRune('=')
-
- if keyValue.Value.Type() == STRING {
- copyAndEscape(buf, keyValue.Value.AsString())
- } else {
- _, _ = buf.WriteString(keyValue.Value.Emit())
- }
- }
- return buf.String()
-}
-
-// ID is a part of an implementation of the AttributeEncoder interface.
-func (*defaultAttrEncoder) ID() EncoderID {
- return defaultEncoderID
-}
-
-// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
-// making the default encoding unique.
-func copyAndEscape(buf *bytes.Buffer, val string) {
- for _, ch := range val {
- switch ch {
- case '=', ',', escapeChar:
- _, _ = buf.WriteRune(escapeChar)
- }
- _, _ = buf.WriteRune(ch)
- }
-}
-
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`. Invalid encoder IDs will not be cached.
-func (id EncoderID) Valid() bool {
- return id.value != 0
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
deleted file mode 100644
index 638c213..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-// Filter supports removing certain attributes from attribute sets. When
-// the filter returns true, the attribute will be kept in the filtered
-// attribute set. When the filter returns false, the attribute is excluded
-// from the filtered attribute set, and the attribute instead appears in
-// the removed list of excluded attributes.
-type Filter func(KeyValue) bool
-
-// NewAllowKeysFilter returns a Filter that only allows attributes with one of
-// the provided keys.
-//
-// If keys is empty a deny-all filter is returned.
-func NewAllowKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return false }
- }
-
- allowed := make(map[Key]struct{})
- for _, k := range keys {
- allowed[k] = struct{}{}
- }
- return func(kv KeyValue) bool {
- _, ok := allowed[kv.Key]
- return ok
- }
-}
-
-// NewDenyKeysFilter returns a Filter that only allows attributes
-// that do not have one of the provided keys.
-//
-// If keys is empty an allow-all filter is returned.
-func NewDenyKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return true }
- }
-
- forbid := make(map[Key]struct{})
- for _, k := range keys {
- forbid[k] = struct{}{}
- }
- return func(kv KeyValue) bool {
- _, ok := forbid[kv.Key]
- return !ok
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
deleted file mode 100644
index 841b271..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-// Iterator allows iterating over the set of attributes in order, sorted by
-// key.
-type Iterator struct {
- storage *Set
- idx int
-}
-
-// MergeIterator supports iterating over two sets of attributes while
-// eliminating duplicate values from the combined set. The first iterator
-// value takes precedence.
-type MergeIterator struct {
- one oneIterator
- two oneIterator
- current KeyValue
-}
-
-type oneIterator struct {
- iter Iterator
- done bool
- attr KeyValue
-}
-
-// Next moves the iterator to the next position. Returns false if there are no
-// more attributes.
-func (i *Iterator) Next() bool {
- i.idx++
- return i.idx < i.Len()
-}
-
-// Label returns current KeyValue. Must be called only after Next returns
-// true.
-//
-// Deprecated: Use Attribute instead.
-func (i *Iterator) Label() KeyValue {
- return i.Attribute()
-}
-
-// Attribute returns the current KeyValue of the Iterator. It must be called
-// only after Next returns true.
-func (i *Iterator) Attribute() KeyValue {
- kv, _ := i.storage.Get(i.idx)
- return kv
-}
-
-// IndexedLabel returns current index and attribute. Must be called only
-// after Next returns true.
-//
-// Deprecated: Use IndexedAttribute instead.
-func (i *Iterator) IndexedLabel() (int, KeyValue) {
- return i.idx, i.Attribute()
-}
-
-// IndexedAttribute returns current index and attribute. Must be called only
-// after Next returns true.
-func (i *Iterator) IndexedAttribute() (int, KeyValue) {
- return i.idx, i.Attribute()
-}
-
-// Len returns a number of attributes in the iterated set.
-func (i *Iterator) Len() int {
- return i.storage.Len()
-}
-
-// ToSlice is a convenience function that creates a slice of attributes from
-// the passed iterator. The iterator is set up to start from the beginning
-// before creating the slice.
-func (i *Iterator) ToSlice() []KeyValue {
- l := i.Len()
- if l == 0 {
- return nil
- }
- i.idx = -1
- slice := make([]KeyValue, 0, l)
- for i.Next() {
- slice = append(slice, i.Attribute())
- }
- return slice
-}
-
-// NewMergeIterator returns a MergeIterator for merging two attribute sets.
-// Duplicates are resolved by taking the value from the first set.
-func NewMergeIterator(s1, s2 *Set) MergeIterator {
- mi := MergeIterator{
- one: makeOne(s1.Iter()),
- two: makeOne(s2.Iter()),
- }
- return mi
-}
-
-func makeOne(iter Iterator) oneIterator {
- oi := oneIterator{
- iter: iter,
- }
- oi.advance()
- return oi
-}
-
-func (oi *oneIterator) advance() {
- if oi.done = !oi.iter.Next(); !oi.done {
- oi.attr = oi.iter.Attribute()
- }
-}
-
-// Next returns true if there is another attribute available.
-func (m *MergeIterator) Next() bool {
- if m.one.done && m.two.done {
- return false
- }
- if m.one.done {
- m.current = m.two.attr
- m.two.advance()
- return true
- }
- if m.two.done {
- m.current = m.one.attr
- m.one.advance()
- return true
- }
- if m.one.attr.Key == m.two.attr.Key {
- m.current = m.one.attr // first iterator attribute value wins
- m.one.advance()
- m.two.advance()
- return true
- }
- if m.one.attr.Key < m.two.attr.Key {
- m.current = m.one.attr
- m.one.advance()
- return true
- }
- m.current = m.two.attr
- m.two.advance()
- return true
-}
-
-// Label returns the current value after Next() returns true.
-//
-// Deprecated: Use Attribute instead.
-func (m *MergeIterator) Label() KeyValue {
- return m.current
-}
-
-// Attribute returns the current value after Next() returns true.
-func (m *MergeIterator) Attribute() KeyValue {
- return m.current
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
deleted file mode 100644
index 0656a04..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-// Key represents the key part in key-value pairs. It's a string. The
-// allowed character set in the key depends on the use of the key.
-type Key string
-
-// Bool creates a KeyValue instance with a BOOL Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Bool(name, value).
-func (k Key) Bool(v bool) KeyValue {
- return KeyValue{
- Key: k,
- Value: BoolValue(v),
- }
-}
-
-// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- BoolSlice(name, value).
-func (k Key) BoolSlice(v []bool) KeyValue {
- return KeyValue{
- Key: k,
- Value: BoolSliceValue(v),
- }
-}
-
-// Int creates a KeyValue instance with an INT64 Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Int(name, value).
-func (k Key) Int(v int) KeyValue {
- return KeyValue{
- Key: k,
- Value: IntValue(v),
- }
-}
-
-// IntSlice creates a KeyValue instance with an INT64SLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- IntSlice(name, value).
-func (k Key) IntSlice(v []int) KeyValue {
- return KeyValue{
- Key: k,
- Value: IntSliceValue(v),
- }
-}
-
-// Int64 creates a KeyValue instance with an INT64 Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Int64(name, value).
-func (k Key) Int64(v int64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Int64Value(v),
- }
-}
-
-// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Int64Slice(name, value).
-func (k Key) Int64Slice(v []int64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Int64SliceValue(v),
- }
-}
-
-// Float64 creates a KeyValue instance with a FLOAT64 Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Float64(name, value).
-func (k Key) Float64(v float64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Float64Value(v),
- }
-}
-
-// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- Float64(name, value).
-func (k Key) Float64Slice(v []float64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Float64SliceValue(v),
- }
-}
-
-// String creates a KeyValue instance with a STRING Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- String(name, value).
-func (k Key) String(v string) KeyValue {
- return KeyValue{
- Key: k,
- Value: StringValue(v),
- }
-}
-
-// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
-//
-// If creating both a key and value at the same time, use the provided
-// convenience function instead -- StringSlice(name, value).
-func (k Key) StringSlice(v []string) KeyValue {
- return KeyValue{
- Key: k,
- Value: StringSliceValue(v),
- }
-}
-
-// Defined returns true for non-empty keys.
-func (k Key) Defined() bool {
- return len(k) != 0
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
deleted file mode 100644
index 1ddf3ce..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "fmt"
-)
-
-// KeyValue holds a key and value pair.
-type KeyValue struct {
- Key Key
- Value Value
-}
-
-// Valid returns if kv is a valid OpenTelemetry attribute.
-func (kv KeyValue) Valid() bool {
- return kv.Key.Defined() && kv.Value.Type() != INVALID
-}
-
-// Bool creates a KeyValue with a BOOL Value type.
-func Bool(k string, v bool) KeyValue {
- return Key(k).Bool(v)
-}
-
-// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
-func BoolSlice(k string, v []bool) KeyValue {
- return Key(k).BoolSlice(v)
-}
-
-// Int creates a KeyValue with an INT64 Value type.
-func Int(k string, v int) KeyValue {
- return Key(k).Int(v)
-}
-
-// IntSlice creates a KeyValue with an INT64SLICE Value type.
-func IntSlice(k string, v []int) KeyValue {
- return Key(k).IntSlice(v)
-}
-
-// Int64 creates a KeyValue with an INT64 Value type.
-func Int64(k string, v int64) KeyValue {
- return Key(k).Int64(v)
-}
-
-// Int64Slice creates a KeyValue with an INT64SLICE Value type.
-func Int64Slice(k string, v []int64) KeyValue {
- return Key(k).Int64Slice(v)
-}
-
-// Float64 creates a KeyValue with a FLOAT64 Value type.
-func Float64(k string, v float64) KeyValue {
- return Key(k).Float64(v)
-}
-
-// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
-func Float64Slice(k string, v []float64) KeyValue {
- return Key(k).Float64Slice(v)
-}
-
-// String creates a KeyValue with a STRING Value type.
-func String(k, v string) KeyValue {
- return Key(k).String(v)
-}
-
-// StringSlice creates a KeyValue with a STRINGSLICE Value type.
-func StringSlice(k string, v []string) KeyValue {
- return Key(k).StringSlice(v)
-}
-
-// Stringer creates a new key-value pair with a passed name and a string
-// value generated by the passed Stringer interface.
-func Stringer(k string, v fmt.Stringer) KeyValue {
- return Key(k).String(v.String())
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
deleted file mode 100644
index 9f9303d..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ /dev/null
@@ -1,429 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "encoding/json"
- "reflect"
- "sort"
- "sync"
-)
-
-type (
- // Set is the representation for a distinct attribute set. It manages an
- // immutable set of attributes, with an internal cache for storing
- // attribute encodings.
- //
- // This type supports the Equivalent method of comparison using values of
- // type Distinct.
- Set struct {
- equivalent Distinct
- }
-
- // Distinct wraps a variable-size array of KeyValue, constructed with keys
- // in sorted order. This can be used as a map key or for equality checking
- // between Sets.
- Distinct struct {
- iface interface{}
- }
-
- // Sortable implements sort.Interface, used for sorting KeyValue. This is
- // an exported type to support a memory optimization. A pointer to one of
- // these is needed for the call to sort.Stable(), which the caller may
- // provide in order to avoid an allocation. See NewSetWithSortable().
- Sortable []KeyValue
-)
-
-var (
- // keyValueType is used in computeDistinctReflect.
- keyValueType = reflect.TypeOf(KeyValue{})
-
- // emptySet is returned for empty attribute sets.
- emptySet = &Set{
- equivalent: Distinct{
- iface: [0]KeyValue{},
- },
- }
-
- // sortables is a pool of Sortables used to create Sets with a user does
- // not provide one.
- sortables = sync.Pool{
- New: func() interface{} { return new(Sortable) },
- }
-)
-
-// EmptySet returns a reference to a Set with no elements.
-//
-// This is a convenience provided for optimized calling utility.
-func EmptySet() *Set {
- return emptySet
-}
-
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
- return reflect.ValueOf(d.iface)
-}
-
-// Valid returns true if this value refers to a valid Set.
-func (d Distinct) Valid() bool {
- return d.iface != nil
-}
-
-// Len returns the number of attributes in this set.
-func (l *Set) Len() int {
- if l == nil || !l.equivalent.Valid() {
- return 0
- }
- return l.equivalent.reflectValue().Len()
-}
-
-// Get returns the KeyValue at ordered position idx in this set.
-func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil || !l.equivalent.Valid() {
- return KeyValue{}, false
- }
- value := l.equivalent.reflectValue()
-
- if idx >= 0 && idx < value.Len() {
- // Note: The Go compiler successfully avoids an allocation for
- // the interface{} conversion here:
- return value.Index(idx).Interface().(KeyValue), true
- }
-
- return KeyValue{}, false
-}
-
-// Value returns the value of a specified key in this set.
-func (l *Set) Value(k Key) (Value, bool) {
- if l == nil || !l.equivalent.Valid() {
- return Value{}, false
- }
- rValue := l.equivalent.reflectValue()
- vlen := rValue.Len()
-
- idx := sort.Search(vlen, func(idx int) bool {
- return rValue.Index(idx).Interface().(KeyValue).Key >= k
- })
- if idx >= vlen {
- return Value{}, false
- }
- keyValue := rValue.Index(idx).Interface().(KeyValue)
- if k == keyValue.Key {
- return keyValue.Value, true
- }
- return Value{}, false
-}
-
-// HasValue tests whether a key is defined in this set.
-func (l *Set) HasValue(k Key) bool {
- if l == nil {
- return false
- }
- _, ok := l.Value(k)
- return ok
-}
-
-// Iter returns an iterator for visiting the attributes in this set.
-func (l *Set) Iter() Iterator {
- return Iterator{
- storage: l,
- idx: -1,
- }
-}
-
-// ToSlice returns the set of attributes belonging to this set, sorted, where
-// keys appear no more than once.
-func (l *Set) ToSlice() []KeyValue {
- iter := l.Iter()
- return iter.ToSlice()
-}
-
-// Equivalent returns a value that may be used as a map key. The Distinct type
-// guarantees that the result will equal the equivalent. Distinct value of any
-// attribute set with the same elements as this, where sets are made unique by
-// choosing the last value in the input for any given key.
-func (l *Set) Equivalent() Distinct {
- if l == nil || !l.equivalent.Valid() {
- return emptySet.equivalent
- }
- return l.equivalent
-}
-
-// Equals returns true if the argument set is equivalent to this set.
-func (l *Set) Equals(o *Set) bool {
- return l.Equivalent() == o.Equivalent()
-}
-
-// Encoded returns the encoded form of this set, according to encoder.
-func (l *Set) Encoded(encoder Encoder) string {
- if l == nil || encoder == nil {
- return ""
- }
-
- return encoder.Encode(l.Iter())
-}
-
-func empty() Set {
- return Set{
- equivalent: emptySet.equivalent,
- }
-}
-
-// NewSet returns a new Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-//
-// Except for empty sets, this method adds an additional allocation compared
-// with calls that include a Sortable.
-func NewSet(kvs ...KeyValue) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- srt := sortables.Get().(*Sortable)
- s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
- sortables.Put(srt)
- return s
-}
-
-// NewSetWithSortable returns a new Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-//
-// This call includes a Sortable option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
- return s
-}
-
-// NewSetWithFiltered returns a new Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-//
-// This call includes a Filter to include/exclude attribute keys from the
-// return value. Excluded keys are returned as a slice of attribute values.
-func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
- srt := sortables.Get().(*Sortable)
- s, filtered := NewSetWithSortableFiltered(kvs, srt, filter)
- sortables.Put(srt)
- return s, filtered
-}
-
-// NewSetWithSortableFiltered returns a new Set.
-//
-// Duplicate keys are eliminated by taking the last value. This
-// re-orders the input slice so that unique last-values are contiguous
-// at the end of the slice.
-//
-// This ensures the following:
-//
-// - Last-value-wins semantics
-// - Caller sees the reordering, but doesn't lose values
-// - Repeated call preserve last-value wins.
-//
-// Note that methods are defined on Set, although this returns Set. Callers
-// can avoid memory allocations by:
-//
-// - allocating a Sortable for use as a temporary in this method
-// - allocating a Set for storing the return value of this constructor.
-//
-// The result maintains a cache of encoded attributes, by attribute.EncoderID.
-// This value should not be copied after its first use.
-//
-// The second []KeyValue return value is a list of attributes that were
-// excluded by the Filter (if non-nil).
-func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
-
- *tmp = kvs
-
- // Stable sort so the following de-duplication can implement
- // last-value-wins semantics.
- sort.Stable(tmp)
-
- *tmp = nil
-
- position := len(kvs) - 1
- offset := position - 1
-
- // The requirements stated above require that the stable
- // result be placed in the end of the input slice, while
- // overwritten values are swapped to the beginning.
- //
- // De-duplicate with last-value-wins semantics. Preserve
- // duplicate values at the beginning of the input slice.
- for ; offset >= 0; offset-- {
- if kvs[offset].Key == kvs[position].Key {
- continue
- }
- position--
- kvs[offset], kvs[position] = kvs[position], kvs[offset]
- }
- if filter != nil {
- return filterSet(kvs[position:], filter)
- }
- return Set{
- equivalent: computeDistinct(kvs[position:]),
- }, nil
-}
-
-// filterSet reorders kvs so that included keys are contiguous at the end of
-// the slice, while excluded keys precede the included keys.
-func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- var excluded []KeyValue
-
- // Move attributes that do not match the filter so they're adjacent before
- // calling computeDistinct().
- distinctPosition := len(kvs)
-
- // Swap indistinct keys forward and distinct keys toward the
- // end of the slice.
- offset := len(kvs) - 1
- for ; offset >= 0; offset-- {
- if filter(kvs[offset]) {
- distinctPosition--
- kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
- continue
- }
- }
- excluded = kvs[:distinctPosition]
-
- return Set{
- equivalent: computeDistinct(kvs[distinctPosition:]),
- }, excluded
-}
-
-// Filter returns a filtered copy of this Set. See the documentation for
-// NewSetWithSortableFiltered for more details.
-func (l *Set) Filter(re Filter) (Set, []KeyValue) {
- if re == nil {
- return Set{
- equivalent: l.equivalent,
- }, nil
- }
-
- // Note: This could be refactored to avoid the temporary slice
- // allocation, if it proves to be expensive.
- return filterSet(l.ToSlice(), re)
-}
-
-// computeDistinct returns a Distinct using either the fixed- or
-// reflect-oriented code path, depending on the size of the input. The input
-// slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
- iface := computeDistinctFixed(kvs)
- if iface == nil {
- iface = computeDistinctReflect(kvs)
- }
- return Distinct{
- iface: iface,
- }
-}
-
-// computeDistinctFixed computes a Distinct for small slices. It returns nil
-// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
- switch len(kvs) {
- case 1:
- ptr := new([1]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 2:
- ptr := new([2]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 3:
- ptr := new([3]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 4:
- ptr := new([4]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 5:
- ptr := new([5]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 6:
- ptr := new([6]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 7:
- ptr := new([7]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 8:
- ptr := new([8]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 9:
- ptr := new([9]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 10:
- ptr := new([10]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- default:
- return nil
- }
-}
-
-// computeDistinctReflect computes a Distinct using reflection, works for any
-// size input.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
- at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
- for i, keyValue := range kvs {
- *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
- }
- return at.Interface()
-}
-
-// MarshalJSON returns the JSON encoding of the Set.
-func (l *Set) MarshalJSON() ([]byte, error) {
- return json.Marshal(l.equivalent.iface)
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
-func (l Set) MarshalLog() interface{} {
- kvs := make(map[string]string)
- for _, kv := range l.ToSlice() {
- kvs[string(kv.Key)] = kv.Value.Emit()
- }
- return kvs
-}
-
-// Len implements sort.Interface.
-func (l *Sortable) Len() int {
- return len(*l)
-}
-
-// Swap implements sort.Interface.
-func (l *Sortable) Swap(i, j int) {
- (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
-}
-
-// Less implements sort.Interface.
-func (l *Sortable) Less(i, j int) bool {
- return (*l)[i].Key < (*l)[j].Key
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
deleted file mode 100644
index e584b24..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Code generated by "stringer -type=Type"; DO NOT EDIT.
-
-package attribute
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[INVALID-0]
- _ = x[BOOL-1]
- _ = x[INT64-2]
- _ = x[FLOAT64-3]
- _ = x[STRING-4]
- _ = x[BOOLSLICE-5]
- _ = x[INT64SLICE-6]
- _ = x[FLOAT64SLICE-7]
- _ = x[STRINGSLICE-8]
-}
-
-const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
-
-var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
-
-func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
- return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
deleted file mode 100644
index cb21dd5..0000000
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package attribute // import "go.opentelemetry.io/otel/attribute"
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strconv"
-
- "go.opentelemetry.io/otel/internal"
- "go.opentelemetry.io/otel/internal/attribute"
-)
-
-//go:generate stringer -type=Type
-
-// Type describes the type of the data Value holds.
-type Type int // nolint: revive // redefines builtin Type.
-
-// Value represents the value part in key-value pairs.
-type Value struct {
- vtype Type
- numeric uint64
- stringly string
- slice interface{}
-}
-
-const (
- // INVALID is used for a Value with no value set.
- INVALID Type = iota
- // BOOL is a boolean Type Value.
- BOOL
- // INT64 is a 64-bit signed integral Type Value.
- INT64
- // FLOAT64 is a 64-bit floating point Type Value.
- FLOAT64
- // STRING is a string Type Value.
- STRING
- // BOOLSLICE is a slice of booleans Type Value.
- BOOLSLICE
- // INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
- INT64SLICE
- // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
- FLOAT64SLICE
- // STRINGSLICE is a slice of strings Type Value.
- STRINGSLICE
-)
-
-// BoolValue creates a BOOL Value.
-func BoolValue(v bool) Value {
- return Value{
- vtype: BOOL,
- numeric: internal.BoolToRaw(v),
- }
-}
-
-// BoolSliceValue creates a BOOLSLICE Value.
-func BoolSliceValue(v []bool) Value {
- return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
-}
-
-// IntValue creates an INT64 Value.
-func IntValue(v int) Value {
- return Int64Value(int64(v))
-}
-
-// IntSliceValue creates an INTSLICE Value.
-func IntSliceValue(v []int) Value {
- var int64Val int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
- for i, val := range v {
- cp.Elem().Index(i).SetInt(int64(val))
- }
- return Value{
- vtype: INT64SLICE,
- slice: cp.Elem().Interface(),
- }
-}
-
-// Int64Value creates an INT64 Value.
-func Int64Value(v int64) Value {
- return Value{
- vtype: INT64,
- numeric: internal.Int64ToRaw(v),
- }
-}
-
-// Int64SliceValue creates an INT64SLICE Value.
-func Int64SliceValue(v []int64) Value {
- return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
-}
-
-// Float64Value creates a FLOAT64 Value.
-func Float64Value(v float64) Value {
- return Value{
- vtype: FLOAT64,
- numeric: internal.Float64ToRaw(v),
- }
-}
-
-// Float64SliceValue creates a FLOAT64SLICE Value.
-func Float64SliceValue(v []float64) Value {
- return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
-}
-
-// StringValue creates a STRING Value.
-func StringValue(v string) Value {
- return Value{
- vtype: STRING,
- stringly: v,
- }
-}
-
-// StringSliceValue creates a STRINGSLICE Value.
-func StringSliceValue(v []string) Value {
- return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
-}
-
-// Type returns a type of the Value.
-func (v Value) Type() Type {
- return v.vtype
-}
-
-// AsBool returns the bool value. Make sure that the Value's type is
-// BOOL.
-func (v Value) AsBool() bool {
- return internal.RawToBool(v.numeric)
-}
-
-// AsBoolSlice returns the []bool value. Make sure that the Value's type is
-// BOOLSLICE.
-func (v Value) AsBoolSlice() []bool {
- if v.vtype != BOOLSLICE {
- return nil
- }
- return v.asBoolSlice()
-}
-
-func (v Value) asBoolSlice() []bool {
- return attribute.AsBoolSlice(v.slice)
-}
-
-// AsInt64 returns the int64 value. Make sure that the Value's type is
-// INT64.
-func (v Value) AsInt64() int64 {
- return internal.RawToInt64(v.numeric)
-}
-
-// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
-// INT64SLICE.
-func (v Value) AsInt64Slice() []int64 {
- if v.vtype != INT64SLICE {
- return nil
- }
- return v.asInt64Slice()
-}
-
-func (v Value) asInt64Slice() []int64 {
- return attribute.AsInt64Slice(v.slice)
-}
-
-// AsFloat64 returns the float64 value. Make sure that the Value's
-// type is FLOAT64.
-func (v Value) AsFloat64() float64 {
- return internal.RawToFloat64(v.numeric)
-}
-
-// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
-// FLOAT64SLICE.
-func (v Value) AsFloat64Slice() []float64 {
- if v.vtype != FLOAT64SLICE {
- return nil
- }
- return v.asFloat64Slice()
-}
-
-func (v Value) asFloat64Slice() []float64 {
- return attribute.AsFloat64Slice(v.slice)
-}
-
-// AsString returns the string value. Make sure that the Value's type
-// is STRING.
-func (v Value) AsString() string {
- return v.stringly
-}
-
-// AsStringSlice returns the []string value. Make sure that the Value's type is
-// STRINGSLICE.
-func (v Value) AsStringSlice() []string {
- if v.vtype != STRINGSLICE {
- return nil
- }
- return v.asStringSlice()
-}
-
-func (v Value) asStringSlice() []string {
- return attribute.AsStringSlice(v.slice)
-}
-
-type unknownValueType struct{}
-
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
- switch v.Type() {
- case BOOL:
- return v.AsBool()
- case BOOLSLICE:
- return v.asBoolSlice()
- case INT64:
- return v.AsInt64()
- case INT64SLICE:
- return v.asInt64Slice()
- case FLOAT64:
- return v.AsFloat64()
- case FLOAT64SLICE:
- return v.asFloat64Slice()
- case STRING:
- return v.stringly
- case STRINGSLICE:
- return v.asStringSlice()
- }
- return unknownValueType{}
-}
-
-// Emit returns a string representation of Value's data.
-func (v Value) Emit() string {
- switch v.Type() {
- case BOOLSLICE:
- return fmt.Sprint(v.asBoolSlice())
- case BOOL:
- return strconv.FormatBool(v.AsBool())
- case INT64SLICE:
- return fmt.Sprint(v.asInt64Slice())
- case INT64:
- return strconv.FormatInt(v.AsInt64(), 10)
- case FLOAT64SLICE:
- return fmt.Sprint(v.asFloat64Slice())
- case FLOAT64:
- return fmt.Sprint(v.AsFloat64())
- case STRINGSLICE:
- return fmt.Sprint(v.asStringSlice())
- case STRING:
- return v.stringly
- default:
- return "unknown"
- }
-}
-
-// MarshalJSON returns the JSON encoding of the Value.
-func (v Value) MarshalJSON() ([]byte, error) {
- var jsonVal struct {
- Type string
- Value interface{}
- }
- jsonVal.Type = v.Type().String()
- jsonVal.Value = v.AsInterface()
- return json.Marshal(jsonVal)
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
deleted file mode 100644
index 84532cb..0000000
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ /dev/null
@@ -1,552 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package baggage // import "go.opentelemetry.io/otel/baggage"
-
-import (
- "errors"
- "fmt"
- "net/url"
- "regexp"
- "strings"
-
- "go.opentelemetry.io/otel/internal/baggage"
-)
-
-const (
- maxMembers = 180
- maxBytesPerMembers = 4096
- maxBytesPerBaggageString = 8192
-
- listDelimiter = ","
- keyValueDelimiter = "="
- propertyDelimiter = ";"
-
- keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
- valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
- keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
-)
-
-var (
- keyRe = regexp.MustCompile(`^` + keyDef + `$`)
- valueRe = regexp.MustCompile(`^` + valueDef + `$`)
- propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
-)
-
-var (
- errInvalidKey = errors.New("invalid key")
- errInvalidValue = errors.New("invalid value")
- errInvalidProperty = errors.New("invalid baggage list-member property")
- errInvalidMember = errors.New("invalid baggage list-member")
- errMemberNumber = errors.New("too many list-members in baggage-string")
- errMemberBytes = errors.New("list-member too large")
- errBaggageBytes = errors.New("baggage-string too large")
-)
-
-// Property is an additional metadata entry for a baggage list-member.
-type Property struct {
- key, value string
-
- // hasValue indicates if a zero-value value means the property does not
- // have a value or if it was the zero-value.
- hasValue bool
-}
-
-// NewKeyProperty returns a new Property for key.
-//
-// If key is invalid, an error will be returned.
-func NewKeyProperty(key string) (Property, error) {
- if !keyRe.MatchString(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
-
- p := Property{key: key}
- return p, nil
-}
-
-// NewKeyValueProperty returns a new Property for key with value.
-//
-// If key or value are invalid, an error will be returned.
-func NewKeyValueProperty(key, value string) (Property, error) {
- if !keyRe.MatchString(key) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
- if !valueRe.MatchString(value) {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
-
- p := Property{
- key: key,
- value: value,
- hasValue: true,
- }
- return p, nil
-}
-
-func newInvalidProperty() Property {
- return Property{}
-}
-
-// parseProperty attempts to decode a Property from the passed string. It
-// returns an error if the input is invalid according to the W3C Baggage
-// specification.
-func parseProperty(property string) (Property, error) {
- if property == "" {
- return newInvalidProperty(), nil
- }
-
- match := propertyRe.FindStringSubmatch(property)
- if len(match) != 4 {
- return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
- }
-
- var p Property
- if match[1] != "" {
- p.key = match[1]
- } else {
- p.key = match[2]
- p.value = match[3]
- p.hasValue = true
- }
-
- return p, nil
-}
-
-// validate ensures p conforms to the W3C Baggage specification, returning an
-// error otherwise.
-func (p Property) validate() error {
- errFunc := func(err error) error {
- return fmt.Errorf("invalid property: %w", err)
- }
-
- if !keyRe.MatchString(p.key) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
- }
- if p.hasValue && !valueRe.MatchString(p.value) {
- return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
- }
- if !p.hasValue && p.value != "" {
- return errFunc(errors.New("inconsistent value"))
- }
- return nil
-}
-
-// Key returns the Property key.
-func (p Property) Key() string {
- return p.key
-}
-
-// Value returns the Property value. Additionally, a boolean value is returned
-// indicating if the returned value is the empty if the Property has a value
-// that is empty or if the value is not set.
-func (p Property) Value() (string, bool) {
- return p.value, p.hasValue
-}
-
-// String encodes Property into a string compliant with the W3C Baggage
-// specification.
-func (p Property) String() string {
- if p.hasValue {
- return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
- }
- return p.key
-}
-
-type properties []Property
-
-func fromInternalProperties(iProps []baggage.Property) properties {
- if len(iProps) == 0 {
- return nil
- }
-
- props := make(properties, len(iProps))
- for i, p := range iProps {
- props[i] = Property{
- key: p.Key,
- value: p.Value,
- hasValue: p.HasValue,
- }
- }
- return props
-}
-
-func (p properties) asInternal() []baggage.Property {
- if len(p) == 0 {
- return nil
- }
-
- iProps := make([]baggage.Property, len(p))
- for i, prop := range p {
- iProps[i] = baggage.Property{
- Key: prop.key,
- Value: prop.value,
- HasValue: prop.hasValue,
- }
- }
- return iProps
-}
-
-func (p properties) Copy() properties {
- if len(p) == 0 {
- return nil
- }
-
- props := make(properties, len(p))
- copy(props, p)
- return props
-}
-
-// validate ensures each Property in p conforms to the W3C Baggage
-// specification, returning an error otherwise.
-func (p properties) validate() error {
- for _, prop := range p {
- if err := prop.validate(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// String encodes properties into a string compliant with the W3C Baggage
-// specification.
-func (p properties) String() string {
- props := make([]string, len(p))
- for i, prop := range p {
- props[i] = prop.String()
- }
- return strings.Join(props, propertyDelimiter)
-}
-
-// Member is a list-member of a baggage-string as defined by the W3C Baggage
-// specification.
-type Member struct {
- key, value string
- properties properties
-
- // hasData indicates whether the created property contains data or not.
- // Properties that do not contain data are invalid with no other check
- // required.
- hasData bool
-}
-
-// NewMember returns a new Member from the passed arguments. The key will be
-// used directly while the value will be url decoded after validation. An error
-// is returned if the created Member would be invalid according to the W3C
-// Baggage specification.
-func NewMember(key, value string, props ...Property) (Member, error) {
- m := Member{
- key: key,
- value: value,
- properties: properties(props).Copy(),
- hasData: true,
- }
- if err := m.validate(); err != nil {
- return newInvalidMember(), err
- }
- decodedValue, err := url.PathUnescape(value)
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
- m.value = decodedValue
- return m, nil
-}
-
-func newInvalidMember() Member {
- return Member{}
-}
-
-// parseMember attempts to decode a Member from the passed string. It returns
-// an error if the input is invalid according to the W3C Baggage
-// specification.
-func parseMember(member string) (Member, error) {
- if n := len(member); n > maxBytesPerMembers {
- return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
- }
-
- var (
- key, value string
- props properties
- )
-
- keyValue, properties, found := strings.Cut(member, propertyDelimiter)
- if found {
- // Parse the member properties.
- for _, pStr := range strings.Split(properties, propertyDelimiter) {
- p, err := parseProperty(pStr)
- if err != nil {
- return newInvalidMember(), err
- }
- props = append(props, p)
- }
- }
- // Parse the member key/value pair.
-
- // Take into account a value can contain equal signs (=).
- k, v, found := strings.Cut(keyValue, keyValueDelimiter)
- if !found {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
- }
- // "Leading and trailing whitespaces are allowed but MUST be trimmed
- // when converting the header into a data structure."
- key = strings.TrimSpace(k)
- var err error
- value, err = url.PathUnescape(strings.TrimSpace(v))
- if err != nil {
- return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
- }
- if !keyRe.MatchString(key) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
- }
- if !valueRe.MatchString(value) {
- return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
- }
-
- return Member{key: key, value: value, properties: props, hasData: true}, nil
-}
-
-// validate ensures m conforms to the W3C Baggage specification.
-// A key is just an ASCII string, but a value must be URL encoded UTF-8,
-// returning an error otherwise.
-func (m Member) validate() error {
- if !m.hasData {
- return fmt.Errorf("%w: %q", errInvalidMember, m)
- }
-
- if !keyRe.MatchString(m.key) {
- return fmt.Errorf("%w: %q", errInvalidKey, m.key)
- }
- if !valueRe.MatchString(m.value) {
- return fmt.Errorf("%w: %q", errInvalidValue, m.value)
- }
- return m.properties.validate()
-}
-
-// Key returns the Member key.
-func (m Member) Key() string { return m.key }
-
-// Value returns the Member value.
-func (m Member) Value() string { return m.value }
-
-// Properties returns a copy of the Member properties.
-func (m Member) Properties() []Property { return m.properties.Copy() }
-
-// String encodes Member into a string compliant with the W3C Baggage
-// specification.
-func (m Member) String() string {
- // A key is just an ASCII string, but a value is URL encoded UTF-8.
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
- if len(m.properties) > 0 {
- s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
- }
- return s
-}
-
-// Baggage is a list of baggage members representing the baggage-string as
-// defined by the W3C Baggage specification.
-type Baggage struct { //nolint:golint
- list baggage.List
-}
-
-// New returns a new valid Baggage. It returns an error if it results in a
-// Baggage exceeding limits set in that specification.
-//
-// It expects all the provided members to have already been validated.
-func New(members ...Member) (Baggage, error) {
- if len(members) == 0 {
- return Baggage{}, nil
- }
-
- b := make(baggage.List)
- for _, m := range members {
- if !m.hasData {
- return Baggage{}, errInvalidMember
- }
-
- // OpenTelemetry resolves duplicates by last-one-wins.
- b[m.key] = baggage.Item{
- Value: m.value,
- Properties: m.properties.asInternal(),
- }
- }
-
- // Check member numbers after deduplication.
- if len(b) > maxMembers {
- return Baggage{}, errMemberNumber
- }
-
- bag := Baggage{b}
- if n := len(bag.String()); n > maxBytesPerBaggageString {
- return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
- }
-
- return bag, nil
-}
-
-// Parse attempts to decode a baggage-string from the passed string. It
-// returns an error if the input is invalid according to the W3C Baggage
-// specification.
-//
-// If there are duplicate list-members contained in baggage, the last one
-// defined (reading left-to-right) will be the only one kept. This diverges
-// from the W3C Baggage specification which allows duplicate list-members, but
-// conforms to the OpenTelemetry Baggage specification.
-func Parse(bStr string) (Baggage, error) {
- if bStr == "" {
- return Baggage{}, nil
- }
-
- if n := len(bStr); n > maxBytesPerBaggageString {
- return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
- }
-
- b := make(baggage.List)
- for _, memberStr := range strings.Split(bStr, listDelimiter) {
- m, err := parseMember(memberStr)
- if err != nil {
- return Baggage{}, err
- }
- // OpenTelemetry resolves duplicates by last-one-wins.
- b[m.key] = baggage.Item{
- Value: m.value,
- Properties: m.properties.asInternal(),
- }
- }
-
- // OpenTelemetry does not allow for duplicate list-members, but the W3C
- // specification does. Now that we have deduplicated, ensure the baggage
- // does not exceed list-member limits.
- if len(b) > maxMembers {
- return Baggage{}, errMemberNumber
- }
-
- return Baggage{b}, nil
-}
-
-// Member returns the baggage list-member identified by key.
-//
-// If there is no list-member matching the passed key the returned Member will
-// be a zero-value Member.
-// The returned member is not validated, as we assume the validation happened
-// when it was added to the Baggage.
-func (b Baggage) Member(key string) Member {
- v, ok := b.list[key]
- if !ok {
- // We do not need to worry about distinguishing between the situation
- // where a zero-valued Member is included in the Baggage because a
- // zero-valued Member is invalid according to the W3C Baggage
- // specification (it has an empty key).
- return newInvalidMember()
- }
-
- return Member{
- key: key,
- value: v.Value,
- properties: fromInternalProperties(v.Properties),
- hasData: true,
- }
-}
-
-// Members returns all the baggage list-members.
-// The order of the returned list-members does not have significance.
-//
-// The returned members are not validated, as we assume the validation happened
-// when they were added to the Baggage.
-func (b Baggage) Members() []Member {
- if len(b.list) == 0 {
- return nil
- }
-
- members := make([]Member, 0, len(b.list))
- for k, v := range b.list {
- members = append(members, Member{
- key: k,
- value: v.Value,
- properties: fromInternalProperties(v.Properties),
- hasData: true,
- })
- }
- return members
-}
-
-// SetMember returns a copy the Baggage with the member included. If the
-// baggage contains a Member with the same key the existing Member is
-// replaced.
-//
-// If member is invalid according to the W3C Baggage specification, an error
-// is returned with the original Baggage.
-func (b Baggage) SetMember(member Member) (Baggage, error) {
- if !member.hasData {
- return b, errInvalidMember
- }
-
- n := len(b.list)
- if _, ok := b.list[member.key]; !ok {
- n++
- }
- list := make(baggage.List, n)
-
- for k, v := range b.list {
- // Do not copy if we are just going to overwrite.
- if k == member.key {
- continue
- }
- list[k] = v
- }
-
- list[member.key] = baggage.Item{
- Value: member.value,
- Properties: member.properties.asInternal(),
- }
-
- return Baggage{list: list}, nil
-}
-
-// DeleteMember returns a copy of the Baggage with the list-member identified
-// by key removed.
-func (b Baggage) DeleteMember(key string) Baggage {
- n := len(b.list)
- if _, ok := b.list[key]; ok {
- n--
- }
- list := make(baggage.List, n)
-
- for k, v := range b.list {
- if k == key {
- continue
- }
- list[k] = v
- }
-
- return Baggage{list: list}
-}
-
-// Len returns the number of list-members in the Baggage.
-func (b Baggage) Len() int {
- return len(b.list)
-}
-
-// String encodes Baggage into a string compliant with the W3C Baggage
-// specification. The returned string will be invalid if the Baggage contains
-// any invalid list-members.
-func (b Baggage) String() string {
- members := make([]string, 0, len(b.list))
- for k, v := range b.list {
- members = append(members, Member{
- key: k,
- value: v.Value,
- properties: fromInternalProperties(v.Properties),
- }.String())
- }
- return strings.Join(members, listDelimiter)
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
deleted file mode 100644
index 24b34b7..0000000
--- a/vendor/go.opentelemetry.io/otel/baggage/context.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package baggage // import "go.opentelemetry.io/otel/baggage"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/internal/baggage"
-)
-
-// ContextWithBaggage returns a copy of parent with baggage.
-func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
- // Delegate so any hooks for the OpenTracing bridge are handled.
- return baggage.ContextWithList(parent, b.list)
-}
-
-// ContextWithoutBaggage returns a copy of parent with no baggage.
-func ContextWithoutBaggage(parent context.Context) context.Context {
- // Delegate so any hooks for the OpenTracing bridge are handled.
- return baggage.ContextWithList(parent, nil)
-}
-
-// FromContext returns the baggage contained in ctx.
-func FromContext(ctx context.Context) Baggage {
- // Delegate so any hooks for the OpenTracing bridge are handled.
- return Baggage{list: baggage.ListFromContext(ctx)}
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
deleted file mode 100644
index 4545100..0000000
--- a/vendor/go.opentelemetry.io/otel/baggage/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package baggage provides functionality for storing and retrieving
-baggage items in Go context. For propagating the baggage, see the
-go.opentelemetry.io/otel/propagation package.
-*/
-package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
deleted file mode 100644
index 587ebae..0000000
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package codes // import "go.opentelemetry.io/otel/codes"
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
-)
-
-const (
- // Unset is the default status code.
- Unset Code = 0
-
- // Error indicates the operation contains an error.
- //
- // NOTE: The error code in OTLP is 2.
- // The value of this enum is only relevant to the internals
- // of the Go SDK.
- Error Code = 1
-
- // Ok indicates operation has been validated by an Application developers
- // or Operator to have completed successfully, or contain no error.
- //
- // NOTE: The Ok code in OTLP is 1.
- // The value of this enum is only relevant to the internals
- // of the Go SDK.
- Ok Code = 2
-
- maxCode = 3
-)
-
-// Code is an 32-bit representation of a status state.
-type Code uint32
-
-var codeToStr = map[Code]string{
- Unset: "Unset",
- Error: "Error",
- Ok: "Ok",
-}
-
-var strToCode = map[string]Code{
- `"Unset"`: Unset,
- `"Error"`: Error,
- `"Ok"`: Ok,
-}
-
-// String returns the Code as a string.
-func (c Code) String() string {
- return codeToStr[c]
-}
-
-// UnmarshalJSON unmarshals b into the Code.
-//
-// This is based on the functionality in the gRPC codes package:
-// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
-func (c *Code) UnmarshalJSON(b []byte) error {
- // From json.Unmarshaler: By convention, to approximate the behavior of
- // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
- // a no-op.
- if string(b) == "null" {
- return nil
- }
- if c == nil {
- return fmt.Errorf("nil receiver passed to UnmarshalJSON")
- }
-
- var x interface{}
- if err := json.Unmarshal(b, &x); err != nil {
- return err
- }
- switch x.(type) {
- case string:
- if jc, ok := strToCode[string(b)]; ok {
- *c = jc
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
- case float64:
- if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
- if ci >= maxCode {
- return fmt.Errorf("invalid code: %q", ci)
- }
-
- *c = Code(ci)
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
- default:
- return fmt.Errorf("invalid code: %q", string(b))
- }
-}
-
-// MarshalJSON returns c as the JSON encoding of c.
-func (c *Code) MarshalJSON() ([]byte, error) {
- if c == nil {
- return []byte("null"), nil
- }
- str, ok := codeToStr[*c]
- if !ok {
- return nil, fmt.Errorf("invalid code: %d", *c)
- }
- return []byte(fmt.Sprintf("%q", str)), nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
deleted file mode 100644
index 4e328fb..0000000
--- a/vendor/go.opentelemetry.io/otel/codes/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package codes defines the canonical error codes used by OpenTelemetry.
-
-It conforms to [the OpenTelemetry
-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
-*/
-package codes // import "go.opentelemetry.io/otel/codes"
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
deleted file mode 100644
index daa36c8..0000000
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package otel provides global access to the OpenTelemetry API. The subpackages of
-the otel package provide an implementation of the OpenTelemetry API.
-
-The provided API is used to instrument code and measure data about that code's
-performance and operation. The measured data, by default, is not processed or
-transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
-default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
-exporters are used to process and transport this data.
-
-To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.
-
-To read more about tracing, see go.opentelemetry.io/otel/trace.
-
-To read more about metrics, see go.opentelemetry.io/otel/metric.
-
-To read more about propagation, see go.opentelemetry.io/otel/propagation and
-go.opentelemetry.io/otel/baggage.
-*/
-package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
deleted file mode 100644
index 72fad85..0000000
--- a/vendor/go.opentelemetry.io/otel/error_handler.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-// ErrorHandler handles irremediable events.
-type ErrorHandler interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Handle handles any error deemed irremediable by an OpenTelemetry
- // component.
- Handle(error)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// ErrorHandlerFunc is a convenience adapter to allow the use of a function
-// as an ErrorHandler.
-type ErrorHandlerFunc func(error)
-
-var _ ErrorHandler = ErrorHandlerFunc(nil)
-
-// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
-func (f ErrorHandlerFunc) Handle(err error) {
- f(err)
-}
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
deleted file mode 100644
index 9a58fb1..0000000
--- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-top_dir='.'
-if [[ $# -gt 0 ]]; then
- top_dir="${1}"
-fi
-
-p=$(pwd)
-mod_dirs=()
-
-# Note `mapfile` does not exist in older bash versions:
-# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
-
-while IFS= read -r line; do
- mod_dirs+=("$line")
-done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-for mod_dir in "${mod_dirs[@]}"; do
- cd "${mod_dir}"
-
- while IFS= read -r line; do
- echo ".${line#${p}}"
- done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
- cd "${p}"
-done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
deleted file mode 100644
index 4115fe3..0000000
--- a/vendor/go.opentelemetry.io/otel/handler.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
-)
-
-var (
- // Compile-time check global.ErrDelegator implements ErrorHandler.
- _ ErrorHandler = (*global.ErrDelegator)(nil)
- // Compile-time check global.ErrLogger implements ErrorHandler.
- _ ErrorHandler = (*global.ErrLogger)(nil)
-)
-
-// GetErrorHandler returns the global ErrorHandler instance.
-//
-// The default ErrorHandler instance returned will log all errors to STDERR
-// until an override ErrorHandler is set with SetErrorHandler. All
-// ErrorHandler returned prior to this will automatically forward errors to
-// the set instance instead of logging.
-//
-// Subsequent calls to SetErrorHandler after the first will not forward errors
-// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
-
-// SetErrorHandler sets the global ErrorHandler to h.
-//
-// The first time this is called all ErrorHandler previously returned from
-// GetErrorHandler will send errors to h instead of the default logging
-// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
-// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
-
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) { global.Handle(err) }
diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
deleted file mode 100644
index 622c3ee..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package attribute provide several helper functions for some commonly used
-logic of processing attributes.
-*/
-package attribute // import "go.opentelemetry.io/otel/internal/attribute"
-
-import (
- "reflect"
-)
-
-// BoolSliceValue converts a bool slice into an array with same elements as slice.
-func BoolSliceValue(v []bool) interface{} {
- var zero bool
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
- return cp.Elem().Interface()
-}
-
-// Int64SliceValue converts an int64 slice into an array with same elements as slice.
-func Int64SliceValue(v []int64) interface{} {
- var zero int64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
- return cp.Elem().Interface()
-}
-
-// Float64SliceValue converts a float64 slice into an array with same elements as slice.
-func Float64SliceValue(v []float64) interface{} {
- var zero float64
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
- return cp.Elem().Interface()
-}
-
-// StringSliceValue converts a string slice into an array with same elements as slice.
-func StringSliceValue(v []string) interface{} {
- var zero string
- cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
- copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
- return cp.Elem().Interface()
-}
-
-// AsBoolSlice converts a bool array into a slice into with same elements as array.
-func AsBoolSlice(v interface{}) []bool {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- var zero bool
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
-}
-
-// AsInt64Slice converts an int64 array into a slice into with same elements as array.
-func AsInt64Slice(v interface{}) []int64 {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- var zero int64
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
-}
-
-// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
-func AsFloat64Slice(v interface{}) []float64 {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- var zero float64
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
-}
-
-// AsStringSlice converts a string array into a slice into with same elements as array.
-func AsStringSlice(v interface{}) []string {
- rv := reflect.ValueOf(v)
- if rv.Type().Kind() != reflect.Array {
- return nil
- }
- var zero string
- correctLen := rv.Len()
- correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
- cpy := reflect.New(correctType)
- _ = reflect.Copy(cpy.Elem(), rv)
- return cpy.Elem().Slice(0, correctLen).Interface().([]string)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
deleted file mode 100644
index b96e540..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package baggage provides base types and functionality to store and retrieve
-baggage in Go context. This package exists because the OpenTracing bridge to
-OpenTelemetry needs to synchronize state whenever baggage for a context is
-modified and that context contains an OpenTracing span. If it were not for
-this need this package would not need to exist and the
-`go.opentelemetry.io/otel/baggage` package would be the singular place where
-W3C baggage is handled.
-*/
-package baggage // import "go.opentelemetry.io/otel/internal/baggage"
-
-// List is the collection of baggage members. The W3C allows for duplicates,
-// but OpenTelemetry does not, therefore, this is represented as a map.
-type List map[string]Item
-
-// Item is the value and metadata properties part of a list-member.
-type Item struct {
- Value string
- Properties []Property
-}
-
-// Property is a metadata entry for a list-member.
-type Property struct {
- Key, Value string
-
- // HasValue indicates if a zero-value value means the property does not
- // have a value or if it was the zero-value.
- HasValue bool
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
deleted file mode 100644
index 4469700..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package baggage // import "go.opentelemetry.io/otel/internal/baggage"
-
-import "context"
-
-type baggageContextKeyType int
-
-const baggageKey baggageContextKeyType = iota
-
-// SetHookFunc is a callback called when storing baggage in the context.
-type SetHookFunc func(context.Context, List) context.Context
-
-// GetHookFunc is a callback called when getting baggage from the context.
-type GetHookFunc func(context.Context, List) List
-
-type baggageState struct {
- list List
-
- setHook SetHookFunc
- getHook GetHookFunc
-}
-
-// ContextWithSetHook returns a copy of parent with hook configured to be
-// invoked every time ContextWithBaggage is called.
-//
-// Passing nil SetHookFunc creates a context with no set hook to call.
-func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
- var s baggageState
- if v, ok := parent.Value(baggageKey).(baggageState); ok {
- s = v
- }
-
- s.setHook = hook
- return context.WithValue(parent, baggageKey, s)
-}
-
-// ContextWithGetHook returns a copy of parent with hook configured to be
-// invoked every time FromContext is called.
-//
-// Passing nil GetHookFunc creates a context with no get hook to call.
-func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
- var s baggageState
- if v, ok := parent.Value(baggageKey).(baggageState); ok {
- s = v
- }
-
- s.getHook = hook
- return context.WithValue(parent, baggageKey, s)
-}
-
-// ContextWithList returns a copy of parent with baggage. Passing nil list
-// returns a context without any baggage.
-func ContextWithList(parent context.Context, list List) context.Context {
- var s baggageState
- if v, ok := parent.Value(baggageKey).(baggageState); ok {
- s = v
- }
-
- s.list = list
- ctx := context.WithValue(parent, baggageKey, s)
- if s.setHook != nil {
- ctx = s.setHook(ctx, list)
- }
-
- return ctx
-}
-
-// ListFromContext returns the baggage contained in ctx.
-func ListFromContext(ctx context.Context) List {
- switch v := ctx.Value(baggageKey).(type) {
- case baggageState:
- if v.getHook != nil {
- return v.getHook(ctx, v.list)
- }
- return v.list
- default:
- return nil
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go
deleted file mode 100644
index f532f07..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/gen.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/internal"
-
-//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
-//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
-//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
-
-//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
-//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
-//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
-//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
-//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
-//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
deleted file mode 100644
index 5e9b830..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "log"
- "os"
- "sync/atomic"
-)
-
-var (
- // GlobalErrorHandler provides an ErrorHandler that can be used
- // throughout an OpenTelemetry instrumented project. When a user
- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to
- // `Handle` and will be delegated to the registered ErrorHandler.
- GlobalErrorHandler = defaultErrorHandler()
-
- // Compile-time check that delegator implements ErrorHandler.
- _ ErrorHandler = (*ErrDelegator)(nil)
- // Compile-time check that errLogger implements ErrorHandler.
- _ ErrorHandler = (*ErrLogger)(nil)
-)
-
-// ErrorHandler handles irremediable events.
-type ErrorHandler interface {
- // Handle handles any error deemed irremediable by an OpenTelemetry
- // component.
- Handle(error)
-}
-
-type ErrDelegator struct {
- delegate atomic.Pointer[ErrorHandler]
-}
-
-func (d *ErrDelegator) Handle(err error) {
- d.getDelegate().Handle(err)
-}
-
-func (d *ErrDelegator) getDelegate() ErrorHandler {
- return *d.delegate.Load()
-}
-
-// setDelegate sets the ErrorHandler delegate.
-func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
- d.delegate.Store(&eh)
-}
-
-func defaultErrorHandler() *ErrDelegator {
- d := &ErrDelegator{}
- d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
- return d
-}
-
-// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
-type ErrLogger struct {
- l *log.Logger
-}
-
-// Handle logs err if no delegate is set, otherwise it is delegated.
-func (h *ErrLogger) Handle(err error) {
- h.l.Print(err)
-}
-
-// GetErrorHandler returns the global ErrorHandler instance.
-//
-// The default ErrorHandler instance returned will log all errors to STDERR
-// until an override ErrorHandler is set with SetErrorHandler. All
-// ErrorHandler returned prior to this will automatically forward errors to
-// the set instance instead of logging.
-//
-// Subsequent calls to SetErrorHandler after the first will not forward errors
-// to the new ErrorHandler for prior returned instances.
-func GetErrorHandler() ErrorHandler {
- return GlobalErrorHandler
-}
-
-// SetErrorHandler sets the global ErrorHandler to h.
-//
-// The first time this is called all ErrorHandler previously returned from
-// GetErrorHandler will send errors to h instead of the default logging
-// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
-// delegate errors to h.
-func SetErrorHandler(h ErrorHandler) {
- GlobalErrorHandler.setDelegate(h)
-}
-
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) {
- GetErrorHandler().Handle(err)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
deleted file mode 100644
index ebb13c2..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
+++ /dev/null
@@ -1,371 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "context"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// unwrapper unwraps to return the underlying instrument implementation.
-type unwrapper interface {
- Unwrap() metric.Observable
-}
-
-type afCounter struct {
- embedded.Float64ObservableCounter
- metric.Float64Observable
-
- name string
- opts []metric.Float64ObservableCounterOption
-
- delegate atomic.Value // metric.Float64ObservableCounter
-}
-
-var (
- _ unwrapper = (*afCounter)(nil)
- _ metric.Float64ObservableCounter = (*afCounter)(nil)
-)
-
-func (i *afCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afCounter) Unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Float64ObservableCounter)
- }
- return nil
-}
-
-type afUpDownCounter struct {
- embedded.Float64ObservableUpDownCounter
- metric.Float64Observable
-
- name string
- opts []metric.Float64ObservableUpDownCounterOption
-
- delegate atomic.Value // metric.Float64ObservableUpDownCounter
-}
-
-var (
- _ unwrapper = (*afUpDownCounter)(nil)
- _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
-)
-
-func (i *afUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afUpDownCounter) Unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Float64ObservableUpDownCounter)
- }
- return nil
-}
-
-type afGauge struct {
- embedded.Float64ObservableGauge
- metric.Float64Observable
-
- name string
- opts []metric.Float64ObservableGaugeOption
-
- delegate atomic.Value // metric.Float64ObservableGauge
-}
-
-var (
- _ unwrapper = (*afGauge)(nil)
- _ metric.Float64ObservableGauge = (*afGauge)(nil)
-)
-
-func (i *afGauge) setDelegate(m metric.Meter) {
- ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *afGauge) Unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Float64ObservableGauge)
- }
- return nil
-}
-
-type aiCounter struct {
- embedded.Int64ObservableCounter
- metric.Int64Observable
-
- name string
- opts []metric.Int64ObservableCounterOption
-
- delegate atomic.Value // metric.Int64ObservableCounter
-}
-
-var (
- _ unwrapper = (*aiCounter)(nil)
- _ metric.Int64ObservableCounter = (*aiCounter)(nil)
-)
-
-func (i *aiCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiCounter) Unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Int64ObservableCounter)
- }
- return nil
-}
-
-type aiUpDownCounter struct {
- embedded.Int64ObservableUpDownCounter
- metric.Int64Observable
-
- name string
- opts []metric.Int64ObservableUpDownCounterOption
-
- delegate atomic.Value // metric.Int64ObservableUpDownCounter
-}
-
-var (
- _ unwrapper = (*aiUpDownCounter)(nil)
- _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
-)
-
-func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiUpDownCounter) Unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Int64ObservableUpDownCounter)
- }
- return nil
-}
-
-type aiGauge struct {
- embedded.Int64ObservableGauge
- metric.Int64Observable
-
- name string
- opts []metric.Int64ObservableGaugeOption
-
- delegate atomic.Value // metric.Int64ObservableGauge
-}
-
-var (
- _ unwrapper = (*aiGauge)(nil)
- _ metric.Int64ObservableGauge = (*aiGauge)(nil)
-)
-
-func (i *aiGauge) setDelegate(m metric.Meter) {
- ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *aiGauge) Unwrap() metric.Observable {
- if ctr := i.delegate.Load(); ctr != nil {
- return ctr.(metric.Int64ObservableGauge)
- }
- return nil
-}
-
-// Sync Instruments.
-type sfCounter struct {
- embedded.Float64Counter
-
- name string
- opts []metric.Float64CounterOption
-
- delegate atomic.Value // metric.Float64Counter
-}
-
-var _ metric.Float64Counter = (*sfCounter)(nil)
-
-func (i *sfCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64Counter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64Counter).Add(ctx, incr, opts...)
- }
-}
-
-type sfUpDownCounter struct {
- embedded.Float64UpDownCounter
-
- name string
- opts []metric.Float64UpDownCounterOption
-
- delegate atomic.Value // metric.Float64UpDownCounter
-}
-
-var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
-
-func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...)
- }
-}
-
-type sfHistogram struct {
- embedded.Float64Histogram
-
- name string
- opts []metric.Float64HistogramOption
-
- delegate atomic.Value // metric.Float64Histogram
-}
-
-var _ metric.Float64Histogram = (*sfHistogram)(nil)
-
-func (i *sfHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.Float64Histogram(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Float64Histogram).Record(ctx, x, opts...)
- }
-}
-
-type siCounter struct {
- embedded.Int64Counter
-
- name string
- opts []metric.Int64CounterOption
-
- delegate atomic.Value // metric.Int64Counter
-}
-
-var _ metric.Int64Counter = (*siCounter)(nil)
-
-func (i *siCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64Counter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64Counter).Add(ctx, x, opts...)
- }
-}
-
-type siUpDownCounter struct {
- embedded.Int64UpDownCounter
-
- name string
- opts []metric.Int64UpDownCounterOption
-
- delegate atomic.Value // metric.Int64UpDownCounter
-}
-
-var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
-
-func (i *siUpDownCounter) setDelegate(m metric.Meter) {
- ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
- }
-}
-
-type siHistogram struct {
- embedded.Int64Histogram
-
- name string
- opts []metric.Int64HistogramOption
-
- delegate atomic.Value // metric.Int64Histogram
-}
-
-var _ metric.Int64Histogram = (*siHistogram)(nil)
-
-func (i *siHistogram) setDelegate(m metric.Meter) {
- ctr, err := m.Int64Histogram(i.name, i.opts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- return
- }
- i.delegate.Store(ctr)
-}
-
-func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
- if ctr := i.delegate.Load(); ctr != nil {
- ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
deleted file mode 100644
index c6f305a..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "log"
- "os"
- "sync/atomic"
-
- "github.com/go-logr/logr"
- "github.com/go-logr/stdr"
-)
-
-// globalLogger is the logging interface used within the otel api and sdk provide details of the internals.
-//
-// The default logger uses stdr which is backed by the standard `log.Logger`
-// interface. This logger will only show messages at the Error Level.
-var globalLogger atomic.Pointer[logr.Logger]
-
-func init() {
- SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
-}
-
-// SetLogger overrides the globalLogger with l.
-//
-// To see Warn messages use a logger with `l.V(1).Enabled() == true`
-// To see Info messages use a logger with `l.V(4).Enabled() == true`
-// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
-func SetLogger(l logr.Logger) {
- globalLogger.Store(&l)
-}
-
-func getLogger() logr.Logger {
- return *globalLogger.Load()
-}
-
-// Info prints messages about the general state of the API or SDK.
-// This should usually be less than 5 messages a minute.
-func Info(msg string, keysAndValues ...interface{}) {
- getLogger().V(4).Info(msg, keysAndValues...)
-}
-
-// Error prints messages about exceptional states of the API or SDK.
-func Error(err error, msg string, keysAndValues ...interface{}) {
- getLogger().Error(err, msg, keysAndValues...)
-}
-
-// Debug prints messages about all internal changes in the API or SDK.
-func Debug(msg string, keysAndValues ...interface{}) {
- getLogger().V(8).Info(msg, keysAndValues...)
-}
-
-// Warn prints messages about warnings in the API or SDK.
-// Not an error but is likely more important than an informational event.
-func Warn(msg string, keysAndValues ...interface{}) {
- getLogger().V(1).Info(msg, keysAndValues...)
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
deleted file mode 100644
index 0097db4..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "container/list"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// meterProvider is a placeholder for a configured SDK MeterProvider.
-//
-// All MeterProvider functionality is forwarded to a delegate once
-// configured.
-type meterProvider struct {
- embedded.MeterProvider
-
- mtx sync.Mutex
- meters map[il]*meter
-
- delegate metric.MeterProvider
-}
-
-// setDelegate configures p to delegate all MeterProvider functionality to
-// provider.
-//
-// All Meters provided prior to this function call are switched out to be
-// Meters provided by provider. All instruments and callbacks are recreated and
-// delegated.
-//
-// It is guaranteed by the caller that this happens only once.
-func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- p.delegate = provider
-
- if len(p.meters) == 0 {
- return
- }
-
- for _, meter := range p.meters {
- meter.setDelegate(provider)
- }
-
- p.meters = nil
-}
-
-// Meter implements MeterProvider.
-func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Meter(name, opts...)
- }
-
- // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map.
-
- c := metric.NewMeterConfig(opts...)
- key := il{
- name: name,
- version: c.InstrumentationVersion(),
- }
-
- if p.meters == nil {
- p.meters = make(map[il]*meter)
- }
-
- if val, ok := p.meters[key]; ok {
- return val
- }
-
- t := &meter{name: name, opts: opts}
- p.meters[key] = t
- return t
-}
-
-// meter is a placeholder for a metric.Meter.
-//
-// All Meter functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopMeter.
-type meter struct {
- embedded.Meter
-
- name string
- opts []metric.MeterOption
-
- mtx sync.Mutex
- instruments []delegatedInstrument
-
- registry list.List
-
- delegate atomic.Value // metric.Meter
-}
-
-type delegatedInstrument interface {
- setDelegate(metric.Meter)
-}
-
-// setDelegate configures m to delegate all Meter functionality to Meters
-// created by provider.
-//
-// All subsequent calls to the Meter methods will be passed to the delegate.
-//
-// It is guaranteed by the caller that this happens only once.
-func (m *meter) setDelegate(provider metric.MeterProvider) {
- meter := provider.Meter(m.name, m.opts...)
- m.delegate.Store(meter)
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for _, inst := range m.instruments {
- inst.setDelegate(meter)
- }
-
- for e := m.registry.Front(); e != nil; e = e.Next() {
- r := e.Value.(*registration)
- r.setDelegate(meter)
- m.registry.Remove(e)
- }
-
- m.instruments = nil
- m.registry.Init()
-}
-
-func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64Counter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &siCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64UpDownCounter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &siUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64Histogram(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &siHistogram{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64ObservableCounter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &aiCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64ObservableUpDownCounter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &aiUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Int64ObservableGauge(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &aiGauge{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64Counter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &sfCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64UpDownCounter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &sfUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64Histogram(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &sfHistogram{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64ObservableCounter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &afCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64ObservableUpDownCounter(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &afUpDownCounter{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- return del.Float64ObservableGauge(name, options...)
- }
- m.mtx.Lock()
- defer m.mtx.Unlock()
- i := &afGauge{name: name, opts: options}
- m.instruments = append(m.instruments, i)
- return i, nil
-}
-
-// RegisterCallback captures the function that will be called during Collect.
-func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
- if del, ok := m.delegate.Load().(metric.Meter); ok {
- insts = unwrapInstruments(insts)
- return del.RegisterCallback(f, insts...)
- }
-
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- reg := ®istration{instruments: insts, function: f}
- e := m.registry.PushBack(reg)
- reg.unreg = func() error {
- m.mtx.Lock()
- _ = m.registry.Remove(e)
- m.mtx.Unlock()
- return nil
- }
- return reg, nil
-}
-
-type wrapped interface {
- unwrap() metric.Observable
-}
-
-func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
- out := make([]metric.Observable, 0, len(instruments))
-
- for _, inst := range instruments {
- if in, ok := inst.(wrapped); ok {
- out = append(out, in.unwrap())
- } else {
- out = append(out, inst)
- }
- }
-
- return out
-}
-
-type registration struct {
- embedded.Registration
-
- instruments []metric.Observable
- function metric.Callback
-
- unreg func() error
- unregMu sync.Mutex
-}
-
-func (c *registration) setDelegate(m metric.Meter) {
- insts := unwrapInstruments(c.instruments)
-
- c.unregMu.Lock()
- defer c.unregMu.Unlock()
-
- if c.unreg == nil {
- // Unregister already called.
- return
- }
-
- reg, err := m.RegisterCallback(c.function, insts...)
- if err != nil {
- GetErrorHandler().Handle(err)
- }
-
- c.unreg = reg.Unregister
-}
-
-func (c *registration) Unregister() error {
- c.unregMu.Lock()
- defer c.unregMu.Unlock()
- if c.unreg == nil {
- // Unregister already called.
- return nil
- }
-
- var err error
- err, c.unreg = c.unreg(), nil
- return err
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
deleted file mode 100644
index 06bac35..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel/propagation"
-)
-
-// textMapPropagator is a default TextMapPropagator that delegates calls to a
-// registered delegate if one is set, otherwise it defaults to delegating the
-// calls to a the default no-op propagation.TextMapPropagator.
-type textMapPropagator struct {
- mtx sync.Mutex
- once sync.Once
- delegate propagation.TextMapPropagator
- noop propagation.TextMapPropagator
-}
-
-// Compile-time guarantee that textMapPropagator implements the
-// propagation.TextMapPropagator interface.
-var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
-
-func newTextMapPropagator() *textMapPropagator {
- return &textMapPropagator{
- noop: propagation.NewCompositeTextMapPropagator(),
- }
-}
-
-// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
-// forwarded to. Delegation can only be performed once, all subsequent calls
-// perform no delegation.
-func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
- if delegate == nil {
- return
- }
-
- p.mtx.Lock()
- p.once.Do(func() { p.delegate = delegate })
- p.mtx.Unlock()
-}
-
-// effectiveDelegate returns the current delegate of p if one is set,
-// otherwise the default noop TextMapPropagator is returned. This method
-// can be called concurrently.
-func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
- p.mtx.Lock()
- defer p.mtx.Unlock()
- if p.delegate != nil {
- return p.delegate
- }
- return p.noop
-}
-
-// Inject set cross-cutting concerns from the Context into the carrier.
-func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
- p.effectiveDelegate().Inject(ctx, carrier)
-}
-
-// Extract reads cross-cutting concerns from the carrier into a Context.
-func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
- return p.effectiveDelegate().Extract(ctx, carrier)
-}
-
-// Fields returns the keys whose values are set with Inject.
-func (p *textMapPropagator) Fields() []string {
- return p.effectiveDelegate().Fields()
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
deleted file mode 100644
index 7985005..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/state.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-import (
- "errors"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/metric"
- "go.opentelemetry.io/otel/propagation"
- "go.opentelemetry.io/otel/trace"
-)
-
-type (
- tracerProviderHolder struct {
- tp trace.TracerProvider
- }
-
- propagatorsHolder struct {
- tm propagation.TextMapPropagator
- }
-
- meterProviderHolder struct {
- mp metric.MeterProvider
- }
-)
-
-var (
- globalTracer = defaultTracerValue()
- globalPropagators = defaultPropagatorsValue()
- globalMeterProvider = defaultMeterProvider()
-
- delegateTraceOnce sync.Once
- delegateTextMapPropagatorOnce sync.Once
- delegateMeterOnce sync.Once
-)
-
-// TracerProvider is the internal implementation for global.TracerProvider.
-func TracerProvider() trace.TracerProvider {
- return globalTracer.Load().(tracerProviderHolder).tp
-}
-
-// SetTracerProvider is the internal implementation for global.SetTracerProvider.
-func SetTracerProvider(tp trace.TracerProvider) {
- current := TracerProvider()
-
- if _, cOk := current.(*tracerProvider); cOk {
- if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
- // Do not assign the default delegating TracerProvider to delegate
- // to itself.
- Error(
- errors.New("no delegate configured in tracer provider"),
- "Setting tracer provider to it's current value. No delegate will be configured",
- )
- return
- }
- }
-
- delegateTraceOnce.Do(func() {
- if def, ok := current.(*tracerProvider); ok {
- def.setDelegate(tp)
- }
- })
- globalTracer.Store(tracerProviderHolder{tp: tp})
-}
-
-// TextMapPropagator is the internal implementation for global.TextMapPropagator.
-func TextMapPropagator() propagation.TextMapPropagator {
- return globalPropagators.Load().(propagatorsHolder).tm
-}
-
-// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
-func SetTextMapPropagator(p propagation.TextMapPropagator) {
- current := TextMapPropagator()
-
- if _, cOk := current.(*textMapPropagator); cOk {
- if _, pOk := p.(*textMapPropagator); pOk && current == p {
- // Do not assign the default delegating TextMapPropagator to
- // delegate to itself.
- Error(
- errors.New("no delegate configured in text map propagator"),
- "Setting text map propagator to it's current value. No delegate will be configured",
- )
- return
- }
- }
-
- // For the textMapPropagator already returned by TextMapPropagator
- // delegate to p.
- delegateTextMapPropagatorOnce.Do(func() {
- if def, ok := current.(*textMapPropagator); ok {
- def.SetDelegate(p)
- }
- })
- // Return p when subsequent calls to TextMapPropagator are made.
- globalPropagators.Store(propagatorsHolder{tm: p})
-}
-
-// MeterProvider is the internal implementation for global.MeterProvider.
-func MeterProvider() metric.MeterProvider {
- return globalMeterProvider.Load().(meterProviderHolder).mp
-}
-
-// SetMeterProvider is the internal implementation for global.SetMeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
- current := MeterProvider()
- if _, cOk := current.(*meterProvider); cOk {
- if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
- // Do not assign the default delegating MeterProvider to delegate
- // to itself.
- Error(
- errors.New("no delegate configured in meter provider"),
- "Setting meter provider to it's current value. No delegate will be configured",
- )
- return
- }
- }
-
- delegateMeterOnce.Do(func() {
- if def, ok := current.(*meterProvider); ok {
- def.setDelegate(mp)
- }
- })
- globalMeterProvider.Store(meterProviderHolder{mp: mp})
-}
-
-func defaultTracerValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(tracerProviderHolder{tp: &tracerProvider{}})
- return v
-}
-
-func defaultPropagatorsValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(propagatorsHolder{tm: newTextMapPropagator()})
- return v
-}
-
-func defaultMeterProvider() *atomic.Value {
- v := &atomic.Value{}
- v.Store(meterProviderHolder{mp: &meterProvider{}})
- return v
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
deleted file mode 100644
index 3f61ec1..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global // import "go.opentelemetry.io/otel/internal/global"
-
-/*
-This file contains the forwarding implementation of the TracerProvider used as
-the default global instance. Prior to initialization of an SDK, Tracers
-returned by the global TracerProvider will provide no-op functionality. This
-means that all Span created prior to initialization are no-op Spans.
-
-Once an SDK has been initialized, all provided no-op Tracers are swapped for
-Tracers provided by the SDK defined TracerProvider. However, any Span started
-prior to this initialization does not change its behavior. Meaning, the Span
-remains a no-op Span.
-
-The implementation to track and swap Tracers locks all new Tracer creation
-until the swap is complete. This assumes that this operation is not
-performance-critical. If that assumption is incorrect, be sure to configure an
-SDK prior to any Tracer creation.
-*/
-
-import (
- "context"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// tracerProvider is a placeholder for a configured SDK TracerProvider.
-//
-// All TracerProvider functionality is forwarded to a delegate once
-// configured.
-type tracerProvider struct {
- embedded.TracerProvider
-
- mtx sync.Mutex
- tracers map[il]*tracer
- delegate trace.TracerProvider
-}
-
-// Compile-time guarantee that tracerProvider implements the TracerProvider
-// interface.
-var _ trace.TracerProvider = &tracerProvider{}
-
-// setDelegate configures p to delegate all TracerProvider functionality to
-// provider.
-//
-// All Tracers provided prior to this function call are switched out to be
-// Tracers provided by provider.
-//
-// It is guaranteed by the caller that this happens only once.
-func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- p.delegate = provider
-
- if len(p.tracers) == 0 {
- return
- }
-
- for _, t := range p.tracers {
- t.setDelegate(provider)
- }
-
- p.tracers = nil
-}
-
-// Tracer implements TracerProvider.
-func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Tracer(name, opts...)
- }
-
- // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map.
-
- c := trace.NewTracerConfig(opts...)
- key := il{
- name: name,
- version: c.InstrumentationVersion(),
- }
-
- if p.tracers == nil {
- p.tracers = make(map[il]*tracer)
- }
-
- if val, ok := p.tracers[key]; ok {
- return val
- }
-
- t := &tracer{name: name, opts: opts, provider: p}
- p.tracers[key] = t
- return t
-}
-
-type il struct {
- name string
- version string
-}
-
-// tracer is a placeholder for a trace.Tracer.
-//
-// All Tracer functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopTracer.
-type tracer struct {
- embedded.Tracer
-
- name string
- opts []trace.TracerOption
- provider *tracerProvider
-
- delegate atomic.Value
-}
-
-// Compile-time guarantee that tracer implements the trace.Tracer interface.
-var _ trace.Tracer = &tracer{}
-
-// setDelegate configures t to delegate all Tracer functionality to Tracers
-// created by provider.
-//
-// All subsequent calls to the Tracer methods will be passed to the delegate.
-//
-// It is guaranteed by the caller that this happens only once.
-func (t *tracer) setDelegate(provider trace.TracerProvider) {
- t.delegate.Store(provider.Tracer(t.name, t.opts...))
-}
-
-// Start implements trace.Tracer by forwarding the call to t.delegate if
-// set, otherwise it forwards the call to a NoopTracer.
-func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
- delegate := t.delegate.Load()
- if delegate != nil {
- return delegate.(trace.Tracer).Start(ctx, name, opts...)
- }
-
- s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
- ctx = trace.ContextWithSpan(ctx, s)
- return ctx, s
-}
-
-// nonRecordingSpan is a minimal implementation of a Span that wraps a
-// SpanContext. It performs no operations other than to return the wrapped
-// SpanContext.
-type nonRecordingSpan struct {
- embedded.Span
-
- sc trace.SpanContext
- tracer *tracer
-}
-
-var _ trace.Span = nonRecordingSpan{}
-
-// SpanContext returns the wrapped SpanContext.
-func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
-
-// IsRecording always returns false.
-func (nonRecordingSpan) IsRecording() bool { return false }
-
-// SetStatus does nothing.
-func (nonRecordingSpan) SetStatus(codes.Code, string) {}
-
-// SetError does nothing.
-func (nonRecordingSpan) SetError(bool) {}
-
-// SetAttributes does nothing.
-func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
-
-// End does nothing.
-func (nonRecordingSpan) End(...trace.SpanEndOption) {}
-
-// RecordError does nothing.
-func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
-
-// AddEvent does nothing.
-func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
-
-// SetName does nothing.
-func (nonRecordingSpan) SetName(string) {}
-
-func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
deleted file mode 100644
index e07e794..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/internal"
-
-import (
- "math"
- "unsafe"
-)
-
-func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
- if b {
- return 1
- }
- return 0
-}
-
-func RawToBool(r uint64) bool {
- return r != 0
-}
-
-func Int64ToRaw(i int64) uint64 {
- return uint64(i)
-}
-
-func RawToInt64(r uint64) int64 {
- return int64(r)
-}
-
-func Float64ToRaw(f float64) uint64 {
- return math.Float64bits(f)
-}
-
-func RawToFloat64(r uint64) float64 {
- return math.Float64frombits(r)
-}
-
-func RawPtrToFloat64Ptr(r *uint64) *float64 {
- return (*float64)(unsafe.Pointer(r))
-}
-
-func RawPtrToInt64Ptr(r *uint64) *int64 {
- return (*int64)(unsafe.Pointer(r))
-}
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
deleted file mode 100644
index c4f8acd..0000000
--- a/vendor/go.opentelemetry.io/otel/internal_logging.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "github.com/go-logr/logr"
-
- "go.opentelemetry.io/otel/internal/global"
-)
-
-// SetLogger configures the logger used internally to opentelemetry.
-func SetLogger(logger logr.Logger) {
- global.SetLogger(logger)
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
deleted file mode 100644
index f955171..0000000
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/metric"
-)
-
-// Meter returns a Meter from the global MeterProvider. The name must be the
-// name of the library providing instrumentation. This name may be the same as
-// the instrumented code only if that code provides built-in instrumentation.
-// If the name is empty, then a implementation defined default name will be
-// used instead.
-//
-// If this is called before a global MeterProvider is registered the returned
-// Meter will be a No-op implementation of a Meter. When a global MeterProvider
-// is registered for the first time, the returned Meter, and all the
-// instruments it has created or will create, are recreated automatically from
-// the new MeterProvider.
-//
-// This is short for GetMeterProvider().Meter(name).
-func Meter(name string, opts ...metric.MeterOption) metric.Meter {
- return GetMeterProvider().Meter(name, opts...)
-}
-
-// GetMeterProvider returns the registered global meter provider.
-//
-// If no global GetMeterProvider has been registered, a No-op GetMeterProvider
-// implementation is returned. When a global GetMeterProvider is registered for
-// the first time, the returned GetMeterProvider, and all the Meters it has
-// created or will create, are recreated automatically from the new
-// GetMeterProvider.
-func GetMeterProvider() metric.MeterProvider {
- return global.MeterProvider()
-}
-
-// SetMeterProvider registers mp as the global MeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
- global.SetMeterProvider(mp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
deleted file mode 100644
index 072baa8..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Float64Observable describes a set of instruments used asynchronously to
-// record float64 measurements once per collection cycle. Observations of
-// these instruments are only made within a callback.
-//
-// Warning: Methods may be added to this interface in minor releases.
-type Float64Observable interface {
- Observable
-
- float64Observable()
-}
-
-// Float64ObservableCounter is an instrument used to asynchronously record
-// increasing float64 measurements once per collection cycle. Observations are
-// only made within a callback for this instrument. The value observed is
-// assumed the to be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for
-// unimplemented methods.
-type Float64ObservableCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64ObservableCounter
-
- Float64Observable
-}
-
-// Float64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Float64ObservableCounterConfig struct {
- description string
- unit string
- callbacks []Float64Callback
-}
-
-// NewFloat64ObservableCounterConfig returns a new
-// [Float64ObservableCounterConfig] with all opts applied.
-func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
- var config Float64ObservableCounterConfig
- for _, o := range opts {
- config = o.applyFloat64ObservableCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64ObservableCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64ObservableCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
- return c.callbacks
-}
-
-// Float64ObservableCounterOption applies options to a
-// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
-// [InstrumentOption] for other options that can be used as a
-// Float64ObservableCounterOption.
-type Float64ObservableCounterOption interface {
- applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
-}
-
-// Float64ObservableUpDownCounter is an instrument used to asynchronously
-// record float64 measurements once per collection cycle. Observations are only
-// made within a callback for this instrument. The value observed is assumed
-// the to be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64ObservableUpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64ObservableUpDownCounter
-
- Float64Observable
-}
-
-// Float64ObservableUpDownCounterConfig contains options for asynchronous
-// counter instruments that record int64 values.
-type Float64ObservableUpDownCounterConfig struct {
- description string
- unit string
- callbacks []Float64Callback
-}
-
-// NewFloat64ObservableUpDownCounterConfig returns a new
-// [Float64ObservableUpDownCounterConfig] with all opts applied.
-func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
- var config Float64ObservableUpDownCounterConfig
- for _, o := range opts {
- config = o.applyFloat64ObservableUpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64ObservableUpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64ObservableUpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
- return c.callbacks
-}
-
-// Float64ObservableUpDownCounterOption applies options to a
-// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
-// [InstrumentOption] for other options that can be used as a
-// Float64ObservableUpDownCounterOption.
-type Float64ObservableUpDownCounterOption interface {
- applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
-}
-
-// Float64ObservableGauge is an instrument used to asynchronously record
-// instantaneous float64 measurements once per collection cycle. Observations
-// are only made within a callback for this instrument.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64ObservableGauge interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64ObservableGauge
-
- Float64Observable
-}
-
-// Float64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Float64ObservableGaugeConfig struct {
- description string
- unit string
- callbacks []Float64Callback
-}
-
-// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
-// with all opts applied.
-func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
- var config Float64ObservableGaugeConfig
- for _, o := range opts {
- config = o.applyFloat64ObservableGauge(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64ObservableGaugeConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64ObservableGaugeConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
- return c.callbacks
-}
-
-// Float64ObservableGaugeOption applies options to a
-// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
-// [InstrumentOption] for other options that can be used as a
-// Float64ObservableGaugeOption.
-type Float64ObservableGaugeOption interface {
- applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
-}
-
-// Float64Observer is a recorder of float64 measurements.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Observer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Observer
-
- // Observe records the float64 value.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Observe(value float64, options ...ObserveOption)
-}
-
-// Float64Callback is a function registered with a Meter that makes
-// observations for a Float64Observerable instrument it is registered with.
-// Calls to the Float64Observer record measurement values for the
-// Float64Observable.
-//
-// The function needs to complete in a finite amount of time and the deadline
-// of the passed context is expected to be honored.
-//
-// The function needs to make unique observations across all registered
-// Float64Callbacks. Meaning, it should not report measurements with the same
-// attributes as another Float64Callbacks also registered for the same
-// instrument.
-//
-// The function needs to be concurrent safe.
-type Float64Callback func(context.Context, Float64Observer) error
-
-// Float64ObservableOption applies options to float64 Observer instruments.
-type Float64ObservableOption interface {
- Float64ObservableCounterOption
- Float64ObservableUpDownCounterOption
- Float64ObservableGaugeOption
-}
-
-type float64CallbackOpt struct {
- cback Float64Callback
-}
-
-func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-// WithFloat64Callback adds callback to be called for an instrument.
-func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
- return float64CallbackOpt{callback}
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
deleted file mode 100644
index 9bd6ebf..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Int64Observable describes a set of instruments used asynchronously to record
-// int64 measurements once per collection cycle. Observations of these
-// instruments are only made within a callback.
-//
-// Warning: Methods may be added to this interface in minor releases.
-type Int64Observable interface {
- Observable
-
- int64Observable()
-}
-
-// Int64ObservableCounter is an instrument used to asynchronously record
-// increasing int64 measurements once per collection cycle. Observations are
-// only made within a callback for this instrument. The value observed is
-// assumed the to be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64ObservableCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64ObservableCounter
-
- Int64Observable
-}
-
-// Int64ObservableCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Int64ObservableCounterConfig struct {
- description string
- unit string
- callbacks []Int64Callback
-}
-
-// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
-// with all opts applied.
-func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
- var config Int64ObservableCounterConfig
- for _, o := range opts {
- config = o.applyInt64ObservableCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64ObservableCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64ObservableCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
- return c.callbacks
-}
-
-// Int64ObservableCounterOption applies options to a
-// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
-// [InstrumentOption] for other options that can be used as an
-// Int64ObservableCounterOption.
-type Int64ObservableCounterOption interface {
- applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
-}
-
-// Int64ObservableUpDownCounter is an instrument used to asynchronously record
-// int64 measurements once per collection cycle. Observations are only made
-// within a callback for this instrument. The value observed is assumed the to
-// be the cumulative sum of the count.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64ObservableUpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64ObservableUpDownCounter
-
- Int64Observable
-}
-
-// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Int64ObservableUpDownCounterConfig struct {
- description string
- unit string
- callbacks []Int64Callback
-}
-
-// NewInt64ObservableUpDownCounterConfig returns a new
-// [Int64ObservableUpDownCounterConfig] with all opts applied.
-func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
- var config Int64ObservableUpDownCounterConfig
- for _, o := range opts {
- config = o.applyInt64ObservableUpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64ObservableUpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64ObservableUpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
- return c.callbacks
-}
-
-// Int64ObservableUpDownCounterOption applies options to a
-// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
-// [InstrumentOption] for other options that can be used as an
-// Int64ObservableUpDownCounterOption.
-type Int64ObservableUpDownCounterOption interface {
- applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
-}
-
-// Int64ObservableGauge is an instrument used to asynchronously record
-// instantaneous int64 measurements once per collection cycle. Observations are
-// only made within a callback for this instrument.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64ObservableGauge interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64ObservableGauge
-
- Int64Observable
-}
-
-// Int64ObservableGaugeConfig contains options for asynchronous counter
-// instruments that record int64 values.
-type Int64ObservableGaugeConfig struct {
- description string
- unit string
- callbacks []Int64Callback
-}
-
-// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
-// with all opts applied.
-func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
- var config Int64ObservableGaugeConfig
- for _, o := range opts {
- config = o.applyInt64ObservableGauge(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64ObservableGaugeConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64ObservableGaugeConfig) Unit() string {
- return c.unit
-}
-
-// Callbacks returns the configured callbacks.
-func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
- return c.callbacks
-}
-
-// Int64ObservableGaugeOption applies options to a
-// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
-// [InstrumentOption] for other options that can be used as an
-// Int64ObservableGaugeOption.
-type Int64ObservableGaugeOption interface {
- applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
-}
-
-// Int64Observer is a recorder of int64 measurements.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Observer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Observer
-
- // Observe records the int64 value.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Observe(value int64, options ...ObserveOption)
-}
-
-// Int64Callback is a function registered with a Meter that makes observations
-// for an Int64Observerable instrument it is registered with. Calls to the
-// Int64Observer record measurement values for the Int64Observable.
-//
-// The function needs to complete in a finite amount of time and the deadline
-// of the passed context is expected to be honored.
-//
-// The function needs to make unique observations across all registered
-// Int64Callbacks. Meaning, it should not report measurements with the same
-// attributes as another Int64Callbacks also registered for the same
-// instrument.
-//
-// The function needs to be concurrent safe.
-type Int64Callback func(context.Context, Int64Observer) error
-
-// Int64ObservableOption applies options to int64 Observer instruments.
-type Int64ObservableOption interface {
- Int64ObservableCounterOption
- Int64ObservableUpDownCounterOption
- Int64ObservableGaugeOption
-}
-
-type int64CallbackOpt struct {
- cback Int64Callback
-}
-
-func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
- cfg.callbacks = append(cfg.callbacks, o.cback)
- return cfg
-}
-
-// WithInt64Callback adds callback to be called for an instrument.
-func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
- return int64CallbackOpt{callback}
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
deleted file mode 100644
index 778ad2d..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// MeterConfig contains options for Meters.
-type MeterConfig struct {
- instrumentationVersion string
- schemaURL string
- attrs attribute.Set
-
- // Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
-}
-
-// InstrumentationVersion returns the version of the library providing
-// instrumentation.
-func (cfg MeterConfig) InstrumentationVersion() string {
- return cfg.instrumentationVersion
-}
-
-// InstrumentationAttributes returns the attributes associated with the library
-// providing instrumentation.
-func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
- return cfg.attrs
-}
-
-// SchemaURL is the schema_url of the library providing instrumentation.
-func (cfg MeterConfig) SchemaURL() string {
- return cfg.schemaURL
-}
-
-// MeterOption is an interface for applying Meter options.
-type MeterOption interface {
- // applyMeter is used to set a MeterOption value of a MeterConfig.
- applyMeter(MeterConfig) MeterConfig
-}
-
-// NewMeterConfig creates a new MeterConfig and applies
-// all the given options.
-func NewMeterConfig(opts ...MeterOption) MeterConfig {
- var config MeterConfig
- for _, o := range opts {
- config = o.applyMeter(config)
- }
- return config
-}
-
-type meterOptionFunc func(MeterConfig) MeterConfig
-
-func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
- return fn(cfg)
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.instrumentationVersion = version
- return config
- })
-}
-
-// WithInstrumentationAttributes sets the instrumentation attributes.
-//
-// The passed attributes will be de-duplicated.
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.attrs = attribute.NewSet(attr...)
- return config
- })
-}
-
-// WithSchemaURL sets the schema URL.
-func WithSchemaURL(schemaURL string) MeterOption {
- return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.schemaURL = schemaURL
- return config
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
deleted file mode 100644
index 54716e1..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/doc.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package metric provides the OpenTelemetry API used to measure metrics about
-source code operation.
-
-This API is separate from its implementation so the instrumentation built from
-it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official
-OpenTelemetry implementation of this API.
-
-All measurements made with this package are made via instruments. These
-instruments are created by a [Meter] which itself is created by a
-[MeterProvider]. Applications need to accept a [MeterProvider] implementation
-as a starting point when instrumenting. This can be done directly, or by using
-the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an
-appropriately named [Meter] from the accepted [MeterProvider], instrumentation
-can then be built from the [Meter]'s instruments.
-
-# Instruments
-
-Each instrument is designed to make measurements of a particular type. Broadly,
-all instruments fall into two overlapping logical categories: asynchronous or
-synchronous, and int64 or float64.
-
-All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
-[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
-[Float64Histogram]) are used to measure the operation and performance of source
-code during the source code execution. These instruments only make measurements
-when the source code they instrument is run.
-
-All asynchronous instruments ([Int64ObservableCounter],
-[Int64ObservableUpDownCounter], [Int64ObservableGauge],
-[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
-[Float64ObservableGauge]) are used to measure metrics outside of the execution
-of source code. They are said to make "observations" via a callback function
-called once every measurement collection cycle.
-
-Each instrument is also grouped by the value type it measures. Either int64 or
-float64. The value being measured will dictate which instrument in these
-categories to use.
-
-Outside of these two broad categories, instruments are described by the
-function they are designed to serve. All Counters ([Int64Counter],
-[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
-designed to measure values that never decrease in value, but instead only
-incrementally increase in value. UpDownCounters ([Int64UpDownCounter],
-[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
-[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
-values that can increase and decrease. When more information needs to be
-conveyed about all the synchronous measurements made during a collection cycle,
-a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
-when just the most recent measurement needs to be conveyed about an
-asynchronous measurement, a Gauge ([Int64ObservableGauge] and
-[Float64ObservableGauge]) should be used.
-
-See the [OpenTelemetry documentation] for more information about instruments
-and their intended use.
-
-# Measurements
-
-Measurements are made by recording values and information about the values with
-an instrument. How these measurements are recorded depends on the instrument.
-
-Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
-[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
-[Float64Histogram]) are recorded using the instrument methods directly. All
-counter instruments have an Add method that is used to measure an increment
-value, and all histogram instruments have a Record method to measure a data
-point.
-
-Asynchronous instruments ([Int64ObservableCounter],
-[Int64ObservableUpDownCounter], [Int64ObservableGauge],
-[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
-[Float64ObservableGauge]) record measurements within a callback function. The
-callback is registered with the Meter which ensures the callback is called once
-per collection cycle. A callback can be registered two ways: during the
-instrument's creation using an option, or later using the RegisterCallback
-method of the [Meter] that created the instrument.
-
-If the following criteria are met, an option ([WithInt64Callback] or
-[WithFloat64Callback]) can be used during the asynchronous instrument's
-creation to register a callback ([Int64Callback] or [Float64Callback],
-respectively):
-
- - The measurement process is known when the instrument is created
- - Only that instrument will make a measurement within the callback
- - The callback never needs to be unregistered
-
-If the criteria are not met, use the RegisterCallback method of the [Meter] that
-created the instrument to register a [Callback].
-
-# API Implementations
-
-This package does not conform to the standard Go versioning policy, all of its
-interfaces may have methods added to them without a package major version bump.
-This non-standard API evolution could surprise an uninformed implementation
-author. They could unknowingly build their implementation in a way that would
-result in a runtime panic for their users that update to the new API.
-
-The API is designed to help inform an instrumentation author about this
-non-standard API evolution. It requires them to choose a default behavior for
-unimplemented interface methods. There are three behavior choices they can
-make:
-
- - Compilation failure
- - Panic
- - Default to another implementation
-
-All interfaces in this API embed a corresponding interface from
-[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
-behavior of their implementations to be a compilation failure, signaling to
-their users they need to update to the latest version of that implementation,
-they need to embed the corresponding interface from
-[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
-example,
-
- import "go.opentelemetry.io/otel/metric/embedded"
-
- type MeterProvider struct {
- embedded.MeterProvider
- // ...
- }
-
-If an author wants the default behavior of their implementations to a panic,
-they need to embed the API interface directly.
-
- import "go.opentelemetry.io/otel/metric"
-
- type MeterProvider struct {
- metric.MeterProvider
- // ...
- }
-
-This is not a recommended behavior as it could lead to publishing packages that
-contain runtime panics when users update other package that use newer versions
-of [go.opentelemetry.io/otel/metric].
-
-Finally, an author can embed another implementation in theirs. The embedded
-implementation will be used for methods not defined by the author. For example,
-an author who wants to default to silently dropping the call can use
-[go.opentelemetry.io/otel/metric/noop]:
-
- import "go.opentelemetry.io/otel/metric/noop"
-
- type MeterProvider struct {
- noop.MeterProvider
- // ...
- }
-
-It is strongly recommended that authors only embed
-[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
-That implementation is the only one OpenTelemetry authors can guarantee will
-fully implement all the API interfaces when a user updates their API.
-
-[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
-[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
-*/
-package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
deleted file mode 100644
index ae0bdbd..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package embedded provides interfaces embedded within the [OpenTelemetry
-// metric API].
-//
-// Implementers of the [OpenTelemetry metric API] can embed the relevant type
-// from this package into their implementation directly. Doing so will result
-// in a compilation error for users when the [OpenTelemetry metric API] is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-//
-// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
-package embedded // import "go.opentelemetry.io/otel/metric/embedded"
-
-// MeterProvider is embedded in
-// [go.opentelemetry.io/otel/metric.MeterProvider].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type MeterProvider interface{ meterProvider() }
-
-// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Meter interface{ meter() }
-
-// Float64Observer is embedded in
-// [go.opentelemetry.io/otel/metric.Float64Observer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Observer] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64Observer] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Float64Observer interface{ float64Observer() }
-
-// Int64Observer is embedded in
-// [go.opentelemetry.io/otel/metric.Int64Observer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users
-// to experience a compilation error, signaling they need to update to your
-// latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64Observer] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64Observer interface{ int64Observer() }
-
-// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Observer]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Observer interface{ observer() }
-
-// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Registration] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/metric.Registration]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Registration interface{ registration() }
-
-// Float64Counter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64Counter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Counter] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64Counter] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Float64Counter interface{ float64Counter() }
-
-// Float64Histogram is embedded in
-// [go.opentelemetry.io/otel/metric.Float64Histogram].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Float64Histogram interface{ float64Histogram() }
-
-// Float64ObservableCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64ObservableCounter interface{ float64ObservableCounter() }
-
-// Float64ObservableGauge is embedded in
-// [go.opentelemetry.io/otel/metric.Float64ObservableGauge].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64ObservableGauge interface{ float64ObservableGauge() }
-
-// Float64ObservableUpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
-// if you want users to experience a compilation error, signaling they need to
-// update to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() }
-
-// Float64UpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Float64UpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Float64UpDownCounter interface{ float64UpDownCounter() }
-
-// Int64Counter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64Counter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users
-// to experience a compilation error, signaling they need to update to your
-// latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64Counter] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64Counter interface{ int64Counter() }
-
-// Int64Histogram is embedded in
-// [go.opentelemetry.io/otel/metric.Int64Histogram].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64Histogram interface{ int64Histogram() }
-
-// Int64ObservableCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Int64ObservableCounter interface{ int64ObservableCounter() }
-
-// Int64ObservableGauge is embedded in
-// [go.opentelemetry.io/otel/metric.Int64ObservableGauge].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you
-// want users to experience a compilation error, signaling they need to update
-// to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Int64ObservableGauge interface{ int64ObservableGauge() }
-
-// Int64ObservableUpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if
-// you want users to experience a compilation error, signaling they need to
-// update to your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() }
-
-// Int64UpDownCounter is embedded in
-// [go.opentelemetry.io/otel/metric.Int64UpDownCounter].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want
-// users to experience a compilation error, signaling they need to update to
-// your latest implementation, when the
-// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Int64UpDownCounter interface{ int64UpDownCounter() }
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
deleted file mode 100644
index be89cd5..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/instrument.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// Observable is used as a grouping mechanism for all instruments that are
-// updated within a Callback.
-type Observable interface {
- observable()
-}
-
-// InstrumentOption applies options to all instruments.
-type InstrumentOption interface {
- Int64CounterOption
- Int64UpDownCounterOption
- Int64HistogramOption
- Int64ObservableCounterOption
- Int64ObservableUpDownCounterOption
- Int64ObservableGaugeOption
-
- Float64CounterOption
- Float64UpDownCounterOption
- Float64HistogramOption
- Float64ObservableCounterOption
- Float64ObservableUpDownCounterOption
- Float64ObservableGaugeOption
-}
-
-// HistogramOption applies options to histogram instruments.
-type HistogramOption interface {
- Int64HistogramOption
- Float64HistogramOption
-}
-
-type descOpt string
-
-func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
- c.description = string(o)
- return c
-}
-
-func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
- c.description = string(o)
- return c
-}
-
-// WithDescription sets the instrument description.
-func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
-
-type unitOpt string
-
-func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
- c.unit = string(o)
- return c
-}
-
-func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
- c.unit = string(o)
- return c
-}
-
-// WithUnit sets the instrument unit.
-//
-// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
-func WithUnit(u string) InstrumentOption { return unitOpt(u) }
-
-// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
-//
-// This option is considered "advisory", and may be ignored by API implementations.
-func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
-
-type bucketOpt []float64
-
-func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
- c.explicitBucketBoundaries = o
- return c
-}
-
-func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
- c.explicitBucketBoundaries = o
- return c
-}
-
-// AddOption applies options to an addition measurement. See
-// [MeasurementOption] for other options that can be used as an AddOption.
-type AddOption interface {
- applyAdd(AddConfig) AddConfig
-}
-
-// AddConfig contains options for an addition measurement.
-type AddConfig struct {
- attrs attribute.Set
-}
-
-// NewAddConfig returns a new [AddConfig] with all opts applied.
-func NewAddConfig(opts []AddOption) AddConfig {
- config := AddConfig{attrs: *attribute.EmptySet()}
- for _, o := range opts {
- config = o.applyAdd(config)
- }
- return config
-}
-
-// Attributes returns the configured attribute set.
-func (c AddConfig) Attributes() attribute.Set {
- return c.attrs
-}
-
-// RecordOption applies options to an addition measurement. See
-// [MeasurementOption] for other options that can be used as a RecordOption.
-type RecordOption interface {
- applyRecord(RecordConfig) RecordConfig
-}
-
-// RecordConfig contains options for a recorded measurement.
-type RecordConfig struct {
- attrs attribute.Set
-}
-
-// NewRecordConfig returns a new [RecordConfig] with all opts applied.
-func NewRecordConfig(opts []RecordOption) RecordConfig {
- config := RecordConfig{attrs: *attribute.EmptySet()}
- for _, o := range opts {
- config = o.applyRecord(config)
- }
- return config
-}
-
-// Attributes returns the configured attribute set.
-func (c RecordConfig) Attributes() attribute.Set {
- return c.attrs
-}
-
-// ObserveOption applies options to an addition measurement. See
-// [MeasurementOption] for other options that can be used as a ObserveOption.
-type ObserveOption interface {
- applyObserve(ObserveConfig) ObserveConfig
-}
-
-// ObserveConfig contains options for an observed measurement.
-type ObserveConfig struct {
- attrs attribute.Set
-}
-
-// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
-func NewObserveConfig(opts []ObserveOption) ObserveConfig {
- config := ObserveConfig{attrs: *attribute.EmptySet()}
- for _, o := range opts {
- config = o.applyObserve(config)
- }
- return config
-}
-
-// Attributes returns the configured attribute set.
-func (c ObserveConfig) Attributes() attribute.Set {
- return c.attrs
-}
-
-// MeasurementOption applies options to all instrument measurement.
-type MeasurementOption interface {
- AddOption
- RecordOption
- ObserveOption
-}
-
-type attrOpt struct {
- set attribute.Set
-}
-
-// mergeSets returns the union of keys between a and b. Any duplicate keys will
-// use the value associated with b.
-func mergeSets(a, b attribute.Set) attribute.Set {
- // NewMergeIterator uses the first value for any duplicates.
- iter := attribute.NewMergeIterator(&b, &a)
- merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
- for iter.Next() {
- merged = append(merged, iter.Attribute())
- }
- return attribute.NewSet(merged...)
-}
-
-func (o attrOpt) applyAdd(c AddConfig) AddConfig {
- switch {
- case o.set.Len() == 0:
- case c.attrs.Len() == 0:
- c.attrs = o.set
- default:
- c.attrs = mergeSets(c.attrs, o.set)
- }
- return c
-}
-
-func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
- switch {
- case o.set.Len() == 0:
- case c.attrs.Len() == 0:
- c.attrs = o.set
- default:
- c.attrs = mergeSets(c.attrs, o.set)
- }
- return c
-}
-
-func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
- switch {
- case o.set.Len() == 0:
- case c.attrs.Len() == 0:
- c.attrs = o.set
- default:
- c.attrs = mergeSets(c.attrs, o.set)
- }
- return c
-}
-
-// WithAttributeSet sets the attribute Set associated with a measurement is
-// made with.
-//
-// If multiple WithAttributeSet or WithAttributes options are passed the
-// attributes will be merged together in the order they are passed. Attributes
-// with duplicate keys will use the last value passed.
-func WithAttributeSet(attributes attribute.Set) MeasurementOption {
- return attrOpt{set: attributes}
-}
-
-// WithAttributes converts attributes into an attribute Set and sets the Set to
-// be associated with a measurement. This is shorthand for:
-//
-// cp := make([]attribute.KeyValue, len(attributes))
-// copy(cp, attributes)
-// WithAttributes(attribute.NewSet(cp...))
-//
-// [attribute.NewSet] may modify the passed attributes so this will make a copy
-// of attributes before creating a set in order to ensure this function is
-// concurrent safe. This makes this option function less optimized in
-// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
-// preferred for performance sensitive code.
-//
-// See [WithAttributeSet] for information about how multiple WithAttributes are
-// merged.
-func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
- cp := make([]attribute.KeyValue, len(attributes))
- copy(cp, attributes)
- return attrOpt{set: attribute.NewSet(cp...)}
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
deleted file mode 100644
index 2520bc7..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// MeterProvider provides access to named Meter instances, for instrumenting
-// an application or package.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type MeterProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.MeterProvider
-
- // Meter returns a new Meter with the provided name and configuration.
- //
- // A Meter should be scoped at most to a single package. The name needs to
- // be unique so it does not collide with other names used by
- // an application, nor other applications. To achieve this, the import path
- // of the instrumentation package is recommended to be used as name.
- //
- // If the name is empty, then an implementation defined default name will
- // be used instead.
- Meter(name string, opts ...MeterOption) Meter
-}
-
-// Meter provides access to instrument instances for recording metrics.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Meter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Meter
-
- // Int64Counter returns a new Int64Counter instrument identified by name
- // and configured with options. The instrument is used to synchronously
- // record increasing int64 measurements during a computational operation.
- Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
- // Int64UpDownCounter returns a new Int64UpDownCounter instrument
- // identified by name and configured with options. The instrument is used
- // to synchronously record int64 measurements during a computational
- // operation.
- Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
- // Int64Histogram returns a new Int64Histogram instrument identified by
- // name and configured with options. The instrument is used to
- // synchronously record the distribution of int64 measurements during a
- // computational operation.
- Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
- // Int64ObservableCounter returns a new Int64ObservableCounter identified
- // by name and configured with options. The instrument is used to
- // asynchronously record increasing int64 measurements once per a
- // measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithInt64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
- // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
- // instrument identified by name and configured with options. The
- // instrument is used to asynchronously record int64 measurements once per
- // a measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithInt64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
- // Int64ObservableGauge returns a new Int64ObservableGauge instrument
- // identified by name and configured with options. The instrument is used
- // to asynchronously record instantaneous int64 measurements once per a
- // measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithInt64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
-
- // Float64Counter returns a new Float64Counter instrument identified by
- // name and configured with options. The instrument is used to
- // synchronously record increasing float64 measurements during a
- // computational operation.
- Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
- // Float64UpDownCounter returns a new Float64UpDownCounter instrument
- // identified by name and configured with options. The instrument is used
- // to synchronously record float64 measurements during a computational
- // operation.
- Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
- // Float64Histogram returns a new Float64Histogram instrument identified by
- // name and configured with options. The instrument is used to
- // synchronously record the distribution of float64 measurements during a
- // computational operation.
- Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
- // Float64ObservableCounter returns a new Float64ObservableCounter
- // instrument identified by name and configured with options. The
- // instrument is used to asynchronously record increasing float64
- // measurements once per a measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithFloat64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
- // Float64ObservableUpDownCounter returns a new
- // Float64ObservableUpDownCounter instrument identified by name and
- // configured with options. The instrument is used to asynchronously record
- // float64 measurements once per a measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithFloat64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
- // Float64ObservableGauge returns a new Float64ObservableGauge instrument
- // identified by name and configured with options. The instrument is used
- // to asynchronously record instantaneous float64 measurements once per a
- // measurement collection cycle.
- //
- // Measurements for the returned instrument are made via a callback. Use
- // the WithFloat64Callback option to register the callback here, or use the
- // RegisterCallback method of this Meter to register one later. See the
- // Measurements section of the package documentation for more information.
- Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
-
- // RegisterCallback registers f to be called during the collection of a
- // measurement cycle.
- //
- // If Unregister of the returned Registration is called, f needs to be
- // unregistered and not called during collection.
- //
- // The instruments f is registered with are the only instruments that f may
- // observe values for.
- //
- // If no instruments are passed, f should not be registered nor called
- // during collection.
- //
- // The function f needs to be concurrent safe.
- RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
-}
-
-// Callback is a function registered with a Meter that makes observations for
-// the set of instruments it is registered with. The Observer parameter is used
-// to record measurement observations for these instruments.
-//
-// The function needs to complete in a finite amount of time and the deadline
-// of the passed context is expected to be honored.
-//
-// The function needs to make unique observations across all registered
-// Callbacks. Meaning, it should not report measurements for an instrument with
-// the same attributes as another Callback will report.
-//
-// The function needs to be concurrent safe.
-type Callback func(context.Context, Observer) error
-
-// Observer records measurements for multiple instruments in a Callback.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Observer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Observer
-
- // ObserveFloat64 records the float64 value for obsrv.
- ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
- // ObserveInt64 records the int64 value for obsrv.
- ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
-}
-
-// Registration is an token representing the unique registration of a callback
-// for a set of instruments with a Meter.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Registration interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Registration
-
- // Unregister removes the callback registration from a Meter.
- //
- // This method needs to be idempotent and concurrent safe.
- Unregister() error
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
deleted file mode 100644
index 0a4825a..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Float64Counter is an instrument that records increasing float64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Counter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Counter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr float64, options ...AddOption)
-}
-
-// Float64CounterConfig contains options for synchronous counter instruments that
-// record int64 values.
-type Float64CounterConfig struct {
- description string
- unit string
-}
-
-// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
-// applied.
-func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
- var config Float64CounterConfig
- for _, o := range opts {
- config = o.applyFloat64Counter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64CounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64CounterConfig) Unit() string {
- return c.unit
-}
-
-// Float64CounterOption applies options to a [Float64CounterConfig]. See
-// [InstrumentOption] for other options that can be used as a
-// Float64CounterOption.
-type Float64CounterOption interface {
- applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
-}
-
-// Float64UpDownCounter is an instrument that records increasing or decreasing
-// float64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64UpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64UpDownCounter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr float64, options ...AddOption)
-}
-
-// Float64UpDownCounterConfig contains options for synchronous counter
-// instruments that record int64 values.
-type Float64UpDownCounterConfig struct {
- description string
- unit string
-}
-
-// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
-// with all opts applied.
-func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
- var config Float64UpDownCounterConfig
- for _, o := range opts {
- config = o.applyFloat64UpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64UpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64UpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Float64UpDownCounterOption applies options to a
-// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
-// can be used as a Float64UpDownCounterOption.
-type Float64UpDownCounterOption interface {
- applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
-}
-
-// Float64Histogram is an instrument that records a distribution of float64
-// values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Float64Histogram interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Float64Histogram
-
- // Record adds an additional value to the distribution.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Record(ctx context.Context, incr float64, options ...RecordOption)
-}
-
-// Float64HistogramConfig contains options for synchronous counter instruments
-// that record int64 values.
-type Float64HistogramConfig struct {
- description string
- unit string
- explicitBucketBoundaries []float64
-}
-
-// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
-// opts applied.
-func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
- var config Float64HistogramConfig
- for _, o := range opts {
- config = o.applyFloat64Histogram(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Float64HistogramConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Float64HistogramConfig) Unit() string {
- return c.unit
-}
-
-// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
-func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
- return c.explicitBucketBoundaries
-}
-
-// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
-// [InstrumentOption] for other options that can be used as a
-// Float64HistogramOption.
-type Float64HistogramOption interface {
- applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
-}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
deleted file mode 100644
index 56667d3..0000000
--- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric // import "go.opentelemetry.io/otel/metric"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/metric/embedded"
-)
-
-// Int64Counter is an instrument that records increasing int64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Counter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Counter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr int64, options ...AddOption)
-}
-
-// Int64CounterConfig contains options for synchronous counter instruments that
-// record int64 values.
-type Int64CounterConfig struct {
- description string
- unit string
-}
-
-// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
-// applied.
-func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
- var config Int64CounterConfig
- for _, o := range opts {
- config = o.applyInt64Counter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64CounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64CounterConfig) Unit() string {
- return c.unit
-}
-
-// Int64CounterOption applies options to a [Int64CounterConfig]. See
-// [InstrumentOption] for other options that can be used as an
-// Int64CounterOption.
-type Int64CounterOption interface {
- applyInt64Counter(Int64CounterConfig) Int64CounterConfig
-}
-
-// Int64UpDownCounter is an instrument that records increasing or decreasing
-// int64 values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64UpDownCounter interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64UpDownCounter
-
- // Add records a change to the counter.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Add(ctx context.Context, incr int64, options ...AddOption)
-}
-
-// Int64UpDownCounterConfig contains options for synchronous counter
-// instruments that record int64 values.
-type Int64UpDownCounterConfig struct {
- description string
- unit string
-}
-
-// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
-// all opts applied.
-func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
- var config Int64UpDownCounterConfig
- for _, o := range opts {
- config = o.applyInt64UpDownCounter(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64UpDownCounterConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64UpDownCounterConfig) Unit() string {
- return c.unit
-}
-
-// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
-// See [InstrumentOption] for other options that can be used as an
-// Int64UpDownCounterOption.
-type Int64UpDownCounterOption interface {
- applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
-}
-
-// Int64Histogram is an instrument that records a distribution of int64
-// values.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Int64Histogram interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Int64Histogram
-
- // Record adds an additional value to the distribution.
- //
- // Use the WithAttributeSet (or, if performance is not a concern,
- // the WithAttributes) option to include measurement attributes.
- Record(ctx context.Context, incr int64, options ...RecordOption)
-}
-
-// Int64HistogramConfig contains options for synchronous counter instruments
-// that record int64 values.
-type Int64HistogramConfig struct {
- description string
- unit string
- explicitBucketBoundaries []float64
-}
-
-// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
-// applied.
-func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
- var config Int64HistogramConfig
- for _, o := range opts {
- config = o.applyInt64Histogram(config)
- }
- return config
-}
-
-// Description returns the configured description.
-func (c Int64HistogramConfig) Description() string {
- return c.description
-}
-
-// Unit returns the configured unit.
-func (c Int64HistogramConfig) Unit() string {
- return c.unit
-}
-
-// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
-func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
- return c.explicitBucketBoundaries
-}
-
-// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
-// [InstrumentOption] for other options that can be used as an
-// Int64HistogramOption.
-type Int64HistogramOption interface {
- applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
deleted file mode 100644
index d29aaa3..0000000
--- a/vendor/go.opentelemetry.io/otel/propagation.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/propagation"
-)
-
-// GetTextMapPropagator returns the global TextMapPropagator. If none has been
-// set, a No-Op TextMapPropagator is returned.
-func GetTextMapPropagator() propagation.TextMapPropagator {
- return global.TextMapPropagator()
-}
-
-// SetTextMapPropagator sets propagator as the global TextMapPropagator.
-func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
- global.SetTextMapPropagator(propagator)
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
deleted file mode 100644
index 303cdf1..0000000
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package propagation // import "go.opentelemetry.io/otel/propagation"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/baggage"
-)
-
-const baggageHeader = "baggage"
-
-// Baggage is a propagator that supports the W3C Baggage format.
-//
-// This propagates user-defined baggage associated with a trace. The complete
-// specification is defined at https://www.w3.org/TR/baggage/.
-type Baggage struct{}
-
-var _ TextMapPropagator = Baggage{}
-
-// Inject sets baggage key-values from ctx into the carrier.
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
- bStr := baggage.FromContext(ctx).String()
- if bStr != "" {
- carrier.Set(baggageHeader, bStr)
- }
-}
-
-// Extract returns a copy of parent with the baggage from the carrier added.
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
- bStr := carrier.Get(baggageHeader)
- if bStr == "" {
- return parent
- }
-
- bag, err := baggage.Parse(bStr)
- if err != nil {
- return parent
- }
- return baggage.ContextWithBaggage(parent, bag)
-}
-
-// Fields returns the keys who's values are set with Inject.
-func (b Baggage) Fields() []string {
- return []string{baggageHeader}
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
deleted file mode 100644
index c119eb2..0000000
--- a/vendor/go.opentelemetry.io/otel/propagation/doc.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package propagation contains OpenTelemetry context propagators.
-
-OpenTelemetry propagators are used to extract and inject context data from and
-into messages exchanged by applications. The propagator supported by this
-package is the W3C Trace Context encoding
-(https://www.w3.org/TR/trace-context/), and W3C Baggage
-(https://www.w3.org/TR/baggage/).
-*/
-package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
deleted file mode 100644
index c94438f..0000000
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package propagation // import "go.opentelemetry.io/otel/propagation"
-
-import (
- "context"
- "net/http"
-)
-
-// TextMapCarrier is the storage medium used by a TextMapPropagator.
-type TextMapCarrier interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Get returns the value associated with the passed key.
- Get(key string) string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Set stores the key-value pair.
- Set(key string, value string)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Keys lists the keys stored in this carrier.
- Keys() []string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
-// medium for propagated key-value pairs.
-type MapCarrier map[string]string
-
-// Compile time check that MapCarrier implements the TextMapCarrier.
-var _ TextMapCarrier = MapCarrier{}
-
-// Get returns the value associated with the passed key.
-func (c MapCarrier) Get(key string) string {
- return c[key]
-}
-
-// Set stores the key-value pair.
-func (c MapCarrier) Set(key, value string) {
- c[key] = value
-}
-
-// Keys lists the keys stored in this carrier.
-func (c MapCarrier) Keys() []string {
- keys := make([]string, 0, len(c))
- for k := range c {
- keys = append(keys, k)
- }
- return keys
-}
-
-// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
-type HeaderCarrier http.Header
-
-// Get returns the value associated with the passed key.
-func (hc HeaderCarrier) Get(key string) string {
- return http.Header(hc).Get(key)
-}
-
-// Set stores the key-value pair.
-func (hc HeaderCarrier) Set(key string, value string) {
- http.Header(hc).Set(key, value)
-}
-
-// Keys lists the keys stored in this carrier.
-func (hc HeaderCarrier) Keys() []string {
- keys := make([]string, 0, len(hc))
- for k := range hc {
- keys = append(keys, k)
- }
- return keys
-}
-
-// TextMapPropagator propagates cross-cutting concerns as key-value text
-// pairs within a carrier that travels in-band across process boundaries.
-type TextMapPropagator interface {
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Inject set cross-cutting concerns from the Context into the carrier.
- Inject(ctx context.Context, carrier TextMapCarrier)
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Extract reads cross-cutting concerns from the carrier into a Context.
- Extract(ctx context.Context, carrier TextMapCarrier) context.Context
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-
- // Fields returns the keys whose values are set with Inject.
- Fields() []string
- // DO NOT CHANGE: any modification will not be backwards compatible and
- // must never be done outside of a new major release.
-}
-
-type compositeTextMapPropagator []TextMapPropagator
-
-func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
- for _, i := range p {
- i.Inject(ctx, carrier)
- }
-}
-
-func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
- for _, i := range p {
- ctx = i.Extract(ctx, carrier)
- }
- return ctx
-}
-
-func (p compositeTextMapPropagator) Fields() []string {
- unique := make(map[string]struct{})
- for _, i := range p {
- for _, k := range i.Fields() {
- unique[k] = struct{}{}
- }
- }
-
- fields := make([]string, 0, len(unique))
- for k := range unique {
- fields = append(fields, k)
- }
- return fields
-}
-
-// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
-// group of passed TextMapPropagator. This allows different cross-cutting
-// concerns to be propagates in a unified manner.
-//
-// The returned TextMapPropagator will inject and extract cross-cutting
-// concerns in the order the TextMapPropagators were provided. Additionally,
-// the Fields method will return a de-duplicated slice of the keys that are
-// set with the Inject method.
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
- return compositeTextMapPropagator(p)
-}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
deleted file mode 100644
index 75a8f34..0000000
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package propagation // import "go.opentelemetry.io/otel/propagation"
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "regexp"
-
- "go.opentelemetry.io/otel/trace"
-)
-
-const (
- supportedVersion = 0
- maxVersion = 254
- traceparentHeader = "traceparent"
- tracestateHeader = "tracestate"
-)
-
-// TraceContext is a propagator that supports the W3C Trace Context format
-// (https://www.w3.org/TR/trace-context/)
-//
-// This propagator will propagate the traceparent and tracestate headers to
-// guarantee traces are not broken. It is up to the users of this propagator
-// to choose if they want to participate in a trace by modifying the
-// traceparent header and relevant parts of the tracestate header containing
-// their proprietary information.
-type TraceContext struct{}
-
-var (
- _ TextMapPropagator = TraceContext{}
- traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
-)
-
-// Inject set tracecontext from the Context into the carrier.
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
- sc := trace.SpanContextFromContext(ctx)
- if !sc.IsValid() {
- return
- }
-
- if ts := sc.TraceState().String(); ts != "" {
- carrier.Set(tracestateHeader, ts)
- }
-
- // Clear all flags other than the trace-context supported sampling bit.
- flags := sc.TraceFlags() & trace.FlagsSampled
-
- h := fmt.Sprintf("%.2x-%s-%s-%s",
- supportedVersion,
- sc.TraceID(),
- sc.SpanID(),
- flags)
- carrier.Set(traceparentHeader, h)
-}
-
-// Extract reads tracecontext from the carrier into a returned Context.
-//
-// The returned Context will be a copy of ctx and contain the extracted
-// tracecontext as the remote SpanContext. If the extracted tracecontext is
-// invalid, the passed ctx will be returned directly instead.
-func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
- sc := tc.extract(carrier)
- if !sc.IsValid() {
- return ctx
- }
- return trace.ContextWithRemoteSpanContext(ctx, sc)
-}
-
-func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
- h := carrier.Get(traceparentHeader)
- if h == "" {
- return trace.SpanContext{}
- }
-
- matches := traceCtxRegExp.FindStringSubmatch(h)
-
- if len(matches) == 0 {
- return trace.SpanContext{}
- }
-
- if len(matches) < 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[1]) != 2 {
- return trace.SpanContext{}
- }
- ver, err := hex.DecodeString(matches[1])
- if err != nil {
- return trace.SpanContext{}
- }
- version := int(ver[0])
- if version > maxVersion {
- return trace.SpanContext{}
- }
-
- if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
- return trace.SpanContext{}
- }
-
- if len(matches[2]) != 32 {
- return trace.SpanContext{}
- }
-
- var scc trace.SpanContextConfig
-
- scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
- if err != nil {
- return trace.SpanContext{}
- }
-
- if len(matches[3]) != 16 {
- return trace.SpanContext{}
- }
- scc.SpanID, err = trace.SpanIDFromHex(matches[3])
- if err != nil {
- return trace.SpanContext{}
- }
-
- if len(matches[4]) != 2 {
- return trace.SpanContext{}
- }
- opts, err := hex.DecodeString(matches[4])
- if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
- return trace.SpanContext{}
- }
- // Clear all flags other than the trace-context supported sampling bit.
- scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
-
- // Ignore the error returned here. Failure to parse tracestate MUST NOT
- // affect the parsing of traceparent according to the W3C tracecontext
- // specification.
- scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
- scc.Remote = true
-
- sc := trace.NewSpanContext(scc)
- if !sc.IsValid() {
- return trace.SpanContext{}
- }
-
- return sc
-}
-
-// Fields returns the keys who's values are set with Inject.
-func (tc TraceContext) Fields() []string {
- return []string{traceparentHeader, tracestateHeader}
-}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
deleted file mode 100644
index e0a43e1..0000000
--- a/vendor/go.opentelemetry.io/otel/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-codespell==2.2.6
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
deleted file mode 100644
index 71a1f77..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package semconv implements OpenTelemetry semantic conventions.
-//
-// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.17.0 version of the OpenTelemetry specification.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
deleted file mode 100644
index 679c40c..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// This semantic convention defines the attributes used to represent a feature
-// flag evaluation as an event.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
- // "feature_flag.variant" semantic conventions. It represents the sHOULD be
- // a semantic identifier for a value. If one is unavailable, a stringified
- // version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
- // For example, the variant `red` maybe be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
-// RPC received/sent message.
-const (
- // MessageTypeKey is the attribute Key conforming to the "message.type"
- // semantic conventions. It represents the whether this is a received or
- // sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessageTypeKey = attribute.Key("message.type")
-
- // MessageIDKey is the attribute Key conforming to the "message.id"
- // semantic conventions. It represents the mUST be calculated as two
- // different counters starting from `1` one for sent messages and one for
- // received message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- MessageIDKey = attribute.Key("message.id")
-
- // MessageCompressedSizeKey is the attribute Key conforming to the
- // "message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageCompressedSizeKey = attribute.Key("message.compressed_size")
-
- // MessageUncompressedSizeKey is the attribute Key conforming to the
- // "message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
-)
-
-var (
- // sent
- MessageTypeSent = MessageTypeKey.String("SENT")
- // received
- MessageTypeReceived = MessageTypeKey.String("RECEIVED")
-)
-
-// MessageID returns an attribute KeyValue conforming to the "message.id"
-// semantic conventions. It represents the mUST be calculated as two different
-// counters starting from `1` one for sent messages and one for received
-// message.
-func MessageID(val int) attribute.KeyValue {
- return MessageIDKey.Int(val)
-}
-
-// MessageCompressedSize returns an attribute KeyValue conforming to the
-// "message.compressed_size" semantic conventions. It represents the compressed
-// size of the message in bytes.
-func MessageCompressedSize(val int) attribute.KeyValue {
- return MessageCompressedSizeKey.Int(val)
-}
-
-// MessageUncompressedSize returns an attribute KeyValue conforming to the
-// "message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func MessageUncompressedSize(val int) attribute.KeyValue {
- return MessageUncompressedSizeKey.Int(val)
-}
-
-// The attributes used to report a single exception associated with a span.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
- // "exception.escaped" semantic conventions. It represents the sHOULD be
- // set to true if the exception event is recorded at a point where it is
- // known that the exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example above](#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
deleted file mode 100644
index 9b8c559..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-
-const (
- // ExceptionEventName is the name of the Span event representing an exception.
- ExceptionEventName = "exception"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
deleted file mode 100644
index d5c4b5c..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-
-// HTTP scheme attributes.
-var (
- HTTPSchemeHTTP = HTTPSchemeKey.String("http")
- HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
deleted file mode 100644
index 39a2eab..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
+++ /dev/null
@@ -1,2010 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserUserAgentKey is the attribute Key conforming to the
- // "browser.user_agent" semantic conventions. It represents the full
- // user-agent string provided by the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
- // AppleWebKit/537.36 (KHTML, '
- // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
- // Note: The user-agent value SHOULD be provided only from browsers that do
- // not have a mechanism to retrieve brands and platform individually from
- // the User-Agent Client Hints API. To retrieve the value, the legacy
- // `navigator.userAgent` API can be used.
- BrowserUserAgentKey = attribute.Key("browser.user_agent")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserUserAgent returns an attribute KeyValue conforming to the
-// "browser.user_agent" semantic conventions. It represents the full user-agent
-// string provided by the browser
-func BrowserUserAgent(val string) attribute.KeyValue {
- return BrowserUserAgentKey.String(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS)
-const (
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://intl.cloud.tencent.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
- // [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the task
- // definition family this task definition is a member of.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for this task definition.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each write to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-)
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// A container instance.
-const (
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageTagKey is the attribute Key conforming to the
- // "container.image.tag" semantic conventions. It represents the container
- // image tag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- ContainerImageTagKey = attribute.Key("container.image.tag")
-)
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageTag returns an attribute KeyValue conforming to the
-// "container.image.tag" semantic conventions. It represents the container
-// image tag.
-func ContainerImageTag(val string) attribute.KeyValue {
- return ContainerImageTagKey.String(val)
-}
-
-// The software deployment.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'staging', 'production'
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment
-// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
-// deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// The device on which the process represented by this resource is running.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human readable version of
- // the device model rather than a machine readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// A serverless instance.
-const (
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `faas.id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
- // conventions. It represents the unique ID of the single function that
- // this runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so consider setting `faas.id` as a span attribute instead.
- //
- // The exact value to use for `faas.id` depends on the cloud provider:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- FaaSIDKey = attribute.Key("faas.id")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run:** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function in MiB.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 128
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-)
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
-// conventions. It represents the unique ID of the single function that this
-// runtime instance executes.
-func FaaSID(val string) attribute.KeyValue {
- return FaaSIDKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function in MiB.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// A host is defined as a general computing instance.
-const (
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // Linux systems, the `machine-id` located in `/etc/machine-id` or
- // `/var/lib/dbus/machine-id` may be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- HostArchKey = attribute.Key("host.arch")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
- // semantic conventions. It represents the vM image ID. For Cloud, this
- // value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image as defined in [Version
- // Attributes](README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized Linux
-// systems, the `machine-id` located in `/etc/machine-id` or
-// `/var/lib/dbus/machine-id` may be used.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID. For
-// Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image as defined in [Version
-// Attributes](README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// A Kubernetes Cluster.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// A Kubernetes Node object.
-const (
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-)
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// A Kubernetes Namespace.
-const (
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-)
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// A Kubernetes Pod object.
-const (
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-)
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// A container in a
-// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
-const (
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from Pod specification, must be unique within a Pod. Container
- // runtime usually uses different globally unique name (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-)
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// A Kubernetes ReplicaSet object.
-const (
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-)
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// A Kubernetes Deployment object.
-const (
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-)
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// A Kubernetes StatefulSet object.
-const (
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-)
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// A Kubernetes DaemonSet object.
-const (
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-)
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// A Kubernetes Job object.
-const (
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-)
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// A Kubernetes CronJob object.
-const (
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-)
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- OSTypeKey = attribute.Key("os.type")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
- // be parsed) OS version information, like e.g. reported by `ver` or
- // `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, like e.g. reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
- // "process.command_args" semantic conventions. It represents the all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: ConditionallyRequired (See alternative attributes
- // below.)
- // Stability: stable
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-)
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// The single (language) runtime instance which is monitored.
-const (
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-)
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// A service instance.
-const (
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md#process), e.g.
- // `unknown_service:bash`. If `process.executable.name` is not available,
- // the value MUST be set to `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
- // `service.namespace,service.name` pair (in other words
- // `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to distinguish instances of the same
- // service that exist at the same time (e.g. instances of a horizontally
- // scaled service). It is preferable for the ID to be persistent and stay
- // the same for the lifetime of the service instance, however it is
- // acceptable that the ID is ephemeral and changes during important
- // lifetime events for the service (e.g. service restarts). If the service
- // has no inherent unique ID that can be used as the value of this
- // attribute it is recommended to generate a random Version 1 or Version 4
- // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
- // Version 5, see RFC 4122 for more recommendations).
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
-
-// The telemetry SDK used to capture data recorded by the instrumentation
-// libraries.
-const (
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'opentelemetry'
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
- // TelemetryAutoVersionKey is the attribute Key conforming to the
- // "telemetry.auto.version" semantic conventions. It represents the version
- // string of the auto instrumentation agent, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.2.3'
- TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryAutoVersion returns an attribute KeyValue conforming to the
-// "telemetry.auto.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent, if used.
-func TelemetryAutoVersion(val string) attribute.KeyValue {
- return TelemetryAutoVersionKey.String(val)
-}
-
-// Resource describing the packaged software running the application code. Web
-// engines are typically executed using process.runtime.
-const (
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-)
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OtelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OtelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OtelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OtelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OtelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OtelScopeName(val string) attribute.KeyValue {
- return OtelScopeNameKey.String(val)
-}
-
-// OtelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OtelScopeVersion(val string) attribute.KeyValue {
- return OtelScopeVersionKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry
-// Scope's concepts.
-const (
- // OtelLibraryNameKey is the attribute Key conforming to the
- // "otel.library.name" semantic conventions. It represents the deprecated,
- // use the `otel.scope.name` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OtelLibraryNameKey = attribute.Key("otel.library.name")
-
- // OtelLibraryVersionKey is the attribute Key conforming to the
- // "otel.library.version" semantic conventions. It represents the
- // deprecated, use the `otel.scope.version` attribute.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: deprecated
- // Examples: '1.0.0'
- OtelLibraryVersionKey = attribute.Key("otel.library.version")
-)
-
-// OtelLibraryName returns an attribute KeyValue conforming to the
-// "otel.library.name" semantic conventions. It represents the deprecated, use
-// the `otel.scope.name` attribute.
-func OtelLibraryName(val string) attribute.KeyValue {
- return OtelLibraryNameKey.String(val)
-}
-
-// OtelLibraryVersion returns an attribute KeyValue conforming to the
-// "otel.library.version" semantic conventions. It represents the deprecated,
-// use the `otel.scope.version` attribute.
-func OtelLibraryVersion(val string) attribute.KeyValue {
- return OtelLibraryVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
deleted file mode 100644
index 42fc525..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-
-// SchemaURL is the schema URL that matches the version of the semantic conventions
-// that this package defines. Semconv packages starting from v1.4.0 must declare
-// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
deleted file mode 100644
index 8c4a729..0000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
+++ /dev/null
@@ -1,3375 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-)
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
- // semantic conventions. It represents the name identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'click', 'exception'
- EventNameKey = attribute.Key("event.name")
-
- // EventDomainKey is the attribute Key conforming to the "event.domain"
- // semantic conventions. It represents the domain identifies the business
- // context for the events.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: Events across different domains may have same `event.name`, yet be
- // unrelated events.
- EventDomainKey = attribute.Key("event.domain")
-)
-
-var (
- // Events from browser apps
- EventDomainBrowser = EventDomainKey.String("browser")
- // Events from mobile apps
- EventDomainDevice = EventDomainKey.String("device")
- // Events from Kubernetes
- EventDomainK8S = EventDomainKey.String("k8s")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It represents the name identifies the event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// Span attributes used by AWS Lambda (in addition to general `faas`
-// attributes).
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `faas.id` if an alias is involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for CloudEvents. CloudEvents is a specification on how to define
-// event data in a standard way. These attributes can be attached to spans when
-// performing operations with CloudEvents, regardless of the protocol being
-// used.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// Semantic conventions for the OpenTracing Shim
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span does not depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The attributes used to perform database client calls.
-const (
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents an identifier for the database management
- // system (DBMS) product being used. See below for a list of well-known
- // identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- DBSystemKey = attribute.Key("db.system")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
- DBConnectionStringKey = attribute.Key("db.connection_string")
-
- // DBUserKey is the attribute Key conforming to the "db.user" semantic
- // conventions. It represents the username for accessing the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'readonly_user', 'reporting_user'
- DBUserKey = attribute.Key("db.user")
-
- // DBJDBCDriverClassnameKey is the attribute Key conforming to the
- // "db.jdbc.driver_classname" semantic conventions. It represents the
- // fully-qualified class name of the [Java Database Connectivity
- // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
- // driver used to connect.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'org.postgresql.Driver',
- // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
- DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
-
- // DBNameKey is the attribute Key conforming to the "db.name" semantic
- // conventions. It represents the this attribute is used to report the name
- // of the database being accessed. For commands that switch the database,
- // this should be set to the target database (even if the command fails).
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable.)
- // Stability: stable
- // Examples: 'customers', 'main'
- // Note: In some SQL databases, the database name to be used is called
- // "schema name". In case there are multiple layers that could be
- // considered for database name (e.g. Oracle instance name and schema
- // name), the database name to be used is the more specific layer (e.g.
- // Oracle schema name).
- DBNameKey = attribute.Key("db.name")
-
- // DBStatementKey is the attribute Key conforming to the "db.statement"
- // semantic conventions. It represents the database statement being
- // executed.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If applicable and not
- // explicitly disabled via instrumentation configuration.)
- // Stability: stable
- // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
- // Note: The value may be sanitized to exclude sensitive information.
- DBStatementKey = attribute.Key("db.statement")
-
- // DBOperationKey is the attribute Key conforming to the "db.operation"
- // semantic conventions. It represents the name of the operation being
- // executed, e.g. the [MongoDB command
- // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
- // such as `findAndModify`, or the SQL keyword.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If `db.statement` is not
- // applicable.)
- // Stability: stable
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: When setting this to an SQL keyword, it is not recommended to
- // attempt any client-side parsing of `db.statement` just to get this
- // property, but it should be set if the operation name is provided by the
- // library being instrumented. If the SQL statement has an ambiguous
- // operation, or performs more than one operation, this value may be
- // omitted.
- DBOperationKey = attribute.Key("db.operation")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
-)
-
-// DBConnectionString returns an attribute KeyValue conforming to the
-// "db.connection_string" semantic conventions. It represents the connection
-// string used to connect to the database. It is recommended to remove embedded
-// credentials.
-func DBConnectionString(val string) attribute.KeyValue {
- return DBConnectionStringKey.String(val)
-}
-
-// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
-// conventions. It represents the username for accessing the database.
-func DBUser(val string) attribute.KeyValue {
- return DBUserKey.String(val)
-}
-
-// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
-// "db.jdbc.driver_classname" semantic conventions. It represents the
-// fully-qualified class name of the [Java Database Connectivity
-// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
-// used to connect.
-func DBJDBCDriverClassname(val string) attribute.KeyValue {
- return DBJDBCDriverClassnameKey.String(val)
-}
-
-// DBName returns an attribute KeyValue conforming to the "db.name" semantic
-// conventions. It represents the this attribute is used to report the name of
-// the database being accessed. For commands that switch the database, this
-// should be set to the target database (even if the command fails).
-func DBName(val string) attribute.KeyValue {
- return DBNameKey.String(val)
-}
-
-// DBStatement returns an attribute KeyValue conforming to the
-// "db.statement" semantic conventions. It represents the database statement
-// being executed.
-func DBStatement(val string) attribute.KeyValue {
- return DBStatementKey.String(val)
-}
-
-// DBOperation returns an attribute KeyValue conforming to the
-// "db.operation" semantic conventions. It represents the name of the operation
-// being executed, e.g. the [MongoDB command
-// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
-// such as `findAndModify`, or the SQL keyword.
-func DBOperation(val string) attribute.KeyValue {
- return DBOperationKey.String(val)
-}
-
-// Connection-level attributes for Microsoft SQL Server
-const (
- // DBMSSQLInstanceNameKey is the attribute Key conforming to the
- // "db.mssql.instance_name" semantic conventions. It represents the
- // Microsoft SQL Server [instance
- // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
- // connecting to. This name is used to determine the port of a named
- // instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MSSQLSERVER'
- // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
- // longer required (but still recommended if non-standard).
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
-)
-
-// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
-// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
-// SQL Server [instance
-// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
-// connecting to. This name is used to determine the port of a named instance.
-func DBMSSQLInstanceName(val string) attribute.KeyValue {
- return DBMSSQLInstanceNameKey.String(val)
-}
-
-// Call-level attributes for Cassandra
-const (
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary table that the operation is acting upon, including the keyspace
- // name (if applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents the
- // whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraTable returns an attribute KeyValue conforming to the
-// "db.cassandra.table" semantic conventions. It represents the name of the
-// primary table that the operation is acting upon, including the keyspace name
-// (if applicable).
-func DBCassandraTable(val string) attribute.KeyValue {
- return DBCassandraTableKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// Call-level attributes for Redis
-const (
- // DBRedisDBIndexKey is the attribute Key conforming to the
- // "db.redis.database_index" semantic conventions. It represents the index
- // of the database being accessed as used in the [`SELECT`
- // command](https://redis.io/commands/select), provided as an integer. To
- // be used instead of the generic `db.name` attribute.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If other than the default
- // database (`0`).)
- // Stability: stable
- // Examples: 0, 1, 15
- DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
-)
-
-// DBRedisDBIndex returns an attribute KeyValue conforming to the
-// "db.redis.database_index" semantic conventions. It represents the index of
-// the database being accessed as used in the [`SELECT`
-// command](https://redis.io/commands/select), provided as an integer. To be
-// used instead of the generic `db.name` attribute.
-func DBRedisDBIndex(val int) attribute.KeyValue {
- return DBRedisDBIndexKey.Int(val)
-}
-
-// Call-level attributes for MongoDB
-const (
- // DBMongoDBCollectionKey is the attribute Key conforming to the
- // "db.mongodb.collection" semantic conventions. It represents the
- // collection being accessed within the database stated in `db.name`.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'customers', 'products'
- DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
-)
-
-// DBMongoDBCollection returns an attribute KeyValue conforming to the
-// "db.mongodb.collection" semantic conventions. It represents the collection
-// being accessed within the database stated in `db.name`.
-func DBMongoDBCollection(val string) attribute.KeyValue {
- return DBMongoDBCollectionKey.String(val)
-}
-
-// Call-level attributes for SQL databases
-const (
- // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
- // semantic conventions. It represents the name of the primary table that
- // the operation is acting upon, including the database name (if
- // applicable).
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'public.users', 'customers'
- // Note: It is not recommended to attempt any client-side parsing of
- // `db.statement` just to get this property, but it should be set if it is
- // provided by the library being instrumented. If the operation is acting
- // upon an anonymous table, or more than one table, this value MUST NOT be
- // set.
- DBSQLTableKey = attribute.Key("db.sql.table")
-)
-
-// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
-// semantic conventions. It represents the name of the primary table that the
-// operation is acting upon, including the database name (if applicable).
-func DBSQLTable(val string) attribute.KeyValue {
- return DBSQLTableKey.String(val)
-}
-
-// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
-// concepts.
-const (
- // OtelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OtelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OtelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OtelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
- // The operation contains an error
- OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
-)
-
-// OtelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OtelStatusDescription(val string) attribute.KeyValue {
- return OtelStatusDescriptionKey.String(val)
-}
-
-// This semantic convention describes an instance of a function that runs
-// without provisioning or managing of servers (also known as serverless
-// functions or Function as a Service (FaaS)) with spans.
-const (
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function execution.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: For the server/consumer span on the incoming side,
- // `faas.trigger` MUST be set.
- //
- // Clients invoking FaaS instances usually cannot set `faas.trigger`,
- // since they would typically need to look in the payload to determine
- // the event type. If clients set it, it should be the same as the
- // trigger that corresponding incoming would have (i.e., this has
- // nothing to do with the underlying transport used to make the API
- // call to invoke the lambda, which is often HTTP).
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
- // semantic conventions. It represents the execution ID of the current
- // function execution.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSExecutionKey = attribute.Key("faas.execution")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSExecution returns an attribute KeyValue conforming to the
-// "faas.execution" semantic conventions. It represents the execution ID of the
-// current function execution.
-func FaaSExecution(val string) attribute.KeyValue {
- return FaaSExecutionKey.String(val)
-}
-
-// Semantic Convention for FaaS triggered as a response to some data source
-// operation such as a database or filesystem read/write.
-const (
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
- // "faas.document.operation" semantic conventions. It represents the
- // describes the type of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
-// database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// Semantic Convention for FaaS scheduled to be executed regularly.
-const (
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-)
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// Contains additional attributes for incoming FaaS spans.
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// Contains additional attributes for outgoing FaaS spans.
-const (
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (For some cloud providers, like
- // AWS or GCP, the region in which a function is hosted is essential to
- // uniquely identify the function and also part of its endpoint. Since it's
- // part of the endpoint being called, the region is always known to
- // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
- // If the region is unknown to the client or not required for identifying
- // the invoked function, setting `faas.invoked_region` is optional.)
- // Stability: stable
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetTransportKey is the attribute Key conforming to the "net.transport"
- // semantic conventions. It represents the transport protocol used. See
- // note below.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- NetTransportKey = attribute.Key("net.transport")
-
- // NetAppProtocolNameKey is the attribute Key conforming to the
- // "net.app.protocol.name" semantic conventions. It represents the
- // application layer protocol used. The value SHOULD be normalized to
- // lowercase.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
-
- // NetAppProtocolVersionKey is the attribute Key conforming to the
- // "net.app.protocol.version" semantic conventions. It represents the
- // version of the application layer protocol used. See note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '3.1.1'
- // Note: `net.app.protocol.version` refers to the version of the protocol
- // used and might be different from the protocol client's version. If the
- // HTTP client used has a version of `0.27.2`, but sends HTTP version
- // `1.1`, this attribute should be set to `1.1`.
- NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
-
- // NetSockPeerNameKey is the attribute Key conforming to the
- // "net.sock.peer.name" semantic conventions. It represents the remote
- // socket peer name.
- //
- // Type: string
- // RequirementLevel: Recommended (If available and different from
- // `net.peer.name` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 'proxy.example.com'
- NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
-
- // NetSockPeerAddrKey is the attribute Key conforming to the
- // "net.sock.peer.addr" semantic conventions. It represents the remote
- // socket peer address: IPv4 or IPv6 for internet protocols, path for local
- // communication,
- // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '127.0.0.1', '/tmp/mysql.sock'
- NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
-
- // NetSockPeerPortKey is the attribute Key conforming to the
- // "net.sock.peer.port" semantic conventions. It represents the remote
- // socket peer port.
- //
- // Type: int
- // RequirementLevel: Recommended (If defined for the address family and if
- // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
- // Stability: stable
- // Examples: 16456
- NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
-
- // NetSockFamilyKey is the attribute Key conforming to the
- // "net.sock.family" semantic conventions. It represents the protocol
- // [address
- // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
- // which is used for communication.
- //
- // Type: Enum
- // RequirementLevel: ConditionallyRequired (If different than `inet` and if
- // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
- // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
- // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
- // instrumentations that follow previous versions of this document.)
- // Stability: stable
- // Examples: 'inet6', 'bluetooth'
- NetSockFamilyKey = attribute.Key("net.sock.family")
-
- // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
- // semantic conventions. It represents the logical remote hostname, see
- // note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com'
- // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
- // extra DNS lookup.
- NetPeerNameKey = attribute.Key("net.peer.name")
-
- // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
- // semantic conventions. It represents the logical remote port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- NetPeerPortKey = attribute.Key("net.peer.port")
-
- // NetHostNameKey is the attribute Key conforming to the "net.host.name"
- // semantic conventions. It represents the logical local hostname or
- // similar, see note below.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'localhost'
- NetHostNameKey = attribute.Key("net.host.name")
-
- // NetHostPortKey is the attribute Key conforming to the "net.host.port"
- // semantic conventions. It represents the logical local port number,
- // preferably the one that the peer used to connect
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 8080
- NetHostPortKey = attribute.Key("net.host.port")
-
- // NetSockHostAddrKey is the attribute Key conforming to the
- // "net.sock.host.addr" semantic conventions. It represents the local
- // socket address. Useful in case of a multi-IP host.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '192.168.0.1'
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
-
- // NetSockHostPortKey is the attribute Key conforming to the
- // "net.sock.host.port" semantic conventions. It represents the local
- // socket port number.
- //
- // Type: int
- // RequirementLevel: Recommended (If defined for the address family and if
- // different than `net.host.port` and if `net.sock.host.addr` is set.)
- // Stability: stable
- // Examples: 35555
- NetSockHostPortKey = attribute.Key("net.sock.host.port")
-
- // NetHostConnectionTypeKey is the attribute Key conforming to the
- // "net.host.connection.type" semantic conventions. It represents the
- // internet connection type currently being used by the host.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'wifi'
- NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
-
- // NetHostConnectionSubtypeKey is the attribute Key conforming to the
- // "net.host.connection.subtype" semantic conventions. It represents the
- // this describes more details regarding the connection.type. It may be the
- // type of cell technology connection, but it could be used for describing
- // details about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'LTE'
- NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
-
- // NetHostCarrierNameKey is the attribute Key conforming to the
- // "net.host.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'sprint'
- NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
-
- // NetHostCarrierMccKey is the attribute Key conforming to the
- // "net.host.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '310'
- NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
-
- // NetHostCarrierMncKey is the attribute Key conforming to the
- // "net.host.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '001'
- NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
-
- // NetHostCarrierIccKey is the attribute Key conforming to the
- // "net.host.carrier.icc" semantic conventions. It represents the ISO
- // 3166-1 alpha-2 2-character country code associated with the mobile
- // carrier network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'DE'
- NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
-)
-
-var (
- // ip_tcp
- NetTransportTCP = NetTransportKey.String("ip_tcp")
- // ip_udp
- NetTransportUDP = NetTransportKey.String("ip_udp")
- // Named or anonymous pipe. See note below
- NetTransportPipe = NetTransportKey.String("pipe")
- // In-process communication
- NetTransportInProc = NetTransportKey.String("inproc")
- // Something else (non IP-based)
- NetTransportOther = NetTransportKey.String("other")
-)
-
-var (
- // IPv4 address
- NetSockFamilyInet = NetSockFamilyKey.String("inet")
- // IPv6 address
- NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
- // Unix domain socket path
- NetSockFamilyUnix = NetSockFamilyKey.String("unix")
-)
-
-var (
- // wifi
- NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
- // wired
- NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
- // cell
- NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
- // unavailable
- NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
- // unknown
- NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
-)
-
-var (
- // GPRS
- NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
- // EDGE
- NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
- // UMTS
- NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
- // CDMA
- NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
- // IDEN
- NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
- // EHRPD
- NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
- // GSM
- NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
-)
-
-// NetAppProtocolName returns an attribute KeyValue conforming to the
-// "net.app.protocol.name" semantic conventions. It represents the application
-// layer protocol used. The value SHOULD be normalized to lowercase.
-func NetAppProtocolName(val string) attribute.KeyValue {
- return NetAppProtocolNameKey.String(val)
-}
-
-// NetAppProtocolVersion returns an attribute KeyValue conforming to the
-// "net.app.protocol.version" semantic conventions. It represents the version
-// of the application layer protocol used. See note below.
-func NetAppProtocolVersion(val string) attribute.KeyValue {
- return NetAppProtocolVersionKey.String(val)
-}
-
-// NetSockPeerName returns an attribute KeyValue conforming to the
-// "net.sock.peer.name" semantic conventions. It represents the remote socket
-// peer name.
-func NetSockPeerName(val string) attribute.KeyValue {
- return NetSockPeerNameKey.String(val)
-}
-
-// NetSockPeerAddr returns an attribute KeyValue conforming to the
-// "net.sock.peer.addr" semantic conventions. It represents the remote socket
-// peer address: IPv4 or IPv6 for internet protocols, path for local
-// communication,
-// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
-func NetSockPeerAddr(val string) attribute.KeyValue {
- return NetSockPeerAddrKey.String(val)
-}
-
-// NetSockPeerPort returns an attribute KeyValue conforming to the
-// "net.sock.peer.port" semantic conventions. It represents the remote socket
-// peer port.
-func NetSockPeerPort(val int) attribute.KeyValue {
- return NetSockPeerPortKey.Int(val)
-}
-
-// NetPeerName returns an attribute KeyValue conforming to the
-// "net.peer.name" semantic conventions. It represents the logical remote
-// hostname, see note below.
-func NetPeerName(val string) attribute.KeyValue {
- return NetPeerNameKey.String(val)
-}
-
-// NetPeerPort returns an attribute KeyValue conforming to the
-// "net.peer.port" semantic conventions. It represents the logical remote port
-// number
-func NetPeerPort(val int) attribute.KeyValue {
- return NetPeerPortKey.Int(val)
-}
-
-// NetHostName returns an attribute KeyValue conforming to the
-// "net.host.name" semantic conventions. It represents the logical local
-// hostname or similar, see note below.
-func NetHostName(val string) attribute.KeyValue {
- return NetHostNameKey.String(val)
-}
-
-// NetHostPort returns an attribute KeyValue conforming to the
-// "net.host.port" semantic conventions. It represents the logical local port
-// number, preferably the one that the peer used to connect
-func NetHostPort(val int) attribute.KeyValue {
- return NetHostPortKey.Int(val)
-}
-
-// NetSockHostAddr returns an attribute KeyValue conforming to the
-// "net.sock.host.addr" semantic conventions. It represents the local socket
-// address. Useful in case of a multi-IP host.
-func NetSockHostAddr(val string) attribute.KeyValue {
- return NetSockHostAddrKey.String(val)
-}
-
-// NetSockHostPort returns an attribute KeyValue conforming to the
-// "net.sock.host.port" semantic conventions. It represents the local socket
-// port number.
-func NetSockHostPort(val int) attribute.KeyValue {
- return NetSockHostPortKey.Int(val)
-}
-
-// NetHostCarrierName returns an attribute KeyValue conforming to the
-// "net.host.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetHostCarrierName(val string) attribute.KeyValue {
- return NetHostCarrierNameKey.String(val)
-}
-
-// NetHostCarrierMcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mcc" semantic conventions. It represents the mobile
-// carrier country code.
-func NetHostCarrierMcc(val string) attribute.KeyValue {
- return NetHostCarrierMccKey.String(val)
-}
-
-// NetHostCarrierMnc returns an attribute KeyValue conforming to the
-// "net.host.carrier.mnc" semantic conventions. It represents the mobile
-// carrier network code.
-func NetHostCarrierMnc(val string) attribute.KeyValue {
- return NetHostCarrierMncKey.String(val)
-}
-
-// NetHostCarrierIcc returns an attribute KeyValue conforming to the
-// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetHostCarrierIcc(val string) attribute.KeyValue {
- return NetHostCarrierIccKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](../../resource/semantic_conventions/README.md#service)
- // of the remote service. SHOULD be equal to the actual `service.name`
- // resource attribute of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](../../resource/semantic_conventions/README.md#service) of
-// the remote service. SHOULD be equal to the actual `service.name` resource
-// attribute of the remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// These attributes may be used for any operation with an authenticated and/or
-// authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-)
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// Semantic conventions for HTTP client and server Spans.
-const (
- // HTTPMethodKey is the attribute Key conforming to the "http.method"
- // semantic conventions. It represents the hTTP request method.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- HTTPMethodKey = attribute.Key("http.method")
-
- // HTTPStatusCodeKey is the attribute Key conforming to the
- // "http.status_code" semantic conventions. It represents the [HTTP
- // response status code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If and only if one was
- // received/sent.)
- // Stability: stable
- // Examples: 200
- HTTPStatusCodeKey = attribute.Key("http.status_code")
-
- // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
- // semantic conventions. It represents the kind of HTTP protocol used.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Note: If `net.transport` is not specified, it can be assumed to be
- // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
- // assumed.
- HTTPFlavorKey = attribute.Key("http.flavor")
-
- // HTTPUserAgentKey is the attribute Key conforming to the
- // "http.user_agent" semantic conventions. It represents the value of the
- // [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
- HTTPUserAgentKey = attribute.Key("http.user_agent")
-
- // HTTPRequestContentLengthKey is the attribute Key conforming to the
- // "http.request_content_length" semantic conventions. It represents the
- // size of the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
-
- // HTTPResponseContentLengthKey is the attribute Key conforming to the
- // "http.response_content_length" semantic conventions. It represents the
- // size of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3495
- HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
-)
-
-var (
- // HTTP/1.0
- HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
- // HTTP/1.1
- HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
- // HTTP/2
- HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
- // HTTP/3
- HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
- // SPDY protocol
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
- // QUIC protocol
- HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
-)
-
-// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
-// semantic conventions. It represents the hTTP request method.
-func HTTPMethod(val string) attribute.KeyValue {
- return HTTPMethodKey.String(val)
-}
-
-// HTTPStatusCode returns an attribute KeyValue conforming to the
-// "http.status_code" semantic conventions. It represents the [HTTP response
-// status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPStatusCode(val int) attribute.KeyValue {
- return HTTPStatusCodeKey.Int(val)
-}
-
-// HTTPUserAgent returns an attribute KeyValue conforming to the
-// "http.user_agent" semantic conventions. It represents the value of the [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func HTTPUserAgent(val string) attribute.KeyValue {
- return HTTPUserAgentKey.String(val)
-}
-
-// HTTPRequestContentLength returns an attribute KeyValue conforming to the
-// "http.request_content_length" semantic conventions. It represents the size
-// of the request payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestContentLength(val int) attribute.KeyValue {
- return HTTPRequestContentLengthKey.Int(val)
-}
-
-// HTTPResponseContentLength returns an attribute KeyValue conforming to the
-// "http.response_content_length" semantic conventions. It represents the size
-// of the response payload body in bytes. This is the number of bytes
-// transferred excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseContentLength(val int) attribute.KeyValue {
- return HTTPResponseContentLengthKey.Int(val)
-}
-
-// Semantic Convention for HTTP Client
-const (
- // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
- // conventions. It represents the full HTTP request URL in the form
- // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
- // not transmitted over HTTP, but if it is known, it should be included
- // nevertheless.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
- // Note: `http.url` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case the
- // attribute's value should be `https://www.example.com/`.
- HTTPURLKey = attribute.Key("http.url")
-
- // HTTPResendCountKey is the attribute Key conforming to the
- // "http.resend_count" semantic conventions. It represents the ordinal
- // number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Recommended (if and only if request was retried.)
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPResendCountKey = attribute.Key("http.resend_count")
-)
-
-// HTTPURL returns an attribute KeyValue conforming to the "http.url"
-// semantic conventions. It represents the full HTTP request URL in the form
-// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
-// transmitted over HTTP, but if it is known, it should be included
-// nevertheless.
-func HTTPURL(val string) attribute.KeyValue {
- return HTTPURLKey.String(val)
-}
-
-// HTTPResendCount returns an attribute KeyValue conforming to the
-// "http.resend_count" semantic conventions. It represents the ordinal number
-// of request resending attempt (for any reason, including redirects).
-func HTTPResendCount(val int) attribute.KeyValue {
- return HTTPResendCountKey.Int(val)
-}
-
-// Semantic Convention for HTTP Server
-const (
- // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
- // semantic conventions. It represents the URI scheme identifying the used
- // protocol.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'http', 'https'
- HTTPSchemeKey = attribute.Key("http.scheme")
-
- // HTTPTargetKey is the attribute Key conforming to the "http.target"
- // semantic conventions. It represents the full request target as passed in
- // a HTTP request line or equivalent.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '/path/12314/?q=ddds'
- HTTPTargetKey = attribute.Key("http.target")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route (path template in
- // the format used by the respective server framework). See note below
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if it's available)
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: 'http.route' MUST NOT be populated when this is not supported by
- // the HTTP server framework as the route attribute should have
- // low-cardinality and the URI path can NOT substitute it.
- HTTPRouteKey = attribute.Key("http.route")
-
- // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
- // semantic conventions. It represents the IP address of the original
- // client behind all proxies, if known (e.g. from
- // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '83.164.160.102'
- // Note: This is not necessarily the same as `net.sock.peer.addr`, which
- // would
- // identify the network-level peer, which may be a proxy.
- //
- // This attribute should be set when a source of information different
- // from the one used for `net.sock.peer.addr`, is available even if that
- // other
- // source just confirms the same value as `net.sock.peer.addr`.
- // Rationale: For `net.sock.peer.addr`, one typically does not know if it
- // comes from a proxy, reverse proxy, or the actual client. Setting
- // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
- // one is at least somewhat confident that the address is not that of
- // the closest proxy.
- HTTPClientIPKey = attribute.Key("http.client_ip")
-)
-
-// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
-// semantic conventions. It represents the URI scheme identifying the used
-// protocol.
-func HTTPScheme(val string) attribute.KeyValue {
- return HTTPSchemeKey.String(val)
-}
-
-// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
-// semantic conventions. It represents the full request target as passed in a
-// HTTP request line or equivalent.
-func HTTPTarget(val string) attribute.KeyValue {
- return HTTPTargetKey.String(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route (path template in the
-// format used by the respective server framework). See note below
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
-// HTTPClientIP returns an attribute KeyValue conforming to the
-// "http.client_ip" semantic conventions. It represents the IP address of the
-// original client behind all proxies, if known (e.g. from
-// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
-func HTTPClientIP(val string) attribute.KeyValue {
- return HTTPClientIPKey.String(val)
-}
-
-// Attributes that exist for multiple DynamoDB request types.
-const (
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-)
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// DynamoDB.CreateTable
-const (
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-)
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// DynamoDB.ListTables
-const (
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-)
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the the
-// number of items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// DynamoDB.Query
-const (
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-)
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// DynamoDB.Scan
-const (
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-)
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // the `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Semantic conventions to apply when instrumenting the GraphQL implementation.
-// They map GraphQL operations to attributes on a Span.
-const (
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// Semantic convention describing per-message attributes populated on messaging
-// spans or links.
-const (
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the [conversation ID](#conversations) identifying the conversation to
- // which the message belongs, represented as a string. Sometimes called
- // "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
- // the "messaging.message.payload_size_bytes" semantic conventions. It
- // represents the (uncompressed) size of the message payload in bytes. Also
- // use this attribute if it is unknown whether the compressed or
- // uncompressed payload size is reported.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2738
- MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
-
- // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
- // conforming to the "messaging.message.payload_compressed_size_bytes"
- // semantic conventions. It represents the compressed size of the message
- // payload in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2048
- MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
-)
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the [conversation ID](#conversations) identifying the
-// conversation to which the message belongs, represented as a string.
-// Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
-// to the "messaging.message.payload_size_bytes" semantic conventions. It
-// represents the (uncompressed) size of the message payload in bytes. Also use
-// this attribute if it is unknown whether the compressed or uncompressed
-// payload size is reported.
-func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadSizeBytesKey.Int(val)
-}
-
-// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
-// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
-// conventions. It represents the compressed size of the message payload in
-// bytes.
-func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
- return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
-}
-
-// Semantic convention for attributes that describe messaging destination on
-// broker
-const (
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker does not have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationKindKey is the attribute Key conforming to the
- // "messaging.destination.kind" semantic conventions. It represents the
- // kind of message destination
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-)
-
-var (
- // A message sent to a queue
- MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
- // A message sent to a topic
- MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
-)
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// Semantic convention for attributes that describe messaging source on broker
-const (
- // MessagingSourceNameKey is the attribute Key conforming to the
- // "messaging.source.name" semantic conventions. It represents the message
- // source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Source name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker does not have such notion, the source name SHOULD uniquely
- // identify the broker.
- MessagingSourceNameKey = attribute.Key("messaging.source.name")
-
- // MessagingSourceKindKey is the attribute Key conforming to the
- // "messaging.source.kind" semantic conventions. It represents the kind of
- // message source
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceKindKey = attribute.Key("messaging.source.kind")
-
- // MessagingSourceTemplateKey is the attribute Key conforming to the
- // "messaging.source.template" semantic conventions. It represents the low
- // cardinality representation of the messaging source name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/customers/{customerID}'
- // Note: Source names could be constructed from templates. An example would
- // be a source name involving a user name or product id. Although the
- // source name in this case is of high cardinality, the underlying template
- // is of low cardinality and can be effectively used for grouping and
- // aggregation.
- MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
-
- // MessagingSourceTemporaryKey is the attribute Key conforming to the
- // "messaging.source.temporary" semantic conventions. It represents a
- // boolean that is true if the message source is temporary and might not
- // exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
-
- // MessagingSourceAnonymousKey is the attribute Key conforming to the
- // "messaging.source.anonymous" semantic conventions. It represents a
- // boolean that is true if the message source is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
-)
-
-var (
- // A message received from a queue
- MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
- // A message received from a topic
- MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
-)
-
-// MessagingSourceName returns an attribute KeyValue conforming to the
-// "messaging.source.name" semantic conventions. It represents the message
-// source name
-func MessagingSourceName(val string) attribute.KeyValue {
- return MessagingSourceNameKey.String(val)
-}
-
-// MessagingSourceTemplate returns an attribute KeyValue conforming to the
-// "messaging.source.template" semantic conventions. It represents the low
-// cardinality representation of the messaging source name
-func MessagingSourceTemplate(val string) attribute.KeyValue {
- return MessagingSourceTemplateKey.String(val)
-}
-
-// MessagingSourceTemporary returns an attribute KeyValue conforming to the
-// "messaging.source.temporary" semantic conventions. It represents a boolean
-// that is true if the message source is temporary and might not exist anymore
-// after messages are processed.
-func MessagingSourceTemporary(val bool) attribute.KeyValue {
- return MessagingSourceTemporaryKey.Bool(val)
-}
-
-// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
-// "messaging.source.anonymous" semantic conventions. It represents a boolean
-// that is true if the message source is anonymous (could be unnamed or have
-// auto-generated name).
-func MessagingSourceAnonymous(val bool) attribute.KeyValue {
- return MessagingSourceAnonymousKey.Bool(val)
-}
-
-// General attributes used in messaging systems.
-const (
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents a string
- // identifying the messaging system.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
- MessagingSystemKey = attribute.Key("messaging.system")
-
- // MessagingOperationKey is the attribute Key conforming to the
- // "messaging.operation" semantic conventions. It represents a string
- // identifying the kind of messaging operation as defined in the [Operation
- // names](#operation-names) section above.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationKey = attribute.Key("messaging.operation")
-
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the span describes an
- // operation on a batch of messages.)
- // Stability: stable
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-)
-
-var (
- // publish
- MessagingOperationPublish = MessagingOperationKey.String("publish")
- // receive
- MessagingOperationReceive = MessagingOperationKey.String("receive")
- // process
- MessagingOperationProcess = MessagingOperationKey.String("process")
-)
-
-// MessagingSystem returns an attribute KeyValue conforming to the
-// "messaging.system" semantic conventions. It represents a string identifying
-// the messaging system.
-func MessagingSystem(val string) attribute.KeyValue {
- return MessagingSystemKey.String(val)
-}
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// Semantic convention for a consumer of messages received from a messaging
-// system
-const (
- // MessagingConsumerIDKey is the attribute Key conforming to the
- // "messaging.consumer.id" semantic conventions. It represents the
- // identifier for the consumer receiving a message. For Kafka, set it to
- // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
- // both are present, or only `messaging.kafka.consumer.group`. For brokers,
- // such as RabbitMQ and Artemis, set it to the `client_id` of the client
- // consuming the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'mygroup - client-6'
- MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
-)
-
-// MessagingConsumerID returns an attribute KeyValue conforming to the
-// "messaging.consumer.id" semantic conventions. It represents the identifier
-// for the consumer receiving a message. For Kafka, set it to
-// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
-// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
-// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
-// message.
-func MessagingConsumerID(val string) attribute.KeyValue {
- return MessagingConsumerIDKey.String(val)
-}
-
-// Attributes for RabbitMQ
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the rabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If not empty.)
- // Stability: stable
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the rabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// Attributes for Apache Kafka
-const (
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka are used for grouping alike messages to ensure
- // they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'myKey'
- // Note: If the key type is not string, it's string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaClientIDKey is the attribute Key conforming to the
- // "messaging.kafka.client_id" semantic conventions. It represents the
- // client ID for the Consumer or Producer that is handling the message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client-5'
- MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
-
- // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
- // the "messaging.kafka.destination.partition" semantic conventions. It
- // represents the partition the message is sent to.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
-
- // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
- // "messaging.kafka.source.partition" semantic conventions. It represents
- // the partition the message is received from.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 2
- MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If value is `true`. When
- // missing, the value is assumed to be `false`.)
- // Stability: stable
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka are used for grouping alike messages to ensure they're
-// processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaClientID returns an attribute KeyValue conforming to the
-// "messaging.kafka.client_id" semantic conventions. It represents the client
-// ID for the Consumer or Producer that is handling the message.
-func MessagingKafkaClientID(val string) attribute.KeyValue {
- return MessagingKafkaClientIDKey.String(val)
-}
-
-// MessagingKafkaDestinationPartition returns an attribute KeyValue
-// conforming to the "messaging.kafka.destination.partition" semantic
-// conventions. It represents the partition the message is sent to.
-func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
- return MessagingKafkaDestinationPartitionKey.Int(val)
-}
-
-// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
-// the "messaging.kafka.source.partition" semantic conventions. It represents
-// the partition the message is received from.
-func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
- return MessagingKafkaSourcePartitionKey.Int(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
-// Attributes for Apache RocketMQ
-const (
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources, resources in different namespaces are
- // individual.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqClientIDKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_id" semantic conventions. It represents the
- // unique identifier for each client.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'myhost@8742@s8083jm'
- MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delay time level is not specified.)
- // Stability: stable
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If the message type is delay
- // and delivery timestamp is not specified.)
- // Stability: stable
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the it is essential for FIFO message. Messages that belong to the same
- // message group are always processed one by one within the same consumer
- // group.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
- // Stability: stable
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of message, another way to mark message besides message id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources, resources in different namespaces are
-// individual.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
-// "messaging.rocketmq.client_id" semantic conventions. It represents the
-// unique identifier for each client.
-func MessagingRocketmqClientID(val string) attribute.KeyValue {
- return MessagingRocketmqClientIDKey.String(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the it is essential for FIFO message. Messages that belong to the same
-// message group are always processed one by one within the same consumer
-// group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of message, another way to mark message besides message id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// Semantic conventions for remote procedure calls.
-const (
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCSystemKey = attribute.Key("rpc.system")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: stable
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
-)
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// Tech-specific attributes for gRPC.
-const (
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
-const (
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // does not specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If other than the default
- // version (`1.0`))
- // Stability: stable
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
- // property of request or response. Since protocol allows id to be int,
- // string, `null` or missing (for notifications), value is expected to be
- // cast to string for simplicity. Use empty string in case of `null` value.
- // Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: ConditionallyRequired (If response is not successful.)
- // Stability: stable
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-)
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// does not specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since protocol allows id to be int, string,
-// `null` or missing (for notifications), value is expected to be cast to
-// string for simplicity. Use empty string in case of `null` value. Omit
-// entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
deleted file mode 100644
index caf7249..0000000
--- a/vendor/go.opentelemetry.io/otel/trace.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-import (
- "go.opentelemetry.io/otel/internal/global"
- "go.opentelemetry.io/otel/trace"
-)
-
-// Tracer creates a named tracer that implements Tracer interface.
-// If the name is an empty string then provider uses default name.
-//
-// This is short for GetTracerProvider().Tracer(name, opts...)
-func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
- return GetTracerProvider().Tracer(name, opts...)
-}
-
-// GetTracerProvider returns the registered global trace provider.
-// If none is registered then an instance of NoopTracerProvider is returned.
-//
-// Use the trace provider to create a named tracer. E.g.
-//
-// tracer := otel.GetTracerProvider().Tracer("example.com/foo")
-//
-// or
-//
-// tracer := otel.Tracer("example.com/foo")
-func GetTracerProvider() trace.TracerProvider {
- return global.TracerProvider()
-}
-
-// SetTracerProvider registers `tp` as the global trace provider.
-func SetTracerProvider(tp trace.TracerProvider) {
- global.SetTracerProvider(tp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
deleted file mode 100644
index 3aadc66..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ /dev/null
@@ -1,334 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "time"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-// TracerConfig is a group of options for a Tracer.
-type TracerConfig struct {
- instrumentationVersion string
- // Schema URL of the telemetry emitted by the Tracer.
- schemaURL string
- attrs attribute.Set
-}
-
-// InstrumentationVersion returns the version of the library providing instrumentation.
-func (t *TracerConfig) InstrumentationVersion() string {
- return t.instrumentationVersion
-}
-
-// InstrumentationAttributes returns the attributes associated with the library
-// providing instrumentation.
-func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
- return t.attrs
-}
-
-// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
-func (t *TracerConfig) SchemaURL() string {
- return t.schemaURL
-}
-
-// NewTracerConfig applies all the options to a returned TracerConfig.
-func NewTracerConfig(options ...TracerOption) TracerConfig {
- var config TracerConfig
- for _, option := range options {
- config = option.apply(config)
- }
- return config
-}
-
-// TracerOption applies an option to a TracerConfig.
-type TracerOption interface {
- apply(TracerConfig) TracerConfig
-}
-
-type tracerOptionFunc func(TracerConfig) TracerConfig
-
-func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig {
- return fn(cfg)
-}
-
-// SpanConfig is a group of options for a Span.
-type SpanConfig struct {
- attributes []attribute.KeyValue
- timestamp time.Time
- links []Link
- newRoot bool
- spanKind SpanKind
- stackTrace bool
-}
-
-// Attributes describe the associated qualities of a Span.
-func (cfg *SpanConfig) Attributes() []attribute.KeyValue {
- return cfg.attributes
-}
-
-// Timestamp is a time in a Span life-cycle.
-func (cfg *SpanConfig) Timestamp() time.Time {
- return cfg.timestamp
-}
-
-// StackTrace checks whether stack trace capturing is enabled.
-func (cfg *SpanConfig) StackTrace() bool {
- return cfg.stackTrace
-}
-
-// Links are the associations a Span has with other Spans.
-func (cfg *SpanConfig) Links() []Link {
- return cfg.links
-}
-
-// NewRoot identifies a Span as the root Span for a new trace. This is
-// commonly used when an existing trace crosses trust boundaries and the
-// remote parent span context should be ignored for security.
-func (cfg *SpanConfig) NewRoot() bool {
- return cfg.newRoot
-}
-
-// SpanKind is the role a Span has in a trace.
-func (cfg *SpanConfig) SpanKind() SpanKind {
- return cfg.spanKind
-}
-
-// NewSpanStartConfig applies all the options to a returned SpanConfig.
-// No validation is performed on the returned SpanConfig (e.g. no uniqueness
-// checking or bounding of data), it is left to the SDK to perform this
-// action.
-func NewSpanStartConfig(options ...SpanStartOption) SpanConfig {
- var c SpanConfig
- for _, option := range options {
- c = option.applySpanStart(c)
- }
- return c
-}
-
-// NewSpanEndConfig applies all the options to a returned SpanConfig.
-// No validation is performed on the returned SpanConfig (e.g. no uniqueness
-// checking or bounding of data), it is left to the SDK to perform this
-// action.
-func NewSpanEndConfig(options ...SpanEndOption) SpanConfig {
- var c SpanConfig
- for _, option := range options {
- c = option.applySpanEnd(c)
- }
- return c
-}
-
-// SpanStartOption applies an option to a SpanConfig. These options are applicable
-// only when the span is created.
-type SpanStartOption interface {
- applySpanStart(SpanConfig) SpanConfig
-}
-
-type spanOptionFunc func(SpanConfig) SpanConfig
-
-func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig {
- return fn(cfg)
-}
-
-// SpanEndOption applies an option to a SpanConfig. These options are
-// applicable only when the span is ended.
-type SpanEndOption interface {
- applySpanEnd(SpanConfig) SpanConfig
-}
-
-// EventConfig is a group of options for an Event.
-type EventConfig struct {
- attributes []attribute.KeyValue
- timestamp time.Time
- stackTrace bool
-}
-
-// Attributes describe the associated qualities of an Event.
-func (cfg *EventConfig) Attributes() []attribute.KeyValue {
- return cfg.attributes
-}
-
-// Timestamp is a time in an Event life-cycle.
-func (cfg *EventConfig) Timestamp() time.Time {
- return cfg.timestamp
-}
-
-// StackTrace checks whether stack trace capturing is enabled.
-func (cfg *EventConfig) StackTrace() bool {
- return cfg.stackTrace
-}
-
-// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
-// timestamp option is passed, the returned EventConfig will have a Timestamp
-// set to the call time, otherwise no validation is performed on the returned
-// EventConfig.
-func NewEventConfig(options ...EventOption) EventConfig {
- var c EventConfig
- for _, option := range options {
- c = option.applyEvent(c)
- }
- if c.timestamp.IsZero() {
- c.timestamp = time.Now()
- }
- return c
-}
-
-// EventOption applies span event options to an EventConfig.
-type EventOption interface {
- applyEvent(EventConfig) EventConfig
-}
-
-// SpanOption are options that can be used at both the beginning and end of a span.
-type SpanOption interface {
- SpanStartOption
- SpanEndOption
-}
-
-// SpanStartEventOption are options that can be used at the start of a span, or with an event.
-type SpanStartEventOption interface {
- SpanStartOption
- EventOption
-}
-
-// SpanEndEventOption are options that can be used at the end of a span, or with an event.
-type SpanEndEventOption interface {
- SpanEndOption
- EventOption
-}
-
-type attributeOption []attribute.KeyValue
-
-func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
- c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
- return c
-}
-func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
-func (o attributeOption) applyEvent(c EventConfig) EventConfig {
- c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
- return c
-}
-
-var _ SpanStartEventOption = attributeOption{}
-
-// WithAttributes adds the attributes related to a span life-cycle event.
-// These attributes are used to describe the work a Span represents when this
-// option is provided to a Span's start or end events. Otherwise, these
-// attributes provide additional information about the event being recorded
-// (e.g. error, state change, processing progress, system event).
-//
-// If multiple of these options are passed the attributes of each successive
-// option will extend the attributes instead of overwriting. There is no
-// guarantee of uniqueness in the resulting attributes.
-func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
- return attributeOption(attributes)
-}
-
-// SpanEventOption are options that can be used with an event or a span.
-type SpanEventOption interface {
- SpanOption
- EventOption
-}
-
-type timestampOption time.Time
-
-func (o timestampOption) applySpan(c SpanConfig) SpanConfig {
- c.timestamp = time.Time(o)
- return c
-}
-func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
-func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
-func (o timestampOption) applyEvent(c EventConfig) EventConfig {
- c.timestamp = time.Time(o)
- return c
-}
-
-var _ SpanEventOption = timestampOption{}
-
-// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g.
-// started, stopped, errored).
-func WithTimestamp(t time.Time) SpanEventOption {
- return timestampOption(t)
-}
-
-type stackTraceOption bool
-
-func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
- c.stackTrace = bool(o)
- return c
-}
-
-func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
- c.stackTrace = bool(o)
- return c
-}
-func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
-
-// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false).
-func WithStackTrace(b bool) SpanEndEventOption {
- return stackTraceOption(b)
-}
-
-// WithLinks adds links to a Span. The links are added to the existing Span
-// links, i.e. this does not overwrite. Links with invalid span context are ignored.
-func WithLinks(links ...Link) SpanStartOption {
- return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
- cfg.links = append(cfg.links, links...)
- return cfg
- })
-}
-
-// WithNewRoot specifies that the Span should be treated as a root Span. Any
-// existing parent span context will be ignored when defining the Span's trace
-// identifiers.
-func WithNewRoot() SpanStartOption {
- return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
- cfg.newRoot = true
- return cfg
- })
-}
-
-// WithSpanKind sets the SpanKind of a Span.
-func WithSpanKind(kind SpanKind) SpanStartOption {
- return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
- cfg.spanKind = kind
- return cfg
- })
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) TracerOption {
- return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
- cfg.instrumentationVersion = version
- return cfg
- })
-}
-
-// WithInstrumentationAttributes sets the instrumentation attributes.
-//
-// The passed attributes will be de-duplicated.
-func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
- return tracerOptionFunc(func(config TracerConfig) TracerConfig {
- config.attrs = attribute.NewSet(attr...)
- return config
- })
-}
-
-// WithSchemaURL sets the schema URL for the Tracer.
-func WithSchemaURL(schemaURL string) TracerOption {
- return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
- cfg.schemaURL = schemaURL
- return cfg
- })
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
deleted file mode 100644
index 76f9a08..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/context.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import "context"
-
-type traceContextKeyType int
-
-const currentSpanKey traceContextKeyType = iota
-
-// ContextWithSpan returns a copy of parent with span set as the current Span.
-func ContextWithSpan(parent context.Context, span Span) context.Context {
- return context.WithValue(parent, currentSpanKey, span)
-}
-
-// ContextWithSpanContext returns a copy of parent with sc as the current
-// Span. The Span implementation that wraps sc is non-recording and performs
-// no operations other than to return sc as the SpanContext from the
-// SpanContext method.
-func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
- return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
-}
-
-// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly
-// as a remote SpanContext and as the current Span. The Span implementation
-// that wraps rsc is non-recording and performs no operations other than to
-// return rsc as the SpanContext from the SpanContext method.
-func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
- return ContextWithSpanContext(parent, rsc.WithRemote(true))
-}
-
-// SpanFromContext returns the current Span from ctx.
-//
-// If no Span is currently set in ctx an implementation of a Span that
-// performs no operations is returned.
-func SpanFromContext(ctx context.Context) Span {
- if ctx == nil {
- return noopSpan{}
- }
- if span, ok := ctx.Value(currentSpanKey).(Span); ok {
- return span
- }
- return noopSpan{}
-}
-
-// SpanContextFromContext returns the current Span's SpanContext.
-func SpanContextFromContext(ctx context.Context) SpanContext {
- return SpanFromContext(ctx).SpanContext()
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
deleted file mode 100644
index 440f3d7..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package trace provides an implementation of the tracing part of the
-OpenTelemetry API.
-
-To participate in distributed traces a Span needs to be created for the
-operation being performed as part of a traced workflow. In its simplest form:
-
- var tracer trace.Tracer
-
- func init() {
- tracer = otel.Tracer("instrumentation/package/name")
- }
-
- func operation(ctx context.Context) {
- var span trace.Span
- ctx, span = tracer.Start(ctx, "operation")
- defer span.End()
- // ...
- }
-
-A Tracer is unique to the instrumentation and is used to create Spans.
-Instrumentation should be designed to accept a TracerProvider from which it
-can create its own unique Tracer. Alternatively, the registered global
-TracerProvider from the go.opentelemetry.io/otel package can be used as
-a default.
-
- const (
- name = "instrumentation/package/name"
- version = "0.1.0"
- )
-
- type Instrumentation struct {
- tracer trace.Tracer
- }
-
- func NewInstrumentation(tp trace.TracerProvider) *Instrumentation {
- if tp == nil {
- tp = otel.TracerProvider()
- }
- return &Instrumentation{
- tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)),
- }
- }
-
- func operation(ctx context.Context, inst *Instrumentation) {
- var span trace.Span
- ctx, span = inst.tracer.Start(ctx, "operation")
- defer span.End()
- // ...
- }
-
-# API Implementations
-
-This package does not conform to the standard Go versioning policy; all of its
-interfaces may have methods added to them without a package major version bump.
-This non-standard API evolution could surprise an uninformed implementation
-author. They could unknowingly build their implementation in a way that would
-result in a runtime panic for their users that update to the new API.
-
-The API is designed to help inform an instrumentation author about this
-non-standard API evolution. It requires them to choose a default behavior for
-unimplemented interface methods. There are three behavior choices they can
-make:
-
- - Compilation failure
- - Panic
- - Default to another implementation
-
-All interfaces in this API embed a corresponding interface from
-[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
-behavior of their implementations to be a compilation failure, signaling to
-their users they need to update to the latest version of that implementation,
-they need to embed the corresponding interface from
-[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
-example,
-
- import "go.opentelemetry.io/otel/trace/embedded"
-
- type TracerProvider struct {
- embedded.TracerProvider
- // ...
- }
-
-If an author wants the default behavior of their implementations to panic, they
-can embed the API interface directly.
-
- import "go.opentelemetry.io/otel/trace"
-
- type TracerProvider struct {
- trace.TracerProvider
- // ...
- }
-
-This option is not recommended. It will lead to publishing packages that
-contain runtime panics when users update to newer versions of
-[go.opentelemetry.io/otel/trace], which may be done with a trasitive
-dependency.
-
-Finally, an author can embed another implementation in theirs. The embedded
-implementation will be used for methods not defined by the author. For example,
-an author who wants to default to silently dropping the call can use
-[go.opentelemetry.io/otel/trace/noop]:
-
- import "go.opentelemetry.io/otel/trace/noop"
-
- type TracerProvider struct {
- noop.TracerProvider
- // ...
- }
-
-It is strongly recommended that authors only embed
-[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
-That implementation is the only one OpenTelemetry authors can guarantee will
-fully implement all the API interfaces when a user updates their API.
-*/
-package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
deleted file mode 100644
index 898db5a..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package embedded provides interfaces embedded within the [OpenTelemetry
-// trace API].
-//
-// Implementers of the [OpenTelemetry trace API] can embed the relevant type
-// from this package into their implementation directly. Doing so will result
-// in a compilation error for users when the [OpenTelemetry trace API] is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-//
-// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
-package embedded // import "go.opentelemetry.io/otel/trace/embedded"
-
-// TracerProvider is embedded in
-// [go.opentelemetry.io/otel/trace.TracerProvider].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
-// experience a compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
-// interface is extended (which is something that can happen without a major
-// version bump of the API package).
-type TracerProvider interface{ tracerProvider() }
-
-// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
-// is extended (which is something that can happen without a major version bump
-// of the API package).
-type Tracer interface{ tracer() }
-
-// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
-//
-// Embed this interface in your implementation of the
-// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
-// compilation error, signaling they need to update to your latest
-// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
-// extended (which is something that can happen without a major version bump of
-// the API package).
-type Span interface{ span() }
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
deleted file mode 100644
index 88fcb81..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-// nonRecordingSpan is a minimal implementation of a Span that wraps a
-// SpanContext. It performs no operations other than to return the wrapped
-// SpanContext.
-type nonRecordingSpan struct {
- noopSpan
-
- sc SpanContext
-}
-
-// SpanContext returns the wrapped SpanContext.
-func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
deleted file mode 100644
index c125491..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-// NewNoopTracerProvider returns an implementation of TracerProvider that
-// performs no operations. The Tracer and Spans created from the returned
-// TracerProvider also perform no operations.
-//
-// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
-// instead.
-func NewNoopTracerProvider() TracerProvider {
- return noopTracerProvider{}
-}
-
-type noopTracerProvider struct{ embedded.TracerProvider }
-
-var _ TracerProvider = noopTracerProvider{}
-
-// Tracer returns noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
- return noopTracer{}
-}
-
-// noopTracer is an implementation of Tracer that performs no operations.
-type noopTracer struct{ embedded.Tracer }
-
-var _ Tracer = noopTracer{}
-
-// Start carries forward a non-recording Span, if one is present in the context, otherwise it
-// creates a no-op Span.
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
- span := SpanFromContext(ctx)
- if _, ok := span.(nonRecordingSpan); !ok {
- // span is likely already a noopSpan, but let's be sure
- span = noopSpan{}
- }
- return ContextWithSpan(ctx, span), span
-}
-
-// noopSpan is an implementation of Span that performs no operations.
-type noopSpan struct{ embedded.Span }
-
-var _ Span = noopSpan{}
-
-// SpanContext returns an empty span context.
-func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
-
-// IsRecording always returns false.
-func (noopSpan) IsRecording() bool { return false }
-
-// SetStatus does nothing.
-func (noopSpan) SetStatus(codes.Code, string) {}
-
-// SetError does nothing.
-func (noopSpan) SetError(bool) {}
-
-// SetAttributes does nothing.
-func (noopSpan) SetAttributes(...attribute.KeyValue) {}
-
-// End does nothing.
-func (noopSpan) End(...SpanEndOption) {}
-
-// RecordError does nothing.
-func (noopSpan) RecordError(error, ...EventOption) {}
-
-// AddEvent does nothing.
-func (noopSpan) AddEvent(string, ...EventOption) {}
-
-// SetName does nothing.
-func (noopSpan) SetName(string) {}
-
-// TracerProvider returns a no-op TracerProvider.
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} }
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
deleted file mode 100644
index 26a4b22..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ /dev/null
@@ -1,577 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "bytes"
- "context"
- "encoding/hex"
- "encoding/json"
-
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/trace/embedded"
-)
-
-const (
- // FlagsSampled is a bitmask with the sampled bit set. A SpanContext
- // with the sampling bit set means the span is sampled.
- FlagsSampled = TraceFlags(0x01)
-
- errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
-
- errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32"
- errNilTraceID errorConst = "trace-id can't be all zero"
-
- errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16"
- errNilSpanID errorConst = "span-id can't be all zero"
-)
-
-type errorConst string
-
-func (e errorConst) Error() string {
- return string(e)
-}
-
-// TraceID is a unique identity of a trace.
-// nolint:revive // revive complains about stutter of `trace.TraceID`.
-type TraceID [16]byte
-
-var (
- nilTraceID TraceID
- _ json.Marshaler = nilTraceID
-)
-
-// IsValid checks whether the trace TraceID is valid. A valid trace ID does
-// not consist of zeros only.
-func (t TraceID) IsValid() bool {
- return !bytes.Equal(t[:], nilTraceID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode TraceID
-// as a hex string.
-func (t TraceID) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
-}
-
-// String returns the hex string representation form of a TraceID.
-func (t TraceID) String() string {
- return hex.EncodeToString(t[:])
-}
-
-// SpanID is a unique identity of a span in a trace.
-type SpanID [8]byte
-
-var (
- nilSpanID SpanID
- _ json.Marshaler = nilSpanID
-)
-
-// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
-// of zeros only.
-func (s SpanID) IsValid() bool {
- return !bytes.Equal(s[:], nilSpanID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode SpanID
-// as a hex string.
-func (s SpanID) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
-}
-
-// String returns the hex string representation form of a SpanID.
-func (s SpanID) String() string {
- return hex.EncodeToString(s[:])
-}
-
-// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
-// the W3C trace-context specification. See more at
-// https://www.w3.org/TR/trace-context/#trace-id
-// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
-func TraceIDFromHex(h string) (TraceID, error) {
- t := TraceID{}
- if len(h) != 32 {
- return t, errInvalidTraceIDLength
- }
-
- if err := decodeHex(h, t[:]); err != nil {
- return t, err
- }
-
- if !t.IsValid() {
- return t, errNilTraceID
- }
- return t, nil
-}
-
-// SpanIDFromHex returns a SpanID from a hex string if it is compliant
-// with the w3c trace-context specification.
-// See more at https://www.w3.org/TR/trace-context/#parent-id
-func SpanIDFromHex(h string) (SpanID, error) {
- s := SpanID{}
- if len(h) != 16 {
- return s, errInvalidSpanIDLength
- }
-
- if err := decodeHex(h, s[:]); err != nil {
- return s, err
- }
-
- if !s.IsValid() {
- return s, errNilSpanID
- }
- return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
- for _, r := range h {
- switch {
- case 'a' <= r && r <= 'f':
- continue
- case '0' <= r && r <= '9':
- continue
- default:
- return errInvalidHexID
- }
- }
-
- decoded, err := hex.DecodeString(h)
- if err != nil {
- return err
- }
-
- copy(b, decoded)
- return nil
-}
-
-// TraceFlags contains flags that can be set on a SpanContext.
-type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
-
-// IsSampled returns if the sampling bit is set in the TraceFlags.
-func (tf TraceFlags) IsSampled() bool {
- return tf&FlagsSampled == FlagsSampled
-}
-
-// WithSampled sets the sampling bit in a new copy of the TraceFlags.
-func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag.
- if sampled {
- return tf | FlagsSampled
- }
-
- return tf &^ FlagsSampled
-}
-
-// MarshalJSON implements a custom marshal function to encode TraceFlags
-// as a hex string.
-func (tf TraceFlags) MarshalJSON() ([]byte, error) {
- return json.Marshal(tf.String())
-}
-
-// String returns the hex string representation form of TraceFlags.
-func (tf TraceFlags) String() string {
- return hex.EncodeToString([]byte{byte(tf)}[:])
-}
-
-// SpanContextConfig contains mutable fields usable for constructing
-// an immutable SpanContext.
-type SpanContextConfig struct {
- TraceID TraceID
- SpanID SpanID
- TraceFlags TraceFlags
- TraceState TraceState
- Remote bool
-}
-
-// NewSpanContext constructs a SpanContext using values from the provided
-// SpanContextConfig.
-func NewSpanContext(config SpanContextConfig) SpanContext {
- return SpanContext{
- traceID: config.TraceID,
- spanID: config.SpanID,
- traceFlags: config.TraceFlags,
- traceState: config.TraceState,
- remote: config.Remote,
- }
-}
-
-// SpanContext contains identifying trace information about a Span.
-type SpanContext struct {
- traceID TraceID
- spanID SpanID
- traceFlags TraceFlags
- traceState TraceState
- remote bool
-}
-
-var _ json.Marshaler = SpanContext{}
-
-// IsValid returns if the SpanContext is valid. A valid span context has a
-// valid TraceID and SpanID.
-func (sc SpanContext) IsValid() bool {
- return sc.HasTraceID() && sc.HasSpanID()
-}
-
-// IsRemote indicates whether the SpanContext represents a remotely-created Span.
-func (sc SpanContext) IsRemote() bool {
- return sc.remote
-}
-
-// WithRemote returns a copy of sc with the Remote property set to remote.
-func (sc SpanContext) WithRemote(remote bool) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: sc.spanID,
- traceFlags: sc.traceFlags,
- traceState: sc.traceState,
- remote: remote,
- }
-}
-
-// TraceID returns the TraceID from the SpanContext.
-func (sc SpanContext) TraceID() TraceID {
- return sc.traceID
-}
-
-// HasTraceID checks if the SpanContext has a valid TraceID.
-func (sc SpanContext) HasTraceID() bool {
- return sc.traceID.IsValid()
-}
-
-// WithTraceID returns a new SpanContext with the TraceID replaced.
-func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
- return SpanContext{
- traceID: traceID,
- spanID: sc.spanID,
- traceFlags: sc.traceFlags,
- traceState: sc.traceState,
- remote: sc.remote,
- }
-}
-
-// SpanID returns the SpanID from the SpanContext.
-func (sc SpanContext) SpanID() SpanID {
- return sc.spanID
-}
-
-// HasSpanID checks if the SpanContext has a valid SpanID.
-func (sc SpanContext) HasSpanID() bool {
- return sc.spanID.IsValid()
-}
-
-// WithSpanID returns a new SpanContext with the SpanID replaced.
-func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: spanID,
- traceFlags: sc.traceFlags,
- traceState: sc.traceState,
- remote: sc.remote,
- }
-}
-
-// TraceFlags returns the flags from the SpanContext.
-func (sc SpanContext) TraceFlags() TraceFlags {
- return sc.traceFlags
-}
-
-// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
-func (sc SpanContext) IsSampled() bool {
- return sc.traceFlags.IsSampled()
-}
-
-// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
-func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: sc.spanID,
- traceFlags: flags,
- traceState: sc.traceState,
- remote: sc.remote,
- }
-}
-
-// TraceState returns the TraceState from the SpanContext.
-func (sc SpanContext) TraceState() TraceState {
- return sc.traceState
-}
-
-// WithTraceState returns a new SpanContext with the TraceState replaced.
-func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
- return SpanContext{
- traceID: sc.traceID,
- spanID: sc.spanID,
- traceFlags: sc.traceFlags,
- traceState: state,
- remote: sc.remote,
- }
-}
-
-// Equal is a predicate that determines whether two SpanContext values are equal.
-func (sc SpanContext) Equal(other SpanContext) bool {
- return sc.traceID == other.traceID &&
- sc.spanID == other.spanID &&
- sc.traceFlags == other.traceFlags &&
- sc.traceState.String() == other.traceState.String() &&
- sc.remote == other.remote
-}
-
-// MarshalJSON implements a custom marshal function to encode a SpanContext.
-func (sc SpanContext) MarshalJSON() ([]byte, error) {
- return json.Marshal(SpanContextConfig{
- TraceID: sc.traceID,
- SpanID: sc.spanID,
- TraceFlags: sc.traceFlags,
- TraceState: sc.traceState,
- Remote: sc.remote,
- })
-}
-
-// Span is the individual component of a trace. It represents a single named
-// and timed operation of a workflow that is traced. A Tracer is used to
-// create a Span and it is then up to the operation the Span represents to
-// properly end the Span when the operation itself ends.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Span interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Span
-
- // End completes the Span. The Span is considered complete and ready to be
- // delivered through the rest of the telemetry pipeline after this method
- // is called. Therefore, updates to the Span are not allowed after this
- // method has been called.
- End(options ...SpanEndOption)
-
- // AddEvent adds an event with the provided name and options.
- AddEvent(name string, options ...EventOption)
-
- // IsRecording returns the recording state of the Span. It will return
- // true if the Span is active and events can be recorded.
- IsRecording() bool
-
- // RecordError will record err as an exception span event for this span. An
- // additional call to SetStatus is required if the Status of the Span should
- // be set to Error, as this method does not change the Span status. If this
- // span is not being recorded or err is nil then this method does nothing.
- RecordError(err error, options ...EventOption)
-
- // SpanContext returns the SpanContext of the Span. The returned SpanContext
- // is usable even after the End method has been called for the Span.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the Span in the form of a code and a
- // description, provided the status hasn't already been set to a higher
- // value before (OK > Error > Unset). The description is only included in a
- // status when the code is for an error.
- SetStatus(code codes.Code, description string)
-
- // SetName sets the Span name.
- SetName(name string)
-
- // SetAttributes sets kv as attributes of the Span. If a key from kv
- // already exists for an attribute of the Span it will be overwritten with
- // the value contained in kv.
- SetAttributes(kv ...attribute.KeyValue)
-
- // TracerProvider returns a TracerProvider that can be used to generate
- // additional Spans on the same telemetry pipeline as the current Span.
- TracerProvider() TracerProvider
-}
-
-// Link is the relationship between two Spans. The relationship can be within
-// the same Trace or across different Traces.
-//
-// For example, a Link is used in the following situations:
-//
-// 1. Batch Processing: A batch of operations may contain operations
-// associated with one or more traces/spans. Since there can only be one
-// parent SpanContext, a Link is used to keep reference to the
-// SpanContext of all operations in the batch.
-// 2. Public Endpoint: A SpanContext for an in incoming client request on a
-// public endpoint should be considered untrusted. In such a case, a new
-// trace with its own identity and sampling decision needs to be created,
-// but this new trace needs to be related to the original trace in some
-// form. A Link is used to keep reference to the original SpanContext and
-// track the relationship.
-type Link struct {
- // SpanContext of the linked Span.
- SpanContext SpanContext
-
- // Attributes describe the aspects of the link.
- Attributes []attribute.KeyValue
-}
-
-// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
-func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
- return Link{
- SpanContext: SpanContextFromContext(ctx),
- Attributes: attrs,
- }
-}
-
-// SpanKind is the role a Span plays in a Trace.
-type SpanKind int
-
-// As a convenience, these match the proto definition, see
-// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
-//
-// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
-// to coerce a span kind to a valid value.
-const (
- // SpanKindUnspecified is an unspecified SpanKind and is not a valid
- // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
- // if it is received.
- SpanKindUnspecified SpanKind = 0
- // SpanKindInternal is a SpanKind for a Span that represents an internal
- // operation within an application.
- SpanKindInternal SpanKind = 1
- // SpanKindServer is a SpanKind for a Span that represents the operation
- // of handling a request from a client.
- SpanKindServer SpanKind = 2
- // SpanKindClient is a SpanKind for a Span that represents the operation
- // of client making a request to a server.
- SpanKindClient SpanKind = 3
- // SpanKindProducer is a SpanKind for a Span that represents the operation
- // of a producer sending a message to a message broker. Unlike
- // SpanKindClient and SpanKindServer, there is often no direct
- // relationship between this kind of Span and a SpanKindConsumer kind. A
- // SpanKindProducer Span will end once the message is accepted by the
- // message broker which might not overlap with the processing of that
- // message.
- SpanKindProducer SpanKind = 4
- // SpanKindConsumer is a SpanKind for a Span that represents the operation
- // of a consumer receiving a message from a message broker. Like
- // SpanKindProducer Spans, there is often no direct relationship between
- // this Span and the Span that produced the message.
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
-
-// Tracer is the creator of Spans.
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type Tracer interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.Tracer
-
- // Start creates a span and a context.Context containing the newly-created span.
- //
- // If the context.Context provided in `ctx` contains a Span then the newly-created
- // Span will be a child of that span, otherwise it will be a root span. This behavior
- // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
- // newly-created Span to be a root span even if `ctx` contains a Span.
- //
- // When creating a Span it is recommended to provide all known span attributes using
- // the `WithAttributes()` SpanOption as samplers will only have access to the
- // attributes provided when a Span is created.
- //
- // Any Span that is created MUST also be ended. This is the responsibility of the user.
- // Implementations of this API may leak memory or other resources if Spans are not ended.
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
-}
-
-// TracerProvider provides Tracers that are used by instrumentation code to
-// trace computational workflows.
-//
-// A TracerProvider is the collection destination of all Spans from Tracers it
-// provides, it represents a unique telemetry collection pipeline. How that
-// pipeline is defined, meaning how those Spans are collected, processed, and
-// where they are exported, depends on its implementation. Instrumentation
-// authors do not need to define this implementation, rather just use the
-// provided Tracers to instrument code.
-//
-// Commonly, instrumentation code will accept a TracerProvider implementation
-// at runtime from its users or it can simply use the globally registered one
-// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
-//
-// Warning: Methods may be added to this interface in minor releases. See
-// package documentation on API implementation for information on how to set
-// default behavior for unimplemented methods.
-type TracerProvider interface {
- // Users of the interface can ignore this. This embedded type is only used
- // by implementations of this interface. See the "API Implementations"
- // section of the package documentation for more information.
- embedded.TracerProvider
-
- // Tracer returns a unique Tracer scoped to be used by instrumentation code
- // to trace computational workflows. The scope and identity of that
- // instrumentation code is uniquely defined by the name and options passed.
- //
- // The passed name needs to uniquely identify instrumentation code.
- // Therefore, it is recommended that name is the Go package name of the
- // library providing instrumentation (note: not the code being
- // instrumented). Instrumentation libraries can have multiple versions,
- // therefore, the WithInstrumentationVersion option should be used to
- // distinguish these different codebases. Additionally, instrumentation
- // libraries may sometimes use traces to communicate different domains of
- // workflow data (i.e. using spans to communicate workflow events only). If
- // this is the case, the WithScopeAttributes option should be used to
- // uniquely identify Tracers that handle the different domains of workflow
- // data.
- //
- // If the same name and options are passed multiple times, the same Tracer
- // will be returned (it is up to the implementation if this will be the
- // same underlying instance of that Tracer or not). It is not necessary to
- // call this multiple times with the same name and options to get an
- // up-to-date Tracer. All implementations will ensure any TracerProvider
- // configuration changes are propagated to all provided Tracers.
- //
- // If name is empty, then an implementation defined default name will be
- // used instead.
- //
- // This method is safe to call concurrently.
- Tracer(name string, options ...TracerOption) Tracer
-}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
deleted file mode 100644
index d1e47ca..0000000
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace // import "go.opentelemetry.io/otel/trace"
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strings"
-)
-
-const (
- maxListMembers = 32
-
- listDelimiter = ","
-
- // based on the W3C Trace Context specification, see
- // https://www.w3.org/TR/trace-context-1/#tracestate-header
- noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*`
- withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*`
- valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
-
- errInvalidKey errorConst = "invalid tracestate key"
- errInvalidValue errorConst = "invalid tracestate value"
- errInvalidMember errorConst = "invalid tracestate list-member"
- errMemberNumber errorConst = "too many list-members in tracestate"
- errDuplicate errorConst = "duplicate list-member in tracestate"
-)
-
-var (
- noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`)
- withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`)
- valueRe = regexp.MustCompile(`^` + valueFormat + `$`)
- memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
-)
-
-type member struct {
- Key string
- Value string
-}
-
-func newMember(key, value string) (member, error) {
- if len(key) > 256 {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
- }
- if !noTenantKeyRe.MatchString(key) {
- if !withTenantKeyRe.MatchString(key) {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
- }
- atIndex := strings.LastIndex(key, "@")
- if atIndex > 241 || len(key)-1-atIndex > 14 {
- return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
- }
- }
- if len(value) > 256 || !valueRe.MatchString(value) {
- return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
- }
- return member{Key: key, Value: value}, nil
-}
-
-func parseMember(m string) (member, error) {
- matches := memberRe.FindStringSubmatch(m)
- if len(matches) != 3 {
- return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
- }
- result, e := newMember(matches[1], matches[2])
- if e != nil {
- return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
- }
- return result, nil
-}
-
-// String encodes member into a string compliant with the W3C Trace Context
-// specification.
-func (m member) String() string {
- return fmt.Sprintf("%s=%s", m.Key, m.Value)
-}
-
-// TraceState provides additional vendor-specific trace identification
-// information across different distributed tracing systems. It represents an
-// immutable list consisting of key/value pairs, each pair is referred to as a
-// list-member.
-//
-// TraceState conforms to the W3C Trace Context specification
-// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
-// a TraceState do so by validating all input and will only produce TraceState
-// that conform to the specification. Specifically, this means that all
-// list-member's key/value pairs are valid, no duplicate list-members exist,
-// and the maximum number of list-members (32) is not exceeded.
-type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
- // list is the members in order.
- list []member
-}
-
-var _ json.Marshaler = TraceState{}
-
-// ParseTraceState attempts to decode a TraceState from the passed
-// string. It returns an error if the input is invalid according to the W3C
-// Trace Context specification.
-func ParseTraceState(tracestate string) (TraceState, error) {
- if tracestate == "" {
- return TraceState{}, nil
- }
-
- wrapErr := func(err error) error {
- return fmt.Errorf("failed to parse tracestate: %w", err)
- }
-
- var members []member
- found := make(map[string]struct{})
- for _, memberStr := range strings.Split(tracestate, listDelimiter) {
- if len(memberStr) == 0 {
- continue
- }
-
- m, err := parseMember(memberStr)
- if err != nil {
- return TraceState{}, wrapErr(err)
- }
-
- if _, ok := found[m.Key]; ok {
- return TraceState{}, wrapErr(errDuplicate)
- }
- found[m.Key] = struct{}{}
-
- members = append(members, m)
- if n := len(members); n > maxListMembers {
- return TraceState{}, wrapErr(errMemberNumber)
- }
- }
-
- return TraceState{list: members}, nil
-}
-
-// MarshalJSON marshals the TraceState into JSON.
-func (ts TraceState) MarshalJSON() ([]byte, error) {
- return json.Marshal(ts.String())
-}
-
-// String encodes the TraceState into a string compliant with the W3C
-// Trace Context specification. The returned string will be invalid if the
-// TraceState contains any invalid members.
-func (ts TraceState) String() string {
- members := make([]string, len(ts.list))
- for i, m := range ts.list {
- members[i] = m.String()
- }
- return strings.Join(members, listDelimiter)
-}
-
-// Get returns the value paired with key from the corresponding TraceState
-// list-member if it exists, otherwise an empty string is returned.
-func (ts TraceState) Get(key string) string {
- for _, member := range ts.list {
- if member.Key == key {
- return member.Value
- }
- }
-
- return ""
-}
-
-// Insert adds a new list-member defined by the key/value pair to the
-// TraceState. If a list-member already exists for the given key, that
-// list-member's value is updated. The new or updated list-member is always
-// moved to the beginning of the TraceState as specified by the W3C Trace
-// Context specification.
-//
-// If key or value are invalid according to the W3C Trace Context
-// specification an error is returned with the original TraceState.
-//
-// If adding a new list-member means the TraceState would have more members
-// then is allowed, the new list-member will be inserted and the right-most
-// list-member will be dropped in the returned TraceState.
-func (ts TraceState) Insert(key, value string) (TraceState, error) {
- m, err := newMember(key, value)
- if err != nil {
- return ts, err
- }
-
- cTS := ts.Delete(key)
- if cTS.Len()+1 <= maxListMembers {
- cTS.list = append(cTS.list, member{})
- }
- // When the number of members exceeds capacity, drop the "right-most".
- copy(cTS.list[1:], cTS.list)
- cTS.list[0] = m
-
- return cTS, nil
-}
-
-// Delete returns a copy of the TraceState with the list-member identified by
-// key removed.
-func (ts TraceState) Delete(key string) TraceState {
- members := make([]member, ts.Len())
- copy(members, ts.list)
- for i, member := range ts.list {
- if member.Key == key {
- members = append(members[:i], members[i+1:]...)
- // TraceState should contain no duplicate members.
- break
- }
- }
- return TraceState{list: members}
-}
-
-// Len returns the number of list-members in the TraceState.
-func (ts TraceState) Len() int {
- return len(ts.list)
-}
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
deleted file mode 100644
index dbb61a4..0000000
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
- printf "GOPATH is not defined.\n"
- exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
- printf "GOPATH ${GOPATH} is invalid \n"
- exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
- git status
- printf "\n\nError: working tree is not clean\n"
- exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
- printf "$(git log -1)"
- printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
- printf " Update go.mod for $dir\n"
- (cd "${DIR_TMP}/${dir}" && \
- # replaces is ("mod1" "mod2" …)
- replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
- # strip double quotes
- replaces=("${replaces[@]%\"}") && \
- replaces=("${replaces[@]#\"}") && \
- # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
- dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
- go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
- go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
- printf " Build $ex in ${DIR_TMP}/${ex}\n"
- (cd "${DIR_TMP}/${ex}" && \
- go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
deleted file mode 100644
index e2f7435..0000000
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel // import "go.opentelemetry.io/otel"
-
-// Version is the current release version of OpenTelemetry in use.
-func Version() string {
- return "1.21.0"
-}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
deleted file mode 100644
index 3c153c9..0000000
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-module-sets:
- stable-v1:
- version: v1.21.0
- modules:
- - go.opentelemetry.io/otel
- - go.opentelemetry.io/otel/bridge/opentracing
- - go.opentelemetry.io/otel/bridge/opentracing/test
- - go.opentelemetry.io/otel/example/dice
- - go.opentelemetry.io/otel/example/namedtracer
- - go.opentelemetry.io/otel/example/otel-collector
- - go.opentelemetry.io/otel/example/passthrough
- - go.opentelemetry.io/otel/example/zipkin
- - go.opentelemetry.io/otel/exporters/otlp/otlptrace
- - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- - go.opentelemetry.io/otel/exporters/zipkin
- - go.opentelemetry.io/otel/metric
- - go.opentelemetry.io/otel/sdk
- - go.opentelemetry.io/otel/sdk/metric
- - go.opentelemetry.io/otel/trace
- experimental-metrics:
- version: v0.44.0
- modules:
- - go.opentelemetry.io/otel/bridge/opencensus
- - go.opentelemetry.io/otel/bridge/opencensus/test
- - go.opentelemetry.io/otel/example/opencensus
- - go.opentelemetry.io/otel/example/prometheus
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- - go.opentelemetry.io/otel/exporters/prometheus
- - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- experimental-schema:
- version: v0.0.7
- modules:
- - go.opentelemetry.io/otel/schema
-excluded-modules:
- - go.opentelemetry.io/otel/internal/tools
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
new file mode 100644
index 0000000..d33c889
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.13
+
+package poly1305
+
+// Generic fallbacks for the math/bits intrinsics, copied from
+// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
+// variable time fallbacks until Go 1.13.
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+ sum = x + y + carry
+ carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
+ return
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ diff = x - y - borrow
+ borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
+ return
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+ const mask32 = 1<<32 - 1
+ x0 := x & mask32
+ x1 := x >> 32
+ y0 := y & mask32
+ y1 := y >> 32
+ w0 := x0 * y0
+ t := x1*y0 + w0>>32
+ w1 := t & mask32
+ w2 := t >> 32
+ w1 += x0 * y1
+ hi = x1*y1 + w2 + w1>>32
+ lo = x * y
+ return
+}
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
new file mode 100644
index 0000000..495c1fa
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.13
+
+package poly1305
+
+import "math/bits"
+
+func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
+ return bits.Add64(x, y, carry)
+}
+
+func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
+ return bits.Sub64(x, y, borrow)
+}
+
+func bitsMul64(x, y uint64) (hi, lo uint64) {
+ return bits.Mul64(x, y)
+}
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
index ec2202b..e041da5 100644
--- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -7,10 +7,7 @@
package poly1305
-import (
- "encoding/binary"
- "math/bits"
-)
+import "encoding/binary"
// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
// for a 64 bytes message is approximately
@@ -117,13 +114,13 @@
}
func mul64(a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
+ hi, lo := bitsMul64(a, b)
return uint128{lo, hi}
}
func add128(a, b uint128) uint128 {
- lo, c := bits.Add64(a.lo, b.lo, 0)
- hi, c := bits.Add64(a.hi, b.hi, c)
+ lo, c := bitsAdd64(a.lo, b.lo, 0)
+ hi, c := bitsAdd64(a.hi, b.hi, c)
if c != 0 {
panic("poly1305: unexpected overflow")
}
@@ -158,8 +155,8 @@
// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
// add 1 to the most significant (2¹²⁸) limb, h2.
if len(msg) >= TagSize {
- h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
- h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
h2 += c + 1
msg = msg[TagSize:]
@@ -168,8 +165,8 @@
copy(buf[:], msg)
buf[len(msg)] = 1
- h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
- h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+ h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+ h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
h2 += c
msg = nil
@@ -222,9 +219,9 @@
m3 := h2r1
t0 := m0.lo
- t1, c := bits.Add64(m1.lo, m0.hi, 0)
- t2, c := bits.Add64(m2.lo, m1.hi, c)
- t3, _ := bits.Add64(m3.lo, m2.hi, c)
+ t1, c := bitsAdd64(m1.lo, m0.hi, 0)
+ t2, c := bitsAdd64(m2.lo, m1.hi, c)
+ t3, _ := bitsAdd64(m3.lo, m2.hi, c)
// Now we have the result as 4 64-bit limbs, and we need to reduce it
// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
@@ -246,14 +243,14 @@
// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
- h0, c = bits.Add64(h0, cc.lo, 0)
- h1, c = bits.Add64(h1, cc.hi, c)
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
h2 += c
cc = shiftRightBy2(cc)
- h0, c = bits.Add64(h0, cc.lo, 0)
- h1, c = bits.Add64(h1, cc.hi, c)
+ h0, c = bitsAdd64(h0, cc.lo, 0)
+ h1, c = bitsAdd64(h1, cc.hi, c)
h2 += c
// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
@@ -290,9 +287,9 @@
// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
// result if the subtraction underflows, and t otherwise.
- hMinusP0, b := bits.Sub64(h0, p0, 0)
- hMinusP1, b := bits.Sub64(h1, p1, b)
- _, b = bits.Sub64(h2, p2, b)
+ hMinusP0, b := bitsSub64(h0, p0, 0)
+ hMinusP1, b := bitsSub64(h1, p1, b)
+ _, b = bitsSub64(h2, p2, b)
// h = h if h < p else h - p
h0 = select64(b, h0, hMinusP0)
@@ -304,8 +301,8 @@
//
// by just doing a wide addition with the 128 low bits of h and discarding
// the overflow.
- h0, c := bits.Add64(h0, s[0], 0)
- h1, _ = bits.Add64(h1, s[1], c)
+ h0, c := bitsAdd64(h0, s[0], 0)
+ h1, _ = bitsAdd64(h1, s[1], c)
binary.LittleEndian.PutUint64(out[0:8], h0)
binary.LittleEndian.PutUint64(out[8:16], h1)
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index 02ccd08..12b12a3 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -12,7 +12,6 @@
"os"
"path/filepath"
"runtime"
- "sync"
"time"
"cloud.google.com/go/compute/metadata"
@@ -42,20 +41,12 @@
// running on Google Cloud Platform.
JSON []byte
- udMu sync.Mutex // guards universeDomain
// universeDomain is the default service domain for a given Cloud universe.
universeDomain string
}
// UniverseDomain returns the default service domain for a given Cloud universe.
-//
// The default value is "googleapis.com".
-//
-// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports
-// obtaining the universe domain when authenticating via the GCE metadata server.
-// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the
-// default value when authenticating via the GCE metadata server.
-// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
func (c *Credentials) UniverseDomain() string {
if c.universeDomain == "" {
return universeDomainDefault
@@ -63,55 +54,6 @@
return c.universeDomain
}
-// GetUniverseDomain returns the default service domain for a given Cloud
-// universe.
-//
-// The default value is "googleapis.com".
-//
-// It obtains the universe domain from the attached service account on GCE when
-// authenticating via the GCE metadata server. See also [The attached service
-// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
-// If the GCE metadata server returns a 404 error, the default value is
-// returned. If the GCE metadata server returns an error other than 404, the
-// error is returned.
-func (c *Credentials) GetUniverseDomain() (string, error) {
- c.udMu.Lock()
- defer c.udMu.Unlock()
- if c.universeDomain == "" && metadata.OnGCE() {
- // If we're on Google Compute Engine, an App Engine standard second
- // generation runtime, or App Engine flexible, use the metadata server.
- err := c.computeUniverseDomain()
- if err != nil {
- return "", err
- }
- }
- // If not on Google Compute Engine, or in case of any non-error path in
- // computeUniverseDomain that did not set universeDomain, set the default
- // universe domain.
- if c.universeDomain == "" {
- c.universeDomain = universeDomainDefault
- }
- return c.universeDomain, nil
-}
-
-// computeUniverseDomain fetches the default service domain for a given Cloud
-// universe from Google Compute Engine (GCE)'s metadata server. It's only valid
-// to use this method if your program is running on a GCE instance.
-func (c *Credentials) computeUniverseDomain() error {
- var err error
- c.universeDomain, err = metadata.Get("universe/universe_domain")
- if err != nil {
- if _, ok := err.(metadata.NotDefinedError); ok {
- // http.StatusNotFound (404)
- c.universeDomain = universeDomainDefault
- return nil
- } else {
- return err
- }
- }
- return nil
-}
-
// DefaultCredentials is the old name of Credentials.
//
// Deprecated: use Credentials instead.
@@ -149,12 +91,6 @@
// Note: This option is currently only respected when using credentials
// fetched from the GCE metadata server.
EarlyTokenRefresh time.Duration
-
- // UniverseDomain is the default service domain for a given Cloud universe.
- // Only supported in authentication flows that support universe domains.
- // This value takes precedence over a universe domain explicitly specified
- // in a credentials config file or by the GCE metadata server. Optional.
- UniverseDomain string
}
func (params CredentialsParams) deepCopy() CredentialsParams {
@@ -239,9 +175,8 @@
if metadata.OnGCE() {
id, _ := metadata.ProjectID()
return &Credentials{
- ProjectID: id,
- TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
- universeDomain: params.UniverseDomain,
+ ProjectID: id,
+ TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
}, nil
}
@@ -282,9 +217,6 @@
}
universeDomain := f.UniverseDomain
- if params.UniverseDomain != "" {
- universeDomain = params.UniverseDomain
- }
// Authorized user credentials are only supported in the googleapis.com universe.
if f.Type == userCredentialsKey {
universeDomain = universeDomainDefault
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 948a3ee..b18efb7 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -4,9 +4,6 @@
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
-//
-// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
-// returning errors.
package errgroup
import (
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index c649202..6202638 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -248,7 +248,6 @@
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nf_tables.h>
#include <linux/netlink.h>
#include <linux/net_namespace.h>
#include <linux/nfc.h>
@@ -284,6 +283,10 @@
#include <asm/termbits.h>
#endif
+#ifndef MSG_FASTOPEN
+#define MSG_FASTOPEN 0x20000000
+#endif
+
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 0xc
#endif
@@ -292,6 +295,14 @@
#define PTRACE_SETREGS 0xd
#endif
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
+#ifndef SOL_SMC
+#define SOL_SMC 286
+#endif
+
#ifdef SOL_BLUETOOTH
// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
// but it is already in bluetooth_linux.go
@@ -308,23 +319,10 @@
#undef TIPC_WAIT_FOREVER
#define TIPC_WAIT_FOREVER 0xffffffff
-// Copied from linux/netfilter/nf_nat.h
-// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h
-// and netinet/in.h.
-#define NF_NAT_RANGE_MAP_IPS (1 << 0)
-#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1)
-#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
-#define NF_NAT_RANGE_PERSISTENT (1 << 3)
-#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
-#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
-#define NF_NAT_RANGE_NETMAP (1 << 6)
-#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
- (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
-#define NF_NAT_RANGE_MASK \
- (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
- NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
- NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \
- NF_NAT_RANGE_NETMAP)
+// Copied from linux/l2tp.h
+// Including linux/l2tp.h here causes conflicts between linux/in.h
+// and netinet/in.h included via net/route.h above.
+#define IPPROTO_L2TP 115
// Copied from linux/hid.h.
// Keep in sync with the size of the referenced fields.
@@ -605,9 +603,6 @@
$2 ~ /^FSOPT_/ ||
$2 ~ /^WDIO[CFS]_/ ||
$2 ~ /^NFN/ ||
- $2 !~ /^NFT_META_IIFTYPE/ &&
- $2 ~ /^NFT_/ ||
- $2 ~ /^NF_NAT_/ ||
$2 ~ /^XDP_/ ||
$2 ~ /^RWF_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ ||
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index a5d3ff8..c73cfe2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -2127,60 +2127,6 @@
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
- NFT_CHAIN_FLAGS = 0x7
- NFT_CHAIN_MAXNAMELEN = 0x100
- NFT_CT_MAX = 0x17
- NFT_DATA_RESERVED_MASK = 0xffffff00
- NFT_DATA_VALUE_MAXLEN = 0x40
- NFT_EXTHDR_OP_MAX = 0x4
- NFT_FIB_RESULT_MAX = 0x3
- NFT_INNER_MASK = 0xf
- NFT_LOGLEVEL_MAX = 0x8
- NFT_NAME_MAXLEN = 0x100
- NFT_NG_MAX = 0x1
- NFT_OBJECT_CONNLIMIT = 0x5
- NFT_OBJECT_COUNTER = 0x1
- NFT_OBJECT_CT_EXPECT = 0x9
- NFT_OBJECT_CT_HELPER = 0x3
- NFT_OBJECT_CT_TIMEOUT = 0x7
- NFT_OBJECT_LIMIT = 0x4
- NFT_OBJECT_MAX = 0xa
- NFT_OBJECT_QUOTA = 0x2
- NFT_OBJECT_SECMARK = 0x8
- NFT_OBJECT_SYNPROXY = 0xa
- NFT_OBJECT_TUNNEL = 0x6
- NFT_OBJECT_UNSPEC = 0x0
- NFT_OBJ_MAXNAMELEN = 0x100
- NFT_OSF_MAXGENRELEN = 0x10
- NFT_QUEUE_FLAG_BYPASS = 0x1
- NFT_QUEUE_FLAG_CPU_FANOUT = 0x2
- NFT_QUEUE_FLAG_MASK = 0x3
- NFT_REG32_COUNT = 0x10
- NFT_REG32_SIZE = 0x4
- NFT_REG_MAX = 0x4
- NFT_REG_SIZE = 0x10
- NFT_REJECT_ICMPX_MAX = 0x3
- NFT_RT_MAX = 0x4
- NFT_SECMARK_CTX_MAXLEN = 0x100
- NFT_SET_MAXNAMELEN = 0x100
- NFT_SOCKET_MAX = 0x3
- NFT_TABLE_F_MASK = 0x3
- NFT_TABLE_MAXNAMELEN = 0x100
- NFT_TRACETYPE_MAX = 0x3
- NFT_TUNNEL_F_MASK = 0x7
- NFT_TUNNEL_MAX = 0x1
- NFT_TUNNEL_MODE_MAX = 0x2
- NFT_USERDATA_MAXLEN = 0x100
- NFT_XFRM_KEY_MAX = 0x6
- NF_NAT_RANGE_MAP_IPS = 0x1
- NF_NAT_RANGE_MASK = 0x7f
- NF_NAT_RANGE_NETMAP = 0x40
- NF_NAT_RANGE_PERSISTENT = 0x8
- NF_NAT_RANGE_PROTO_OFFSET = 0x20
- NF_NAT_RANGE_PROTO_RANDOM = 0x4
- NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14
- NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10
- NF_NAT_RANGE_PROTO_SPECIFIED = 0x2
NILFS_SUPER_MAGIC = 0x3434
NL0 = 0x0
NL1 = 0x100
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 9dc4241..a1d0615 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 0d3a075..5b2a740 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index c39f777..f6eda13 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 57571d0..55df20a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index e62963e..8c1155c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index 0083135..7cc80c5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 79029ed..0688737 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -2297,3 +2297,5 @@
var libc_unveil_trampoline_addr uintptr
//go:cgo_import_dynamic libc_unveil unveil "libc.so"
+
+
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index ffb8708..47dc579 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -194,7 +194,6 @@
//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW
//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
//sys SetEndOfFile(handle Handle) (err error)
-//sys SetFileValidData(handle Handle, validDataLength int64) (err error)
//sys GetSystemTimeAsFileTime(time *Filetime)
//sys GetSystemTimePreciseAsFileTime(time *Filetime)
//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff]
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index e8791c8..146a1f0 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -342,7 +342,6 @@
procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories")
procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW")
procSetEndOfFile = modkernel32.NewProc("SetEndOfFile")
- procSetFileValidData = modkernel32.NewProc("SetFileValidData")
procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW")
procSetErrorMode = modkernel32.NewProc("SetErrorMode")
procSetEvent = modkernel32.NewProc("SetEvent")
@@ -2989,14 +2988,6 @@
return
}
-func SetFileValidData(handle Handle, validDataLength int64) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
- if r1 == 0 {
- err = errnoErr(e1)
- }
- return
-}
-
func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
if r1 == 0 {
diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE
new file mode 100644
index 0000000..e4a47e1
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2019 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README
new file mode 100644
index 0000000..aac7867
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/README
@@ -0,0 +1,2 @@
+This repository holds the transition packages for the new Go 1.13 error values.
+See golang.org/design/29934-error-values.
diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go
new file mode 100644
index 0000000..4317f24
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/adaptor.go
@@ -0,0 +1,193 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xerrors
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+// FormatError calls the FormatError method of f with an errors.Printer
+// configured according to s and verb, and writes the result to s.
+func FormatError(f Formatter, s fmt.State, verb rune) {
+ // Assuming this function is only called from the Format method, and given
+ // that FormatError takes precedence over Format, it cannot be called from
+ // any package that supports errors.Formatter. It is therefore safe to
+ // disregard that State may be a specific printer implementation and use one
+ // of our choice instead.
+
+ // limitations: does not support printing error as Go struct.
+
+ var (
+ sep = " " // separator before next error
+ p = &state{State: s}
+ direct = true
+ )
+
+ var err error = f
+
+ switch verb {
+ // Note that this switch must match the preference order
+ // for ordinary string printing (%#v before %+v, and so on).
+
+ case 'v':
+ if s.Flag('#') {
+ if stringer, ok := err.(fmt.GoStringer); ok {
+ io.WriteString(&p.buf, stringer.GoString())
+ goto exit
+ }
+ // proceed as if it were %v
+ } else if s.Flag('+') {
+ p.printDetail = true
+ sep = "\n - "
+ }
+ case 's':
+ case 'q', 'x', 'X':
+ // Use an intermediate buffer in the rare cases that precision,
+ // truncation, or one of the alternative verbs (q, x, and X) are
+ // specified.
+ direct = false
+
+ default:
+ p.buf.WriteString("%!")
+ p.buf.WriteRune(verb)
+ p.buf.WriteByte('(')
+ switch {
+ case err != nil:
+ p.buf.WriteString(reflect.TypeOf(f).String())
+ default:
+ p.buf.WriteString("<nil>")
+ }
+ p.buf.WriteByte(')')
+ io.Copy(s, &p.buf)
+ return
+ }
+
+loop:
+ for {
+ switch v := err.(type) {
+ case Formatter:
+ err = v.FormatError((*printer)(p))
+ case fmt.Formatter:
+ v.Format(p, 'v')
+ break loop
+ default:
+ io.WriteString(&p.buf, v.Error())
+ break loop
+ }
+ if err == nil {
+ break
+ }
+ if p.needColon || !p.printDetail {
+ p.buf.WriteByte(':')
+ p.needColon = false
+ }
+ p.buf.WriteString(sep)
+ p.inDetail = false
+ p.needNewline = false
+ }
+
+exit:
+ width, okW := s.Width()
+ prec, okP := s.Precision()
+
+ if !direct || (okW && width > 0) || okP {
+ // Construct format string from State s.
+ format := []byte{'%'}
+ if s.Flag('-') {
+ format = append(format, '-')
+ }
+ if s.Flag('+') {
+ format = append(format, '+')
+ }
+ if s.Flag(' ') {
+ format = append(format, ' ')
+ }
+ if okW {
+ format = strconv.AppendInt(format, int64(width), 10)
+ }
+ if okP {
+ format = append(format, '.')
+ format = strconv.AppendInt(format, int64(prec), 10)
+ }
+ format = append(format, string(verb)...)
+ fmt.Fprintf(s, string(format), p.buf.String())
+ } else {
+ io.Copy(s, &p.buf)
+ }
+}
+
+var detailSep = []byte("\n ")
+
+// state tracks error printing state. It implements fmt.State.
+type state struct {
+ fmt.State
+ buf bytes.Buffer
+
+ printDetail bool
+ inDetail bool
+ needColon bool
+ needNewline bool
+}
+
+func (s *state) Write(b []byte) (n int, err error) {
+ if s.printDetail {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ if s.inDetail && s.needColon {
+ s.needNewline = true
+ if b[0] == '\n' {
+ b = b[1:]
+ }
+ }
+ k := 0
+ for i, c := range b {
+ if s.needNewline {
+ if s.inDetail && s.needColon {
+ s.buf.WriteByte(':')
+ s.needColon = false
+ }
+ s.buf.Write(detailSep)
+ s.needNewline = false
+ }
+ if c == '\n' {
+ s.buf.Write(b[k:i])
+ k = i + 1
+ s.needNewline = true
+ }
+ }
+ s.buf.Write(b[k:])
+ if !s.inDetail {
+ s.needColon = true
+ }
+ } else if !s.inDetail {
+ s.buf.Write(b)
+ }
+ return len(b), nil
+}
+
+// printer wraps a state to implement an xerrors.Printer.
+type printer state
+
+func (s *printer) Print(args ...interface{}) {
+ if !s.inDetail || s.printDetail {
+ fmt.Fprint((*state)(s), args...)
+ }
+}
+
+func (s *printer) Printf(format string, args ...interface{}) {
+ if !s.inDetail || s.printDetail {
+ fmt.Fprintf((*state)(s), format, args...)
+ }
+}
+
+func (s *printer) Detail() bool {
+ s.inDetail = true
+ return s.printDetail
+}
diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go
new file mode 100644
index 0000000..2ef99f5
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/doc.go
@@ -0,0 +1,23 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xerrors implements functions to manipulate errors.
+//
+// This package is based on the Go 2 proposal for error values:
+//
+// https://golang.org/design/29934-error-values
+//
+// These functions were incorporated into the standard library's errors package
+// in Go 1.13:
+// - Is
+// - As
+// - Unwrap
+//
+// Also, Errorf's %w verb was incorporated into fmt.Errorf.
+//
+// Use this package to get equivalent behavior in all supported Go versions.
+//
+// No other features of this package were included in Go 1.13, and at present
+// there are no plans to include any of them.
+package xerrors // import "golang.org/x/xerrors"
diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go
new file mode 100644
index 0000000..e88d377
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/errors.go
@@ -0,0 +1,33 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xerrors
+
+import "fmt"
+
+// errorString is a trivial implementation of error.
+type errorString struct {
+ s string
+ frame Frame
+}
+
+// New returns an error that formats as the given text.
+//
+// The returned error contains a Frame set to the caller's location and
+// implements Formatter to show this information when printed with details.
+func New(text string) error {
+ return &errorString{text, Caller(1)}
+}
+
+func (e *errorString) Error() string {
+ return e.s
+}
+
+func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) }
+
+func (e *errorString) FormatError(p Printer) (next error) {
+ p.Print(e.s)
+ e.frame.Format(p)
+ return nil
+}
diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go
new file mode 100644
index 0000000..27a5d70
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/fmt.go
@@ -0,0 +1,190 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xerrors
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/xerrors/internal"
+)
+
+const percentBangString = "%!"
+
+// Errorf formats according to a format specifier and returns the string as a
+// value that satisfies error.
+//
+// The returned error includes the file and line number of the caller when
+// formatted with additional detail enabled. If the last argument is an error
+// the returned error's Format method will return it if the format string ends
+// with ": %s", ": %v", or ": %w". If the last argument is an error and the
+// format string ends with ": %w", the returned error implements an Unwrap
+// method returning it.
+//
+// If the format specifier includes a %w verb with an error operand in a
+// position other than at the end, the returned error will still implement an
+// Unwrap method returning the operand, but the error's Format method will not
+// return the wrapped error.
+//
+// It is invalid to include more than one %w verb or to supply it with an
+// operand that does not implement the error interface. The %w verb is otherwise
+// a synonym for %v.
+//
+// Note that as of Go 1.13, the fmt.Errorf function will do error formatting,
+// but it will not capture a stack backtrace.
+func Errorf(format string, a ...interface{}) error {
+ format = formatPlusW(format)
+ // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
+ wrap := strings.HasSuffix(format, ": %w")
+ idx, format2, ok := parsePercentW(format)
+ percentWElsewhere := !wrap && idx >= 0
+ if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) {
+ err := errorAt(a, len(a)-1)
+ if err == nil {
+ return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)}
+ }
+ // TODO: this is not entirely correct. The error value could be
+ // printed elsewhere in format if it mixes numbered with unnumbered
+ // substitutions. With relatively small changes to doPrintf we can
+ // have it optionally ignore extra arguments and pass the argument
+ // list in its entirety.
+ msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...)
+ frame := Frame{}
+ if internal.EnableTrace {
+ frame = Caller(1)
+ }
+ if wrap {
+ return &wrapError{msg, err, frame}
+ }
+ return &noWrapError{msg, err, frame}
+ }
+ // Support %w anywhere.
+ // TODO: don't repeat the wrapped error's message when %w occurs in the middle.
+ msg := fmt.Sprintf(format2, a...)
+ if idx < 0 {
+ return &noWrapError{msg, nil, Caller(1)}
+ }
+ err := errorAt(a, idx)
+ if !ok || err == nil {
+ // Too many %ws or argument of %w is not an error. Approximate the Go
+ // 1.13 fmt.Errorf message.
+ return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)}
+ }
+ frame := Frame{}
+ if internal.EnableTrace {
+ frame = Caller(1)
+ }
+ return &wrapError{msg, err, frame}
+}
+
+func errorAt(args []interface{}, i int) error {
+ if i < 0 || i >= len(args) {
+ return nil
+ }
+ err, ok := args[i].(error)
+ if !ok {
+ return nil
+ }
+ return err
+}
+
+// formatPlusW is used to avoid the vet check that will barf at %w.
+func formatPlusW(s string) string {
+ return s
+}
+
+// Return the index of the only %w in format, or -1 if none.
+// Also return a rewritten format string with %w replaced by %v, and
+// false if there is more than one %w.
+// TODO: handle "%[N]w".
+func parsePercentW(format string) (idx int, newFormat string, ok bool) {
+ // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go.
+ idx = -1
+ ok = true
+ n := 0
+ sz := 0
+ var isW bool
+ for i := 0; i < len(format); i += sz {
+ if format[i] != '%' {
+ sz = 1
+ continue
+ }
+ // "%%" is not a format directive.
+ if i+1 < len(format) && format[i+1] == '%' {
+ sz = 2
+ continue
+ }
+ sz, isW = parsePrintfVerb(format[i:])
+ if isW {
+ if idx >= 0 {
+ ok = false
+ } else {
+ idx = n
+ }
+ // "Replace" the last character, the 'w', with a 'v'.
+ p := i + sz - 1
+ format = format[:p] + "v" + format[p+1:]
+ }
+ n++
+ }
+ return idx, format, ok
+}
+
+// Parse the printf verb starting with a % at s[0].
+// Return how many bytes it occupies and whether the verb is 'w'.
+func parsePrintfVerb(s string) (int, bool) {
+ // Assume only that the directive is a sequence of non-letters followed by a single letter.
+ sz := 0
+ var r rune
+ for i := 1; i < len(s); i += sz {
+ r, sz = utf8.DecodeRuneInString(s[i:])
+ if unicode.IsLetter(r) {
+ return i + sz, r == 'w'
+ }
+ }
+ return len(s), false
+}
+
+type noWrapError struct {
+ msg string
+ err error
+ frame Frame
+}
+
+func (e *noWrapError) Error() string {
+ return fmt.Sprint(e)
+}
+
+func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }
+
+func (e *noWrapError) FormatError(p Printer) (next error) {
+ p.Print(e.msg)
+ e.frame.Format(p)
+ return e.err
+}
+
+type wrapError struct {
+ msg string
+ err error
+ frame Frame
+}
+
+func (e *wrapError) Error() string {
+ return fmt.Sprint(e)
+}
+
+func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }
+
+func (e *wrapError) FormatError(p Printer) (next error) {
+ p.Print(e.msg)
+ e.frame.Format(p)
+ return e.err
+}
+
+func (e *wrapError) Unwrap() error {
+ return e.err
+}
diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go
new file mode 100644
index 0000000..1bc9c26
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/format.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xerrors
+
+// A Formatter formats error messages.
+type Formatter interface {
+ error
+
+ // FormatError prints the receiver's first error and returns the next error in
+ // the error chain, if any.
+ FormatError(p Printer) (next error)
+}
+
+// A Printer formats error messages.
+//
+// The most common implementation of Printer is the one provided by package fmt
+// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message
+// typically provide their own implementations.
+type Printer interface {
+ // Print appends args to the message output.
+ Print(args ...interface{})
+
+ // Printf writes a formatted string.
+ Printf(format string, args ...interface{})
+
+ // Detail reports whether error detail is requested.
+ // After the first call to Detail, all text written to the Printer
+ // is formatted as additional detail, or ignored when
+ // detail has not been requested.
+ // If Detail returns false, the caller can avoid printing the detail at all.
+ Detail() bool
+}
diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go
new file mode 100644
index 0000000..0de628e
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/frame.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xerrors
+
+import (
+ "runtime"
+)
+
+// A Frame contains part of a call stack.
+type Frame struct {
+ // Make room for three PCs: the one we were asked for, what it called,
+ // and possibly a PC for skipPleaseUseCallersFrames. See:
+ // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169
+ frames [3]uintptr
+}
+
+// Caller returns a Frame that describes a frame on the caller's stack.
+// The argument skip is the number of frames to skip over.
+// Caller(0) returns the frame for the caller of Caller.
+func Caller(skip int) Frame {
+ var s Frame
+ runtime.Callers(skip+1, s.frames[:])
+ return s
+}
+
+// location reports the file, line, and function of a frame.
+//
+// The returned function may be "" even if file and line are not.
+func (f Frame) location() (function, file string, line int) {
+ frames := runtime.CallersFrames(f.frames[:])
+ if _, ok := frames.Next(); !ok {
+ return "", "", 0
+ }
+ fr, ok := frames.Next()
+ if !ok {
+ return "", "", 0
+ }
+ return fr.Function, fr.File, fr.Line
+}
+
+// Format prints the stack as error detail.
+// It should be called from an error's Format implementation
+// after printing any other error detail.
+func (f Frame) Format(p Printer) {
+ if p.Detail() {
+ function, file, line := f.location()
+ if function != "" {
+ p.Printf("%s\n ", function)
+ }
+ if file != "" {
+ p.Printf("%s:%d\n", file, line)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go
new file mode 100644
index 0000000..89f4eca
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/internal/internal.go
@@ -0,0 +1,8 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+// EnableTrace indicates whether stack information should be recorded in errors.
+var EnableTrace = true
diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go
new file mode 100644
index 0000000..9842758
--- /dev/null
+++ b/vendor/golang.org/x/xerrors/wrap.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xerrors
+
+import (
+ "reflect"
+)
+
+// A Wrapper provides context around another error.
+type Wrapper interface {
+ // Unwrap returns the next error in the error chain.
+ // If there is no next error, Unwrap returns nil.
+ Unwrap() error
+}
+
+// Opaque returns an error with the same error formatting as err
+// but that does not match err and cannot be unwrapped.
+func Opaque(err error) error {
+ return noWrapper{err}
+}
+
+type noWrapper struct {
+ error
+}
+
+func (e noWrapper) FormatError(p Printer) (next error) {
+ if f, ok := e.error.(Formatter); ok {
+ return f.FormatError(p)
+ }
+ p.Print(e.error)
+ return nil
+}
+
+// Unwrap returns the result of calling the Unwrap method on err, if err implements
+// Unwrap. Otherwise, Unwrap returns nil.
+//
+// Deprecated: As of Go 1.13, use errors.Unwrap instead.
+func Unwrap(err error) error {
+ u, ok := err.(Wrapper)
+ if !ok {
+ return nil
+ }
+ return u.Unwrap()
+}
+
+// Is reports whether any error in err's chain matches target.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+//
+// Deprecated: As of Go 1.13, use errors.Is instead.
+func Is(err, target error) bool {
+ if target == nil {
+ return err == target
+ }
+
+ isComparable := reflect.TypeOf(target).Comparable()
+ for {
+ if isComparable && err == target {
+ return true
+ }
+ if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
+ return true
+ }
+ // TODO: consider supporing target.Is(err). This would allow
+ // user-definable predicates, but also may allow for coping with sloppy
+ // APIs, thereby making it easier to get away with them.
+ if err = Unwrap(err); err == nil {
+ return false
+ }
+ }
+}
+
+// As finds the first error in err's chain that matches the type to which target
+// points, and if so, sets the target to its value and returns true. An error
+// matches a type if it is assignable to the target type, or if it has a method
+// As(interface{}) bool such that As(target) returns true. As will panic if target
+// is not a non-nil pointer to a type which implements error or is of interface type.
+//
+// The As method should set the target to its value and return true if err
+// matches the type to which target points.
+//
+// Deprecated: As of Go 1.13, use errors.As instead.
+func As(err error, target interface{}) bool {
+ if target == nil {
+ panic("errors: target cannot be nil")
+ }
+ val := reflect.ValueOf(target)
+ typ := val.Type()
+ if typ.Kind() != reflect.Ptr || val.IsNil() {
+ panic("errors: target must be a non-nil pointer")
+ }
+ if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) {
+ panic("errors: *target must be interface or implement error")
+ }
+ targetType := typ.Elem()
+ for err != nil {
+ if reflect.TypeOf(err).AssignableTo(targetType) {
+ val.Elem().Set(reflect.ValueOf(err))
+ return true
+ }
+ if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) {
+ return true
+ }
+ err = Unwrap(err)
+ }
+ return false
+}
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json
index 60b8fc3..2411236 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-api.json
+++ b/vendor/google.golang.org/api/compute/v1/compute-api.json
@@ -1367,7 +1367,7 @@
],
"parameters": {
"backendBucket": {
- "description": "Name of the BackendBucket resource to which the security policy should be set. The name should conform to RFC1035.",
+ "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.",
"location": "path",
"required": true,
"type": "string"
@@ -8066,7 +8066,7 @@
]
},
"listManagedInstances": {
- "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
+ "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
"flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances",
"httpMethod": "POST",
"id": "compute.instanceGroupManagers.listManagedInstances",
@@ -10326,53 +10326,6 @@
"https://www.googleapis.com/auth/compute.readonly"
]
},
- "performMaintenance": {
- "description": "Perform a manual maintenance on the instance.",
- "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/performMaintenance",
- "httpMethod": "POST",
- "id": "compute.instances.performMaintenance",
- "parameterOrder": [
- "project",
- "zone",
- "instance"
- ],
- "parameters": {
- "instance": {
- "description": "Name of the instance scoping this request.",
- "location": "path",
- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}",
- "required": true,
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "requestId": {
- "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- "location": "query",
- "type": "string"
- },
- "zone": {
- "description": "The name of the zone for this request.",
- "location": "path",
- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
- "required": true,
- "type": "string"
- }
- },
- "path": "projects/{project}/zones/{zone}/instances/{instance}/performMaintenance",
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute"
- ]
- },
"removeResourcePolicies": {
"description": "Removes resource policies from an instance.",
"flatPath": "projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies",
@@ -11295,11 +11248,6 @@
"location": "query",
"type": "string"
},
- "withExtendedNotifications": {
- "description": "Determines whether the customers receive notifications before migration. Only applicable to SF vms.",
- "location": "query",
- "type": "boolean"
- },
"zone": {
"description": "The name of the zone for this request.",
"location": "path",
@@ -11426,7 +11374,7 @@
],
"parameters": {
"discardLocalSsd": {
- "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.",
+ "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.",
"location": "query",
"type": "boolean"
},
@@ -11478,7 +11426,7 @@
],
"parameters": {
"discardLocalSsd": {
- "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.",
+ "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.",
"location": "query",
"type": "boolean"
},
@@ -21921,7 +21869,7 @@
]
},
"listManagedInstances": {
- "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
+ "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
"flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances",
"httpMethod": "POST",
"id": "compute.regionInstanceGroupManagers.listManagedInstances",
@@ -26599,73 +26547,6 @@
}
}
},
- "regionZones": {
- "methods": {
- "list": {
- "description": "Retrieves the list of Zone resources under the specific region available to the specified project.",
- "flatPath": "projects/{project}/regions/{region}/zones",
- "httpMethod": "GET",
- "id": "compute.regionZones.list",
- "parameterOrder": [
- "project",
- "region"
- ],
- "parameters": {
- "filter": {
- "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.",
- "location": "query",
- "type": "string"
- },
- "maxResults": {
- "default": "500",
- "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)",
- "format": "uint32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "orderBy": {
- "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.",
- "location": "query",
- "type": "string"
- },
- "pageToken": {
- "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.",
- "location": "query",
- "type": "string"
- },
- "project": {
- "description": "Project ID for this request.",
- "location": "path",
- "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- "required": true,
- "type": "string"
- },
- "region": {
- "description": "Region for this request.",
- "location": "path",
- "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
- "required": true,
- "type": "string"
- },
- "returnPartialSuccess": {
- "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.",
- "location": "query",
- "type": "boolean"
- }
- },
- "path": "projects/{project}/regions/{region}/zones",
- "response": {
- "$ref": "ZoneList"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/compute.readonly"
- ]
- }
- }
- },
"regions": {
"methods": {
"get": {
@@ -33315,7 +33196,7 @@
]
},
"setSslPolicy": {
- "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.",
+ "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.",
"flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy",
"httpMethod": "POST",
"id": "compute.targetSslProxies.setSslPolicy",
@@ -35386,7 +35267,7 @@
}
}
},
- "revision": "20231231",
+ "revision": "20231031",
"rootUrl": "https://compute.googleapis.com/",
"schemas": {
"AWSV4Signature": {
@@ -36647,80 +36528,6 @@
},
"type": "object"
},
- "AllocationAggregateReservation": {
- "description": "This reservation type is specified by total resource amounts (e.g. total count of CPUs) and can account for multiple instance SKUs. In other words, one can create instances of varying shapes against this reservation.",
- "id": "AllocationAggregateReservation",
- "properties": {
- "inUseResources": {
- "description": "[Output only] List of resources currently in use.",
- "items": {
- "$ref": "AllocationAggregateReservationReservedResourceInfo"
- },
- "type": "array"
- },
- "reservedResources": {
- "description": "List of reserved resources (CPUs, memory, accelerators).",
- "items": {
- "$ref": "AllocationAggregateReservationReservedResourceInfo"
- },
- "type": "array"
- },
- "vmFamily": {
- "description": "The VM family that all instances scheduled against this reservation must belong to.",
- "enum": [
- "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L",
- "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP",
- "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P"
- ],
- "enumDescriptions": [
- "",
- "",
- ""
- ],
- "type": "string"
- },
- "workloadType": {
- "description": "The workload type of the instances that will target this reservation.",
- "enum": [
- "BATCH",
- "SERVING",
- "UNSPECIFIED"
- ],
- "enumDescriptions": [
- "Reserved resources will be optimized for BATCH workloads, such as ML training.",
- "Reserved resources will be optimized for SERVING workloads, such as ML inference.",
- ""
- ],
- "type": "string"
- }
- },
- "type": "object"
- },
- "AllocationAggregateReservationReservedResourceInfo": {
- "id": "AllocationAggregateReservationReservedResourceInfo",
- "properties": {
- "accelerator": {
- "$ref": "AllocationAggregateReservationReservedResourceInfoAccelerator",
- "description": "Properties of accelerator resources in this reservation."
- }
- },
- "type": "object"
- },
- "AllocationAggregateReservationReservedResourceInfoAccelerator": {
- "id": "AllocationAggregateReservationReservedResourceInfoAccelerator",
- "properties": {
- "acceleratorCount": {
- "description": "Number of accelerators of specified type.",
- "format": "int32",
- "type": "integer"
- },
- "acceleratorType": {
- "description": "Full or partial URL to accelerator type. e.g. \"projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l\"",
- "type": "string"
- }
- },
- "type": "object"
- },
"AllocationResourceStatus": {
"description": "[Output Only] Contains output only fields.",
"id": "AllocationResourceStatus",
@@ -36994,10 +36801,6 @@
"description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard.",
"type": "string"
},
- "enableConfidentialCompute": {
- "description": "Whether this disk is using confidential compute mode.",
- "type": "boolean"
- },
"labels": {
"additionalProperties": {
"type": "string"
@@ -38369,7 +38172,7 @@
"id": "BackendService",
"properties": {
"affinityCookieTtlSec": {
- "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.",
+ "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.",
"format": "int32",
"type": "integer"
},
@@ -38404,7 +38207,7 @@
},
"connectionTrackingPolicy": {
"$ref": "BackendServiceConnectionTrackingPolicy",
- "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers."
+ "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing."
},
"consistentHash": {
"$ref": "ConsistentHashLoadBalancerSettings",
@@ -38437,12 +38240,12 @@
"type": "string"
},
"enableCDN": {
- "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.",
+ "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.",
"type": "boolean"
},
"failoverPolicy": {
"$ref": "BackendServiceFailoverPolicy",
- "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)."
+ "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)."
},
"fingerprint": {
"description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.",
@@ -38458,7 +38261,7 @@
},
"iap": {
"$ref": "BackendServiceIAP",
- "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers."
+ "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing."
},
"id": {
"description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.",
@@ -38481,10 +38284,10 @@
"INVALID_LOAD_BALANCING_SCHEME"
],
"enumDescriptions": [
- "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.",
- "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.",
- "Signifies that this will be used for internal passthrough Network Load Balancers.",
- "Signifies that this will be used for internal Application Load Balancers.",
+ "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing",
+ "Signifies that this will be used for External Managed HTTP(S) Load Balancing.",
+ "Signifies that this will be used for Internal TCP/UDP Load Balancing.",
+ "Signifies that this will be used for Internal HTTP(S) Load Balancing.",
"Signifies that this will be used by Traffic Director.",
""
],
@@ -38551,12 +38354,12 @@
},
"port": {
"deprecated": true,
- "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.",
+ "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.",
"format": "int32",
"type": "integer"
},
"portName": {
- "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.",
+ "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.",
"type": "string"
},
"protocol": {
@@ -38935,11 +38738,11 @@
"type": "string"
},
"enableStrongAffinity": {
- "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.",
+ "description": "Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly.",
"type": "boolean"
},
"idleTimeoutSec": {
- "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.",
+ "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.",
"format": "int32",
"type": "integer"
},
@@ -38961,7 +38764,7 @@
"type": "object"
},
"BackendServiceFailoverPolicy": {
- "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).",
+ "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).",
"id": "BackendServiceFailoverPolicy",
"properties": {
"disableConnectionDrainOnFailover": {
@@ -38969,7 +38772,7 @@
"type": "boolean"
},
"dropTrafficIfUnhealthy": {
- "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.",
+ "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.",
"type": "boolean"
},
"failoverRatio": {
@@ -39836,7 +39639,7 @@
"description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)."
},
"members": {
- "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workforce identity pool. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: All workforce identities in a group. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All workforce identities with a specific attribute value. * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: All identities in a workforce identity pool. 
* `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: A single identity in a workload identity pool. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload identity pool group. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: All identities in a workload identity pool with a certain attribute. * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: All identities in a workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: Deleted single identity in a workforce identity pool. 
For example, `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.",
+ "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. 
* `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.",
"items": {
"type": "string"
},
@@ -40083,13 +39886,6 @@
"description": "[Output Only] Commitment end time in RFC3339 text format.",
"type": "string"
},
- "existingReservations": {
- "description": "Specifies the already existing reservations to attach to the Commitment. This field is optional, and it can be a full or partial URL. For example, the following are valid URLs to an reservation: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /reservations/reservation - projects/project/zones/zone/reservations/reservation ",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
"id": {
"description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.",
"format": "uint64",
@@ -40135,7 +39931,7 @@
"type": "string"
},
"reservations": {
- "description": "List of create-on-create reservations for this commitment.",
+ "description": "List of create-on-create reseravtions for this commitment.",
"items": {
"$ref": "Reservation"
},
@@ -40987,10 +40783,6 @@
"$ref": "CustomerEncryptionKey",
"description": "Encrypts the disk using a customer-supplied encryption key or a customer-managed encryption key. Encryption keys do not protect access to metadata of the disk. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later. For example, to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine. After you encrypt a disk with a customer-managed key, the diskEncryptionKey.kmsKeyName is set to a key *version* name once the disk is created. The disk is encrypted with this version of the key. In the response, diskEncryptionKey.kmsKeyName appears in the following format: \"diskEncryptionKey.kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeysVersions/version If you do not provide an encryption key when creating the disk, then the disk is encrypted using an automatically generated key and you don't need to provide a key to use the disk later."
},
- "enableConfidentialCompute": {
- "description": "Whether this disk is using confidential compute mode.",
- "type": "boolean"
- },
"guestOsFeatures": {
"description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.",
"items": {
@@ -41102,11 +40894,6 @@
"$ref": "DiskResourceStatus",
"description": "[Output Only] Status information for the disk resource."
},
- "satisfiesPzi": {
- "description": "Output only. Reserved for future use.",
- "readOnly": true,
- "type": "boolean"
- },
"satisfiesPzs": {
"description": "[Output Only] Reserved for future use.",
"type": "boolean"
@@ -43748,7 +43535,7 @@
"type": "object"
},
"ForwardingRule": {
- "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.",
+ "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.",
"id": "ForwardingRule",
"properties": {
"IPAddress": {
@@ -43782,7 +43569,7 @@
"type": "boolean"
},
"allowGlobalAccess": {
- "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.",
+ "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.",
"type": "boolean"
},
"allowPscGlobalAccess": {
@@ -43790,11 +43577,11 @@
"type": "boolean"
},
"backendService": {
- "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.",
+ "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.",
"type": "string"
},
"baseForwardingRule": {
- "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.",
+ "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.",
"type": "string"
},
"creationTimestamp": {
@@ -43835,7 +43622,7 @@
},
"kind": {
"default": "compute#forwardingRule",
- "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.",
+ "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.",
"type": "string"
},
"labelFingerprint": {
@@ -43883,7 +43670,7 @@
"type": "string"
},
"network": {
- "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.",
+ "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.",
"type": "string"
},
"networkTier": {
@@ -43918,7 +43705,7 @@
"type": "array"
},
"pscConnectionId": {
- "description": "[Output Only] The PSC connection id of the PSC forwarding rule.",
+ "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.",
"format": "uint64",
"type": "string"
},
@@ -43957,23 +43744,23 @@
"type": "array"
},
"serviceLabel": {
- "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.",
+ "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.",
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"type": "string"
},
"serviceName": {
- "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.",
+ "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.",
"type": "string"
},
"sourceIpRanges": {
- "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).",
+ "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).",
"items": {
"type": "string"
},
"type": "array"
},
"subnetwork": {
- "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.",
+ "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.",
"type": "string"
},
"target": {
@@ -44304,7 +44091,7 @@
"type": "object"
},
"ForwardingRuleServiceDirectoryRegistration": {
- "description": "Describes the auto-registration of the forwarding rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.",
+ "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.",
"id": "ForwardingRuleServiceDirectoryRegistration",
"properties": {
"namespace": {
@@ -44316,7 +44103,7 @@
"type": "string"
},
"serviceDirectoryRegion": {
- "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.",
+ "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.",
"type": "string"
}
},
@@ -44473,7 +44260,7 @@
"type": "string"
},
"portSpecification": {
- "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
+ "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
"enum": [
"USE_FIXED_PORT",
"USE_NAMED_PORT",
@@ -44660,11 +44447,10 @@
"id": "GuestOsFeature",
"properties": {
"type": {
- "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE - IDPF For more information, see Enabling guest operating system features.",
+ "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.",
"enum": [
"FEATURE_TYPE_UNSPECIFIED",
"GVNIC",
- "IDPF",
"MULTI_IP_SUBNET",
"SECURE_BOOT",
"SEV_CAPABLE",
@@ -44686,7 +44472,6 @@
"",
"",
"",
- "",
""
],
"type": "string"
@@ -44711,7 +44496,7 @@
"type": "string"
},
"portSpecification": {
- "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
+ "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
"enum": [
"USE_FIXED_PORT",
"USE_NAMED_PORT",
@@ -44764,7 +44549,7 @@
"type": "string"
},
"portSpecification": {
- "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
+ "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
"enum": [
"USE_FIXED_PORT",
"USE_NAMED_PORT",
@@ -44817,7 +44602,7 @@
"type": "string"
},
"portSpecification": {
- "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
+ "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
"enum": [
"USE_FIXED_PORT",
"USE_NAMED_PORT",
@@ -44854,7 +44639,7 @@
"type": "object"
},
"HealthCheck": {
- "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.",
+ "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.",
"id": "HealthCheck",
"properties": {
"checkIntervalSec": {
@@ -46649,10 +46434,6 @@
"format": "int64",
"type": "string"
},
- "enableConfidentialCompute": {
- "description": "Whether this image is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.",
- "type": "boolean"
- },
"family": {
"description": "The name of the image family to which this image belongs. The image family name can be from a publicly managed image family provided by Compute Engine, or from a custom image family you create. For example, centos-stream-9 is a publicly available image family. For more information, see Image family best practices. When creating disks, you can specify an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.",
"type": "string"
@@ -46741,11 +46522,6 @@
},
"type": "object"
},
- "satisfiesPzi": {
- "description": "Output only. Reserved for future use.",
- "readOnly": true,
- "type": "boolean"
- },
"satisfiesPzs": {
"description": "[Output Only] Reserved for future use.",
"type": "boolean"
@@ -47200,10 +46976,6 @@
"$ref": "ResourceStatus",
"description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field."
},
- "satisfiesPzi": {
- "description": "[Output Only] Reserved for future use.",
- "type": "boolean"
- },
"satisfiesPzs": {
"description": "[Output Only] Reserved for future use.",
"type": "boolean"
@@ -53607,11 +53379,6 @@
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"type": "string"
},
- "satisfiesPzi": {
- "description": "Output only. Reserved for future use.",
- "readOnly": true,
- "type": "boolean"
- },
"satisfiesPzs": {
"description": "[Output Only] Reserved for future use.",
"type": "boolean"
@@ -59312,7 +59079,7 @@
"type": "object"
},
"Operation": {
- "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources. Note that completed Operation resources have a limited retention period.",
+ "description": "Represents an Operation resource. Google Compute Engine has three Operation resources: * [Global](/compute/docs/reference/rest/v1/globalOperations) * [Regional](/compute/docs/reference/rest/v1/regionOperations) * [Zonal](/compute/docs/reference/rest/v1/zoneOperations) You can use an operation resource to manage asynchronous API requests. For more information, read Handling API responses. Operations can be global, regional or zonal. - For global operations, use the `globalOperations` resource. - For regional operations, use the `regionOperations` resource. - For zonal operations, use the `zoneOperations` resource. For more information, read Global, Regional, and Zonal Resources.",
"id": "Operation",
"properties": {
"clientOperationId": {
@@ -60409,14 +60176,14 @@
"id": "PacketMirroringFilter",
"properties": {
"IPProtocols": {
- "description": "Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all IPv4 traffic is mirrored.",
+ "description": "Protocols that apply as filter on mirrored traffic. If no protocols are specified, all traffic that matches the specified CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.",
"items": {
"type": "string"
},
"type": "array"
},
"cidrRanges": {
- "description": "One or more IPv4 or IPv6 CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. If no ranges are specified, all IPv4 traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all IPv4 traffic is mirrored. To mirror all IPv4 and IPv6 traffic, use \"0.0.0.0/0,::/0\". Note: Support for IPv6 traffic is in preview.",
+ "description": "IP CIDR ranges that apply as filter on the source (ingress) or destination (egress) IP in the IP header. Only IPv4 is supported. If no ranges are specified, all traffic that matches the specified IPProtocols is mirrored. If neither cidrRanges nor IPProtocols is specified, all traffic is mirrored.",
"items": {
"type": "string"
},
@@ -62166,7 +61933,6 @@
"COMMITTED_N2_CPUS",
"COMMITTED_NVIDIA_A100_80GB_GPUS",
"COMMITTED_NVIDIA_A100_GPUS",
- "COMMITTED_NVIDIA_H100_GPUS",
"COMMITTED_NVIDIA_K80_GPUS",
"COMMITTED_NVIDIA_L4_GPUS",
"COMMITTED_NVIDIA_P100_GPUS",
@@ -62242,7 +62008,6 @@
"PREEMPTIBLE_LOCAL_SSD_GB",
"PREEMPTIBLE_NVIDIA_A100_80GB_GPUS",
"PREEMPTIBLE_NVIDIA_A100_GPUS",
- "PREEMPTIBLE_NVIDIA_H100_GPUS",
"PREEMPTIBLE_NVIDIA_K80_GPUS",
"PREEMPTIBLE_NVIDIA_L4_GPUS",
"PREEMPTIBLE_NVIDIA_P100_GPUS",
@@ -62252,9 +62017,6 @@
"PREEMPTIBLE_NVIDIA_T4_GPUS",
"PREEMPTIBLE_NVIDIA_T4_VWS_GPUS",
"PREEMPTIBLE_NVIDIA_V100_GPUS",
- "PREEMPTIBLE_TPU_LITE_DEVICE_V5",
- "PREEMPTIBLE_TPU_LITE_PODSLICE_V5",
- "PREEMPTIBLE_TPU_PODSLICE_V4",
"PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK",
"PSC_INTERNAL_LB_FORWARDING_RULES",
"PUBLIC_ADVERTISED_PREFIXES",
@@ -62292,9 +62054,6 @@
"TARGET_SSL_PROXIES",
"TARGET_TCP_PROXIES",
"TARGET_VPN_GATEWAYS",
- "TPU_LITE_DEVICE_V5",
- "TPU_LITE_PODSLICE_V5",
- "TPU_PODSLICE_V4",
"URL_MAPS",
"VPN_GATEWAYS",
"VPN_TUNNELS",
@@ -62333,7 +62092,6 @@
"",
"",
"",
- "",
"Guest CPUs",
"",
"",
@@ -62431,10 +62189,6 @@
"",
"",
"",
- "",
- "",
- "",
- "",
"The total number of snapshots allowed for a single project.",
"",
"",
@@ -62454,9 +62208,6 @@
"",
"",
"",
- "",
- "",
- "",
""
],
"type": "string"
@@ -64130,10 +63881,6 @@
"description": "Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources.",
"id": "Reservation",
"properties": {
- "aggregateReservation": {
- "$ref": "AllocationAggregateReservation",
- "description": "Reservation for aggregated resources, providing shape flexibility."
- },
"commitment": {
"description": "[Output Only] Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment.",
"type": "string"
@@ -67499,7 +67246,7 @@
"type": "string"
},
"portSpecification": {
- "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
+ "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
"enum": [
"USE_FIXED_PORT",
"USE_NAMED_PORT",
@@ -68652,10 +68399,6 @@
"$ref": "Expr",
"description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies."
},
- "exprOptions": {
- "$ref": "SecurityPolicyRuleMatcherExprOptions",
- "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')."
- },
"versionedExpr": {
"description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.",
"enum": [
@@ -68682,36 +68425,6 @@
},
"type": "object"
},
- "SecurityPolicyRuleMatcherExprOptions": {
- "id": "SecurityPolicyRuleMatcherExprOptions",
- "properties": {
- "recaptchaOptions": {
- "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions",
- "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect."
- }
- },
- "type": "object"
- },
- "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": {
- "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions",
- "properties": {
- "actionTokenSiteKeys": {
- "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "sessionTokenSiteKeys": {
- "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.",
- "items": {
- "type": "string"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
"SecurityPolicyRuleNetworkMatcher": {
"description": "Represents a match condition that incoming network traffic is evaluated against.",
"id": "SecurityPolicyRuleNetworkMatcher",
@@ -68896,7 +68609,7 @@
"type": "string"
},
"enforceOnKey": {
- "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ",
+ "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ",
"enum": [
"ALL",
"HTTP_COOKIE",
@@ -68905,8 +68618,6 @@
"IP",
"REGION_CODE",
"SNI",
- "TLS_JA3_FINGERPRINT",
- "USER_IP",
"XFF_IP"
],
"enumDescriptions": [
@@ -68917,8 +68628,6 @@
"",
"",
"",
- "",
- "",
""
],
"type": "string"
@@ -68957,7 +68666,7 @@
"type": "string"
},
"enforceOnKeyType": {
- "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ",
+ "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ",
"enum": [
"ALL",
"HTTP_COOKIE",
@@ -68966,8 +68675,6 @@
"IP",
"REGION_CODE",
"SNI",
- "TLS_JA3_FINGERPRINT",
- "USER_IP",
"XFF_IP"
],
"enumDescriptions": [
@@ -68978,8 +68685,6 @@
"",
"",
"",
- "",
- "",
""
],
"type": "string"
@@ -69994,10 +69699,6 @@
"format": "int64",
"type": "string"
},
- "enableConfidentialCompute": {
- "description": "Whether this snapshot is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.",
- "type": "boolean"
- },
"guestOsFeatures": {
"description": "[Output Only] A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.",
"items": {
@@ -70057,11 +69758,6 @@
"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
"type": "string"
},
- "satisfiesPzi": {
- "description": "Output only. Reserved for future use.",
- "readOnly": true,
- "type": "boolean"
- },
"satisfiesPzs": {
"description": "[Output Only] Reserved for future use.",
"type": "boolean"
@@ -71531,7 +71227,7 @@
"type": "object"
},
"SslPolicy": {
- "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.",
+ "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.",
"id": "SslPolicy",
"properties": {
"creationTimestamp": {
@@ -71946,7 +71642,7 @@
"type": "string"
},
"purpose": {
- "description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.",
+ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.",
"enum": [
"GLOBAL_MANAGED_PROXY",
"INTERNAL_HTTPS_LOAD_BALANCER",
@@ -71958,7 +71654,7 @@
],
"enumDescriptions": [
"Subnet reserved for Global Envoy-based Load Balancing.",
- "Subnet reserved for Internal HTTP(S) Load Balancing. This is a legacy purpose, please use REGIONAL_MANAGED_PROXY instead.",
+ "Subnet reserved for Internal HTTP(S) Load Balancing.",
"Regular user created or automatically created subnet.",
"Subnetwork used as source range for Private NAT Gateways.",
"Regular user created or automatically created subnet.",
@@ -71971,12 +71667,8 @@
"description": "URL of the region where the Subnetwork resides. This field can be set only at resource creation time.",
"type": "string"
},
- "reservedInternalRange": {
- "description": "The URL of the reserved internal range.",
- "type": "string"
- },
"role": {
- "description": "The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.",
+ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.",
"enum": [
"ACTIVE",
"BACKUP"
@@ -72408,10 +72100,6 @@
"rangeName": {
"description": "The name associated with this subnetwork secondary range, used when adding an alias IP range to a VM instance. The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the subnetwork.",
"type": "string"
- },
- "reservedInternalRange": {
- "description": "The URL of the reserved internal range.",
- "type": "string"
}
},
"type": "object"
@@ -72600,7 +72288,7 @@
"type": "string"
},
"portSpecification": {
- "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
+ "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.",
"enum": [
"USE_FIXED_PORT",
"USE_NAMED_PORT",
@@ -74330,7 +74018,7 @@
"type": "object"
},
"TargetPool": {
- "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.",
+ "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.",
"id": "TargetPool",
"properties": {
"backupPool": {
@@ -74993,7 +74681,7 @@
"type": "object"
},
"TargetSslProxy": {
- "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.",
+ "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.",
"id": "TargetSslProxy",
"properties": {
"certificateMap": {
@@ -75372,7 +75060,7 @@
"type": "object"
},
"TargetTcpProxy": {
- "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.",
+ "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.",
"id": "TargetTcpProxy",
"properties": {
"creationTimestamp": {
@@ -77119,7 +76807,7 @@
"type": "string"
},
"purpose": {
- "description": "The purpose of the resource. This field can be either PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY are user-created subnetworks that are reserved for Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY.",
+ "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.",
"enum": [
"GLOBAL_MANAGED_PROXY",
"INTERNAL_HTTPS_LOAD_BALANCER",
@@ -77131,7 +76819,7 @@
],
"enumDescriptions": [
"Subnet reserved for Global Envoy-based Load Balancing.",
- "Subnet reserved for Internal HTTP(S) Load Balancing. This is a legacy purpose, please use REGIONAL_MANAGED_PROXY instead.",
+ "Subnet reserved for Internal HTTP(S) Load Balancing.",
"Regular user created or automatically created subnet.",
"Subnetwork used as source range for Private NAT Gateways.",
"Regular user created or automatically created subnet.",
@@ -77141,7 +76829,7 @@
"type": "string"
},
"role": {
- "description": "The role of subnetwork. Currently, this field is only used when purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.",
+ "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.",
"enum": [
"ACTIVE",
"BACKUP"
diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go
index 954688c..2f9af3b 100644
--- a/vendor/google.golang.org/api/compute/v1/compute-gen.go
+++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC.
+// Copyright 2023 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -95,9 +95,7 @@
const apiName = "compute"
const apiVersion = "v1"
const basePath = "https://compute.googleapis.com/compute/v1/"
-const basePathTemplate = "https://compute.UNIVERSE_DOMAIN/compute/v1/"
const mtlsBasePath = "https://compute.mtls.googleapis.com/compute/v1/"
-const defaultUniverseDomain = "googleapis.com"
// OAuth2 scopes used by this API.
const (
@@ -136,9 +134,7 @@
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
- opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
- opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
@@ -231,7 +227,6 @@
s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s)
s.RegionTargetTcpProxies = NewRegionTargetTcpProxiesService(s)
s.RegionUrlMaps = NewRegionUrlMapsService(s)
- s.RegionZones = NewRegionZonesService(s)
s.Regions = NewRegionsService(s)
s.Reservations = NewReservationsService(s)
s.ResourcePolicies = NewResourcePoliciesService(s)
@@ -401,8 +396,6 @@
RegionUrlMaps *RegionUrlMapsService
- RegionZones *RegionZonesService
-
Regions *RegionsService
Reservations *ReservationsService
@@ -1073,15 +1066,6 @@
s *Service
}
-func NewRegionZonesService(s *Service) *RegionZonesService {
- rs := &RegionZonesService{s: s}
- return rs
-}
-
-type RegionZonesService struct {
- s *Service
-}
-
func NewRegionsService(s *Service) *RegionsService {
rs := &RegionsService{s: s}
return rs
@@ -2984,121 +2968,6 @@
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
-// AllocationAggregateReservation: This reservation type is specified by
-// total resource amounts (e.g. total count of CPUs) and can account for
-// multiple instance SKUs. In other words, one can create instances of
-// varying shapes against this reservation.
-type AllocationAggregateReservation struct {
- // InUseResources: [Output only] List of resources currently in use.
- InUseResources []*AllocationAggregateReservationReservedResourceInfo `json:"inUseResources,omitempty"`
-
- // ReservedResources: List of reserved resources (CPUs, memory,
- // accelerators).
- ReservedResources []*AllocationAggregateReservationReservedResourceInfo `json:"reservedResources,omitempty"`
-
- // VmFamily: The VM family that all instances scheduled against this
- // reservation must belong to.
- //
- // Possible values:
- // "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L"
- // "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP"
- // "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P"
- VmFamily string `json:"vmFamily,omitempty"`
-
- // WorkloadType: The workload type of the instances that will target
- // this reservation.
- //
- // Possible values:
- // "BATCH" - Reserved resources will be optimized for BATCH workloads,
- // such as ML training.
- // "SERVING" - Reserved resources will be optimized for SERVING
- // workloads, such as ML inference.
- // "UNSPECIFIED"
- WorkloadType string `json:"workloadType,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "InUseResources") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "InUseResources") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *AllocationAggregateReservation) MarshalJSON() ([]byte, error) {
- type NoMethod AllocationAggregateReservation
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type AllocationAggregateReservationReservedResourceInfo struct {
- // Accelerator: Properties of accelerator resources in this reservation.
- Accelerator *AllocationAggregateReservationReservedResourceInfoAccelerator `json:"accelerator,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Accelerator") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Accelerator") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *AllocationAggregateReservationReservedResourceInfo) MarshalJSON() ([]byte, error) {
- type NoMethod AllocationAggregateReservationReservedResourceInfo
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type AllocationAggregateReservationReservedResourceInfoAccelerator struct {
- // AcceleratorCount: Number of accelerators of specified type.
- AcceleratorCount int64 `json:"acceleratorCount,omitempty"`
-
- // AcceleratorType: Full or partial URL to accelerator type. e.g.
- // "projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l"
- AcceleratorType string `json:"acceleratorType,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "AcceleratorCount") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *AllocationAggregateReservationReservedResourceInfoAccelerator) MarshalJSON() ([]byte, error) {
- type NoMethod AllocationAggregateReservationReservedResourceInfoAccelerator
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
// AllocationResourceStatus: [Output Only] Contains output only fields.
type AllocationResourceStatus struct {
// SpecificSkuAllocation: Allocation Properties of this reservation.
@@ -3506,10 +3375,6 @@
// example: pd-standard.
DiskType string `json:"diskType,omitempty"`
- // EnableConfidentialCompute: Whether this disk is using confidential
- // compute mode.
- EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"`
-
// Labels: Labels to apply to this disk. These can be later modified by
// the disks.setLabels method. This field is only applicable for
// persistent disks.
@@ -5672,13 +5537,13 @@
// For more information, see Backend Services.
type BackendService struct {
// AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is
- // applicable to Application Load Balancers and Traffic Director and
- // requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to
- // 0, the cookie is non-persistent and lasts only until the end of the
- // browser session (or equivalent). The maximum allowed value is two
- // weeks (1,209,600). Not supported when the backend service is
- // referenced by a URL map that is bound to target gRPC proxy that has
- // validateForProxyless field set to true.
+ // applicable to external and internal HTTP(S) load balancers and
+ // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session
+ // affinity. If set to 0, the cookie is non-persistent and lasts only
+ // until the end of the browser session (or equivalent). The maximum
+ // allowed value is two weeks (1,209,600). Not supported when the
+ // backend service is referenced by a URL map that is bound to target
+ // gRPC proxy that has validateForProxyless field set to true.
AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"`
// Backends: The list of backends that serve this BackendService.
@@ -5704,8 +5569,8 @@
// ConnectionTrackingPolicy: Connection Tracking configuration for this
// BackendService. Connection tracking policy settings are only
- // available for external passthrough Network Load Balancers and
- // internal passthrough Network Load Balancers.
+ // available for Network Load Balancing and Internal TCP/UDP Load
+ // Balancing.
ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"`
// ConsistentHash: Consistent Hash-based load balancing can be used to
@@ -5744,15 +5609,15 @@
// security policy associated with this backend service.
EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"`
- // EnableCDN: If true, enables Cloud CDN for the backend service of a
- // global external Application Load Balancer.
+ // EnableCDN: If true, enables Cloud CDN for the backend service of an
+ // external HTTP(S) load balancer.
EnableCDN bool `json:"enableCDN,omitempty"`
// FailoverPolicy: Requires at least one backend instance group to be
// defined as a backup (failover) backend. For load balancers that have
- // configurable failover: Internal passthrough Network Load Balancers
+ // configurable failover: Internal TCP/UDP Load Balancing
// (https://cloud.google.com/load-balancing/docs/internal/failover-overview)
- // and external passthrough Network Load Balancers
+ // and external TCP/UDP Load Balancing
// (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"`
@@ -5776,8 +5641,8 @@
HealthChecks []string `json:"healthChecks,omitempty"`
// Iap: The configurations for Identity-Aware Proxy on this resource.
- // Not available for internal passthrough Network Load Balancers and
- // external passthrough Network Load Balancers.
+ // Not available for Internal TCP/UDP Load Balancing and Network Load
+ // Balancing.
Iap *BackendServiceIAP `json:"iap,omitempty"`
// Id: [Output Only] The unique identifier for the resource. This
@@ -5793,16 +5658,14 @@
// another. For more information, refer to Choosing a load balancer.
//
// Possible values:
- // "EXTERNAL" - Signifies that this will be used for classic
- // Application Load Balancers, global external proxy Network Load
- // Balancers, or external passthrough Network Load Balancers.
- // "EXTERNAL_MANAGED" - Signifies that this will be used for global
- // external Application Load Balancers, regional external Application
- // Load Balancers, or regional external proxy Network Load Balancers.
- // "INTERNAL" - Signifies that this will be used for internal
- // passthrough Network Load Balancers.
- // "INTERNAL_MANAGED" - Signifies that this will be used for internal
- // Application Load Balancers.
+ // "EXTERNAL" - Signifies that this will be used for external HTTP(S),
+ // SSL Proxy, TCP Proxy, or Network Load Balancing
+ // "EXTERNAL_MANAGED" - Signifies that this will be used for External
+ // Managed HTTP(S) Load Balancing.
+ // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP
+ // Load Balancing.
+ // "INTERNAL_MANAGED" - Signifies that this will be used for Internal
+ // HTTP(S) Load Balancing.
// "INTERNAL_SELF_MANAGED" - Signifies that this will be used by
// Traffic Director.
// "INVALID_LOAD_BALANCING_SCHEME"
@@ -5944,18 +5807,16 @@
OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"`
// Port: Deprecated in favor of portName. The TCP port to connect on the
- // backend. The default value is 80. For internal passthrough Network
- // Load Balancers and external passthrough Network Load Balancers, omit
- // port.
+ // backend. The default value is 80. For Internal TCP/UDP Load Balancing
+ // and Network Load Balancing, omit port.
Port int64 `json:"port,omitempty"`
// PortName: A named port on a backend instance group representing the
// port for communication to the backend VMs in that group. The named
// port must be defined on each backend instance group
// (https://cloud.google.com/load-balancing/docs/backend-service#named_ports).
- // This parameter has no meaning if the backends are NEGs. For internal
- // passthrough Network Load Balancers and external passthrough Network
- // Load Balancers, omit port_name.
+ // This parameter has no meaning if the backends are NEGs. For Internal
+ // TCP/UDP Load Balancing and Network Load Balancing, omit port_name.
PortName string `json:"portName,omitempty"`
// Protocol: The protocol this BackendService uses to communicate with
@@ -6539,19 +6400,18 @@
// "NEVER_PERSIST"
ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"`
- // EnableStrongAffinity: Enable Strong Session Affinity for external
- // passthrough Network Load Balancers. This option is not available
- // publicly.
+ // EnableStrongAffinity: Enable Strong Session Affinity for Network Load
+ // Balancing. This option is not available publicly.
EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"`
// IdleTimeoutSec: Specifies how long to keep a Connection Tracking
- // entry while there is no matching traffic (in seconds). For internal
- // passthrough Network Load Balancers: - The minimum (default) is 10
- // minutes and the maximum is 16 hours. - It can be set only if
- // Connection Tracking is less than 5-tuple (i.e. Session Affinity is
- // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking
- // Mode is PER_SESSION). For external passthrough Network Load Balancers
- // the default is 60 seconds. This option is not available publicly.
+ // entry while there is no matching traffic (in seconds). For Internal
+ // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the
+ // maximum is 16 hours. - It can be set only if Connection Tracking is
+ // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION,
+ // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For
+ // Network Load Balancer the default is 60 seconds. This option is not
+ // available publicly.
IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"`
// TrackingMode: Specifies the key used for connection tracking. There
@@ -6597,9 +6457,9 @@
}
// BackendServiceFailoverPolicy: For load balancers that have
-// configurable failover: Internal passthrough Network Load Balancers
+// configurable failover: Internal TCP/UDP Load Balancing
// (https://cloud.google.com/load-balancing/docs/internal/failover-overview)
-// and external passthrough Network Load Balancers
+// and external TCP/UDP Load Balancing
// (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
// On failover or failback, this field indicates whether connection
// draining will be honored. Google Cloud has a fixed connection
@@ -6618,9 +6478,9 @@
// unhealthy.If set to false, connections are distributed among all
// primary VMs when all primary and all backup backend VMs are
// unhealthy. For load balancers that have configurable failover:
- // Internal passthrough Network Load Balancers
+ // Internal TCP/UDP Load Balancing
// (https://cloud.google.com/load-balancing/docs/internal/failover-overview)
- // and external passthrough Network Load Balancers
+ // and external TCP/UDP Load Balancing
// (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview).
// The default is false.
DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"`
@@ -7857,34 +7717,11 @@
// For example, `admins@example.com`. * `domain:{domain}`: The G Suite
// domain (primary) that represents all the users of that domain. For
// example, `google.com` or `example.com`. *
- // `principal://iam.googleapis.com/locations/global/workforcePools/{pool_
- // id}/subject/{subject_attribute_value}`: A single identity in a
- // workforce identity pool. *
- // `principalSet://iam.googleapis.com/locations/global/workforcePools/{po
- // ol_id}/group/{group_id}`: All workforce identities in a group. *
- // `principalSet://iam.googleapis.com/locations/global/workforcePools/{po
- // ol_id}/attribute.{attribute_name}/{attribute_value}`: All workforce
- // identities with a specific attribute value. *
- // `principalSet://iam.googleapis.com/locations/global/workforcePools/{po
- // ol_id}/*`: All identities in a workforce identity pool. *
- // `principal://iam.googleapis.com/projects/{project_number}/locations/gl
- // obal/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}
- // `: A single identity in a workload identity pool. *
- // `principalSet://iam.googleapis.com/projects/{project_number}/locations
- // /global/workloadIdentityPools/{pool_id}/group/{group_id}`: A workload
- // identity pool group. *
- // `principalSet://iam.googleapis.com/projects/{project_number}/locations
- // /global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{at
- // tribute_value}`: All identities in a workload identity pool with a
- // certain attribute. *
- // `principalSet://iam.googleapis.com/projects/{project_number}/locations
- // /global/workloadIdentityPools/{pool_id}/*`: All identities in a
- // workload identity pool. * `deleted:user:{emailid}?uid={uniqueid}`: An
- // email address (plus unique identifier) representing a user that has
- // been recently deleted. For example,
- // `alice@example.com?uid=123456789012345678901`. If the user is
- // recovered, this value reverts to `user:{emailid}` and the recovered
- // user retains the role in the binding. *
+ // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus
+ // unique identifier) representing a user that has been recently
+ // deleted. For example, `alice@example.com?uid=123456789012345678901`.
+ // If the user is recovered, this value reverts to `user:{emailid}` and
+ // the recovered user retains the role in the binding. *
// `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
// (plus unique identifier) representing a service account that has been
// recently deleted. For example,
@@ -7896,12 +7733,7 @@
// that has been recently deleted. For example,
// `admins@example.com?uid=123456789012345678901`. If the group is
// recovered, this value reverts to `group:{emailid}` and the recovered
- // group retains the role in the binding. *
- // `deleted:principal://iam.googleapis.com/locations/global/workforcePool
- // s/{pool_id}/subject/{subject_attribute_value}`: Deleted single
- // identity in a workforce identity pool. For example,
- // `deleted:principal://iam.googleapis.com/locations/global/workforcePool
- // s/my-pool-id/subject/my-subject-attribute-value`.
+ // group retains the role in the binding.
Members []string `json:"members,omitempty"`
// Role: Role that is assigned to the list of `members`, or principals.
@@ -8320,15 +8152,6 @@
// format.
EndTimestamp string `json:"endTimestamp,omitempty"`
- // ExistingReservations: Specifies the already existing reservations to
- // attach to the Commitment. This field is optional, and it can be a
- // full or partial URL. For example, the following are valid URLs to an
- // reservation: -
- // https://www.googleapis.com/compute/v1/projects/project/zones/zone
- // /reservations/reservation -
- // projects/project/zones/zone/reservations/reservation
- ExistingReservations []string `json:"existingReservations,omitempty"`
-
// Id: [Output Only] The unique identifier for the resource. This
// identifier is defined by the server.
Id uint64 `json:"id,omitempty,string"`
@@ -8368,7 +8191,7 @@
// used.
Region string `json:"region,omitempty"`
- // Reservations: List of create-on-create reservations for this
+ // Reservations: List of create-on-create reseravtions for this
// commitment.
Reservations []*Reservation `json:"reservations,omitempty"`
@@ -9517,10 +9340,6 @@
// provide a key to use the disk later.
DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"`
- // EnableConfidentialCompute: Whether this disk is using confidential
- // compute mode.
- EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"`
-
// GuestOsFeatures: A list of features to enable on the guest operating
// system. Applicable only for bootable images. Read Enabling guest
// operating system features to see a list of available options.
@@ -9621,9 +9440,6 @@
// resource.
ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"`
- // SatisfiesPzi: Output only. Reserved for future use.
- SatisfiesPzi bool `json:"satisfiesPzi,omitempty"`
-
// SatisfiesPzs: [Output Only] Reserved for future use.
SatisfiesPzs bool `json:"satisfiesPzs,omitempty"`
@@ -13501,10 +13317,10 @@
// * Regional
// (https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules)
// A forwarding rule and its corresponding IP address represent the
-// frontend configuration of a Google Cloud load balancer. Forwarding
-// rules can also reference target instances and Cloud VPN Classic
-// gateways (targetVpnGateway). For more information, read Forwarding
-// rule concepts and Using protocol forwarding.
+// frontend configuration of a Google Cloud Platform load balancer.
+// Forwarding rules can also reference target instances and Cloud VPN
+// Classic gateways (targetVpnGateway). For more information, read
+// Forwarding rule concepts and Using protocol forwarding.
type ForwardingRule struct {
// IPAddress: IP address for which this forwarding rule accepts traffic.
// When a client sends traffic to this IP address, the forwarding rule
@@ -13568,9 +13384,8 @@
// AllowGlobalAccess: This field is used along with the backend_service
// field for internal load balancing or with the target field for
// internal TargetInstance. If set to true, clients can access the
- // internal passthrough Network Load Balancers, the regional internal
- // Application Load Balancer, and the regional internal proxy Network
- // Load Balancer from all regions. If false, only allows access from the
+ // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load
+ // Balancer from all regions. If false, only allows access from the
// local region the load balancer is located at. Note that for
// INTERNAL_MANAGED forwarding rules, this field cannot be changed after
// the forwarding rule is created.
@@ -13581,16 +13396,16 @@
AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"`
// BackendService: Identifies the backend service to which the
- // forwarding rule sends traffic. Required for internal and external
- // passthrough Network Load Balancers; must be omitted for all other
+ // forwarding rule sends traffic. Required for Internal TCP/UDP Load
+ // Balancing and Network Load Balancing; must be omitted for all other
// load balancer types.
BackendService string `json:"backendService,omitempty"`
// BaseForwardingRule: [Output Only] The URL for the corresponding base
- // forwarding rule. By base forwarding rule, we mean the forwarding rule
+ // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule
// that has the same IP address, protocol, and port settings with the
- // current forwarding rule, but without sourceIPRanges specified. Always
- // empty if the current forwarding rule does not have sourceIPRanges
+ // current Forwarding Rule, but without sourceIPRanges specified. Always
+ // empty if the current Forwarding Rule does not have sourceIPRanges
// specified.
BaseForwardingRule string `json:"baseForwardingRule,omitempty"`
@@ -13633,7 +13448,7 @@
IsMirroringCollector bool `json:"isMirroringCollector,omitempty"`
// Kind: [Output Only] Type of the resource. Always
- // compute#forwardingRule for forwarding rule resources.
+ // compute#forwardingRule for Forwarding Rule resources.
Kind string `json:"kind,omitempty"`
// LabelFingerprint: A fingerprint for the labels being applied to this
@@ -13696,10 +13511,10 @@
Name string `json:"name,omitempty"`
// Network: This field is not used for global external load balancing.
- // For internal passthrough Network Load Balancers, this field
- // identifies the network that the load balanced IP should belong to for
- // this forwarding rule. If the subnetwork is specified, the network of
- // the subnetwork will be used. If neither subnetwork nor this field is
+ // For Internal TCP/UDP Load Balancing, this field identifies the
+ // network that the load balanced IP should belong to for this
+ // Forwarding Rule. If the subnetwork is specified, the network of the
+ // subnetwork will be used. If neither subnetwork nor this field is
// specified, the default network will be used. For Private Service
// Connect forwarding rules that forward traffic to Google APIs, a
// network must be provided.
@@ -13766,7 +13581,7 @@
Ports []string `json:"ports,omitempty"`
// PscConnectionId: [Output Only] The PSC connection id of the PSC
- // forwarding rule.
+ // Forwarding Rule.
PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"`
// Possible values:
@@ -13796,7 +13611,7 @@
ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"`
// ServiceLabel: An optional prefix to the service name for this
- // forwarding rule. If specified, the prefix is the first label of the
+ // Forwarding Rule. If specified, the prefix is the first label of the
// fully qualified service name. The label must be 1-63 characters long,
// and comply with RFC1035. Specifically, the label must be 1-63
// characters long and match the regular expression
@@ -13807,26 +13622,25 @@
ServiceLabel string `json:"serviceLabel,omitempty"`
// ServiceName: [Output Only] The internal fully qualified service name
- // for this forwarding rule. This field is only used for internal load
+ // for this Forwarding Rule. This field is only used for internal load
// balancing.
ServiceName string `json:"serviceName,omitempty"`
- // SourceIpRanges: If not empty, this forwarding rule will only forward
+ // SourceIpRanges: If not empty, this Forwarding Rule will only forward
// the traffic when the source IP address matches one of the IP
- // addresses or CIDR ranges set here. Note that a forwarding rule can
+ // addresses or CIDR ranges set here. Note that a Forwarding Rule can
// only have up to 64 source IP ranges, and this field can only be used
- // with a regional forwarding rule whose scheme is EXTERNAL. Each
+ // with a regional Forwarding Rule whose scheme is EXTERNAL. Each
// source_ip_range entry should be either an IP address (for example,
// 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).
SourceIpRanges []string `json:"sourceIpRanges,omitempty"`
// Subnetwork: This field identifies the subnetwork that the load
- // balanced IP should belong to for this forwarding rule, used with
- // internal load balancers and external passthrough Network Load
- // Balancers with IPv6. If the network specified is in auto subnet mode,
- // this field is optional. However, a subnetwork must be specified if
- // the network is in custom subnet mode or when creating external
- // forwarding rule with IPv6.
+ // balanced IP should belong to for this Forwarding Rule, used in
+ // internal load balancing and network load balancing with IPv6. If the
+ // network specified is in auto subnet mode, this field is optional.
+ // However, a subnetwork must be specified if the network is in custom
+ // subnet mode or when creating external forwarding rule with IPv6.
Subnetwork string `json:"subnetwork,omitempty"`
// Target: The URL of the target resource to receive the matched
@@ -14287,9 +14101,9 @@
}
// ForwardingRuleServiceDirectoryRegistration: Describes the
-// auto-registration of the forwarding rule to Service Directory. The
+// auto-registration of the Forwarding Rule to Service Directory. The
// region and project of the Service Directory resource generated from
-// this registration will be the same as this forwarding rule.
+// this registration will be the same as this Forwarding Rule.
type ForwardingRuleServiceDirectoryRegistration struct {
// Namespace: Service Directory namespace to register the forwarding
// rule under.
@@ -14301,8 +14115,8 @@
// ServiceDirectoryRegion: [Optional] Service Directory region to
// register this global forwarding rule under. Default to "us-central1".
- // Only used for PSC for Google APIs. All PSC for Google APIs forwarding
- // rules on the same network should use the same Service Directory
+ // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding
+ // Rules on the same network should use the same Service Directory
// region.
ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"`
@@ -14519,7 +14333,7 @@
// PortSpecification: Specifies how a port is selected for health
// checking. Can be one of the following values: USE_FIXED_PORT:
// Specifies a port number explicitly using the port field in the health
- // check. Supported by backend services for passthrough load balancers
+ // check. Supported by backend services for pass-through load balancers
// and backend services for proxy load balancers. Not supported by
// target pools. The health check supports all backends supported by the
// backend service provided the backend can be health checked. For
@@ -14529,7 +14343,7 @@
// specifying the health check port by referring to the backend service.
// Only supported by backend services for proxy load balancers. Not
// supported by target pools. Not supported by backend services for
- // passthrough load balancers. Supports all backends that can be health
+ // pass-through load balancers. Supports all backends that can be health
// checked; for example, GCE_VM_IP_PORT network endpoint groups and
// instance group backends. For GCE_VM_IP_PORT network endpoint group
// backends, the health check uses the port number specified for each
@@ -14900,13 +14714,12 @@
// commas to separate values. Set to one or more of the following
// values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET -
// UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE -
- // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE - IDPF For more information,
- // see Enabling guest operating system features.
+ // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see
+ // Enabling guest operating system features.
//
// Possible values:
// "FEATURE_TYPE_UNSPECIFIED"
// "GVNIC"
- // "IDPF"
// "MULTI_IP_SUBNET"
// "SECURE_BOOT"
// "SEV_CAPABLE"
@@ -14960,7 +14773,7 @@
// PortSpecification: Specifies how a port is selected for health
// checking. Can be one of the following values: USE_FIXED_PORT:
// Specifies a port number explicitly using the port field in the health
- // check. Supported by backend services for passthrough load balancers
+ // check. Supported by backend services for pass-through load balancers
// and backend services for proxy load balancers. Not supported by
// target pools. The health check supports all backends supported by the
// backend service provided the backend can be health checked. For
@@ -14970,7 +14783,7 @@
// specifying the health check port by referring to the backend service.
// Only supported by backend services for proxy load balancers. Not
// supported by target pools. Not supported by backend services for
- // passthrough load balancers. Supports all backends that can be health
+ // pass-through load balancers. Supports all backends that can be health
// checked; for example, GCE_VM_IP_PORT network endpoint groups and
// instance group backends. For GCE_VM_IP_PORT network endpoint group
// backends, the health check uses the port number specified for each
@@ -15054,7 +14867,7 @@
// PortSpecification: Specifies how a port is selected for health
// checking. Can be one of the following values: USE_FIXED_PORT:
// Specifies a port number explicitly using the port field in the health
- // check. Supported by backend services for passthrough load balancers
+ // check. Supported by backend services for pass-through load balancers
// and backend services for proxy load balancers. Also supported in
// legacy HTTP health checks for target pools. The health check supports
// all backends supported by the backend service provided the backend
@@ -15148,7 +14961,7 @@
// PortSpecification: Specifies how a port is selected for health
// checking. Can be one of the following values: USE_FIXED_PORT:
// Specifies a port number explicitly using the port field in the health
- // check. Supported by backend services for passthrough load balancers
+ // check. Supported by backend services for pass-through load balancers
// and backend services for proxy load balancers. Not supported by
// target pools. The health check supports all backends supported by the
// backend service provided the backend can be health checked. For
@@ -15158,7 +14971,7 @@
// specifying the health check port by referring to the backend service.
// Only supported by backend services for proxy load balancers. Not
// supported by target pools. Not supported by backend services for
- // passthrough load balancers. Supports all backends that can be health
+ // pass-through load balancers. Supports all backends that can be health
// checked; for example, GCE_VM_IP_PORT network endpoint groups and
// instance group backends. For GCE_VM_IP_PORT network endpoint group
// backends, the health check uses the port number specified for each
@@ -15228,14 +15041,20 @@
// (/compute/docs/reference/rest/v1/regionHealthChecks) * Global
// (/compute/docs/reference/rest/v1/healthChecks) These health check
// resources can be used for load balancing and for autohealing VMs in a
-// managed instance group (MIG). **Load balancing** Health check
-// requirements vary depending on the type of load balancer. For details
-// about the type of health check supported for each load balancer and
-// corresponding backend type, see Health checks overview: Load balancer
-// guide. **Autohealing in MIGs** The health checks that you use for
-// autohealing VMs in a MIG can be either regional or global. For more
-// information, see Set up an application health check and autohealing.
-// For more information, see Health checks overview.
+// managed instance group (MIG). **Load balancing** The following load
+// balancer can use either regional or global health check: * Internal
+// TCP/UDP load balancer The following load balancers require regional
+// health check: * Internal HTTP(S) load balancer * Backend
+// service-based network load balancer Traffic Director and the
+// following load balancers require global health check: * External
+// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load
+// balancer The following load balancer require legacy HTTP health
+// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target
+// pool-based network load balancer **Autohealing in MIGs** The health
+// checks that you use for autohealing VMs in a MIG can be either
+// regional or global. For more information, see Set up an application
+// health check and autohealing. For more information, see Health checks
+// overview.
type HealthCheck struct {
// CheckIntervalSec: How often (in seconds) to send a health check. The
// default value is 5 seconds.
@@ -17934,11 +17753,6 @@
// (in GB).
DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
- // EnableConfidentialCompute: Whether this image is created from a
- // confidential compute mode disk. [Output Only]: This field is not set
- // by user, but from source disk.
- EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"`
-
// Family: The name of the image family to which this image belongs. The
// image family name can be from a publicly managed image family
// provided by Compute Engine, or from a custom image family you create.
@@ -18005,9 +17819,6 @@
// RawDisk: The parameters of the raw disk image.
RawDisk *ImageRawDisk `json:"rawDisk,omitempty"`
- // SatisfiesPzi: Output only. Reserved for future use.
- SatisfiesPzi bool `json:"satisfiesPzi,omitempty"`
-
// SatisfiesPzs: [Output Only] Reserved for future use.
SatisfiesPzs bool `json:"satisfiesPzs,omitempty"`
@@ -18626,9 +18437,6 @@
// corresponding input only field.
ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"`
- // SatisfiesPzi: [Output Only] Reserved for future use.
- SatisfiesPzi bool `json:"satisfiesPzi,omitempty"`
-
// SatisfiesPzs: [Output Only] Reserved for future use.
SatisfiesPzs bool `json:"satisfiesPzs,omitempty"`
@@ -27779,9 +27587,6 @@
// last character, which cannot be a dash.
Name string `json:"name,omitempty"`
- // SatisfiesPzi: Output only. Reserved for future use.
- SatisfiesPzi bool `json:"satisfiesPzi,omitempty"`
-
// SatisfiesPzs: [Output Only] Reserved for future use.
SatisfiesPzs bool `json:"satisfiesPzs,omitempty"`
@@ -35475,8 +35280,7 @@
// `globalOperations` resource. - For regional operations, use the
// `regionOperations` resource. - For zonal operations, use the
// `zoneOperations` resource. For more information, read Global,
-// Regional, and Zonal Resources. Note that completed Operation
-// resources have a limited retention period.
+// Regional, and Zonal Resources.
type Operation struct {
// ClientOperationId: [Output Only] The value of `requestId` if you
// provided it in the request. Not present otherwise.
@@ -36885,16 +36689,14 @@
// IPProtocols: Protocols that apply as filter on mirrored traffic. If
// no protocols are specified, all traffic that matches the specified
// CIDR ranges is mirrored. If neither cidrRanges nor IPProtocols is
- // specified, all IPv4 traffic is mirrored.
+ // specified, all traffic is mirrored.
IPProtocols []string `json:"IPProtocols,omitempty"`
- // CidrRanges: One or more IPv4 or IPv6 CIDR ranges that apply as filter
- // on the source (ingress) or destination (egress) IP in the IP header.
- // If no ranges are specified, all IPv4 traffic that matches the
+ // CidrRanges: IP CIDR ranges that apply as filter on the source
+ // (ingress) or destination (egress) IP in the IP header. Only IPv4 is
+ // supported. If no ranges are specified, all traffic that matches the
// specified IPProtocols is mirrored. If neither cidrRanges nor
- // IPProtocols is specified, all IPv4 traffic is mirrored. To mirror all
- // IPv4 and IPv6 traffic, use "0.0.0.0/0,::/0". Note: Support for IPv6
- // traffic is in preview.
+ // IPProtocols is specified, all traffic is mirrored.
CidrRanges []string `json:"cidrRanges,omitempty"`
// Direction: Direction of traffic to mirror, either INGRESS, EGRESS, or
@@ -39416,7 +39218,6 @@
// "COMMITTED_N2_CPUS"
// "COMMITTED_NVIDIA_A100_80GB_GPUS"
// "COMMITTED_NVIDIA_A100_GPUS"
- // "COMMITTED_NVIDIA_H100_GPUS"
// "COMMITTED_NVIDIA_K80_GPUS"
// "COMMITTED_NVIDIA_L4_GPUS"
// "COMMITTED_NVIDIA_P100_GPUS"
@@ -39492,7 +39293,6 @@
// "PREEMPTIBLE_LOCAL_SSD_GB"
// "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS"
// "PREEMPTIBLE_NVIDIA_A100_GPUS"
- // "PREEMPTIBLE_NVIDIA_H100_GPUS"
// "PREEMPTIBLE_NVIDIA_K80_GPUS"
// "PREEMPTIBLE_NVIDIA_L4_GPUS"
// "PREEMPTIBLE_NVIDIA_P100_GPUS"
@@ -39502,9 +39302,6 @@
// "PREEMPTIBLE_NVIDIA_T4_GPUS"
// "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS"
// "PREEMPTIBLE_NVIDIA_V100_GPUS"
- // "PREEMPTIBLE_TPU_LITE_DEVICE_V5"
- // "PREEMPTIBLE_TPU_LITE_PODSLICE_V5"
- // "PREEMPTIBLE_TPU_PODSLICE_V4"
// "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK"
// "PSC_INTERNAL_LB_FORWARDING_RULES"
// "PUBLIC_ADVERTISED_PREFIXES"
@@ -39543,9 +39340,6 @@
// "TARGET_SSL_PROXIES"
// "TARGET_TCP_PROXIES"
// "TARGET_VPN_GATEWAYS"
- // "TPU_LITE_DEVICE_V5"
- // "TPU_LITE_PODSLICE_V5"
- // "TPU_PODSLICE_V4"
// "URL_MAPS"
// "VPN_GATEWAYS"
// "VPN_TUNNELS"
@@ -42082,10 +41876,6 @@
// that capacity is held in a specific zone even if the reserved VMs are
// not running. For more information, read Reserving zonal resources.
type Reservation struct {
- // AggregateReservation: Reservation for aggregated resources, providing
- // shape flexibility.
- AggregateReservation *AllocationAggregateReservation `json:"aggregateReservation,omitempty"`
-
// Commitment: [Output Only] Full or partial URL to a parent commitment.
// This field displays for reservations that are tied to a commitment.
Commitment string `json:"commitment,omitempty"`
@@ -42166,22 +41956,20 @@
// server.
googleapi.ServerResponse `json:"-"`
- // ForceSendFields is a list of field names (e.g.
- // "AggregateReservation") to unconditionally include in API requests.
- // By default, fields with empty or default values are omitted from API
- // requests. However, any non-pointer, non-interface field appearing in
- // ForceSendFields will be sent to the server regardless of whether the
- // field is empty or not. This may be used to include empty fields in
- // Patch requests.
+ // ForceSendFields is a list of field names (e.g. "Commitment") to
+ // unconditionally include in API requests. By default, fields with
+ // empty or default values are omitted from API requests. However, any
+ // non-pointer, non-interface field appearing in ForceSendFields will be
+ // sent to the server regardless of whether the field is empty or not.
+ // This may be used to include empty fields in Patch requests.
ForceSendFields []string `json:"-"`
- // NullFields is a list of field names (e.g. "AggregateReservation") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
+ // NullFields is a list of field names (e.g. "Commitment") to include in
+ // API requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
NullFields []string `json:"-"`
}
@@ -46482,7 +46270,7 @@
// PortSpecification: Specifies how a port is selected for health
// checking. Can be one of the following values: USE_FIXED_PORT:
// Specifies a port number explicitly using the port field in the health
- // check. Supported by backend services for passthrough load balancers
+ // check. Supported by backend services for pass-through load balancers
// and backend services for proxy load balancers. Not supported by
// target pools. The health check supports all backends supported by the
// backend service provided the backend can be health checked. For
@@ -46492,7 +46280,7 @@
// specifying the health check port by referring to the backend service.
// Only supported by backend services for proxy load balancers. Not
// supported by target pools. Not supported by backend services for
- // passthrough load balancers. Supports all backends that can be health
+ // pass-through load balancers. Supports all backends that can be health
// checked; for example, GCE_VM_IP_PORT network endpoint groups and
// instance group backends. For GCE_VM_IP_PORT network endpoint group
// backends, the health check uses the port number specified for each
@@ -48192,10 +47980,6 @@
// Security Policies.
Expr *Expr `json:"expr,omitempty"`
- // ExprOptions: The configuration options available when specifying a
- // user defined CEVAL expression (i.e., 'expr').
- ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"`
-
// VersionedExpr: Preconfigured versioned expression. If this field is
// specified, config must also be specified. Available preconfigured
// expressions along with their requirements are: SRC_IPS_V1 - must
@@ -48257,73 +48041,6 @@
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
-type SecurityPolicyRuleMatcherExprOptions struct {
- // RecaptchaOptions: reCAPTCHA configuration options to be applied for
- // the rule. If the rule does not evaluate reCAPTCHA tokens, this field
- // has no effect.
- RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "RecaptchaOptions") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) {
- type NoMethod SecurityPolicyRuleMatcherExprOptions
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct {
- // ActionTokenSiteKeys: A list of site keys to be used during the
- // validation of reCAPTCHA action-tokens. The provided site keys need to
- // be created from reCAPTCHA API under the same project where the
- // security policy is created.
- ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"`
-
- // SessionTokenSiteKeys: A list of site keys to be used during the
- // validation of reCAPTCHA session-tokens. The provided site keys need
- // to be created from reCAPTCHA API under the same project where the
- // security policy is created.
- SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys")
- // to unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to
- // include in API requests with the JSON null value. By default, fields
- // with empty values are omitted from API requests. However, any field
- // with an empty value appearing in NullFields will be sent to the
- // server as null. It is an error if a field in this list has a
- // non-empty value. This may be used to include null fields in Patch
- // requests.
- NullFields []string `json:"-"`
-}
-
-func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) {
- type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
// SecurityPolicyRuleNetworkMatcher: Represents a match condition that
// incoming network traffic is evaluated against.
type SecurityPolicyRuleNetworkMatcher struct {
@@ -48579,13 +48296,7 @@
// Server name indication in the TLS session of the HTTPS request. The
// key value is truncated to the first 128 bytes. The key type defaults
// to ALL on a HTTP session. - REGION_CODE: The country/region from
- // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL
- // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If
- // not available, the key type defaults to ALL. - USER_IP: The IP
- // address of the originating client, which is resolved based on
- // "userIpRequestHeaders" configured with the security policy. If there
- // is no "userIpRequestHeaders" configuration or an IP address cannot be
- // resolved from it, the key type defaults to IP.
+ // which the request originates.
//
// Possible values:
// "ALL"
@@ -48595,8 +48306,6 @@
// "IP"
// "REGION_CODE"
// "SNI"
- // "TLS_JA3_FINGERPRINT"
- // "USER_IP"
// "XFF_IP"
EnforceOnKey string `json:"enforceOnKey,omitempty"`
@@ -48683,14 +48392,7 @@
// bytes. - SNI: Server name indication in the TLS session of the HTTPS
// request. The key value is truncated to the first 128 bytes. The key
// type defaults to ALL on a HTTP session. - REGION_CODE: The
- // country/region from which the request originates. -
- // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects
- // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type
- // defaults to ALL. - USER_IP: The IP address of the originating client,
- // which is resolved based on "userIpRequestHeaders" configured with the
- // security policy. If there is no "userIpRequestHeaders" configuration
- // or an IP address cannot be resolved from it, the key type defaults to
- // IP.
+ // country/region from which the request originates.
//
// Possible values:
// "ALL"
@@ -48700,8 +48402,6 @@
// "IP"
// "REGION_CODE"
// "SNI"
- // "TLS_JA3_FINGERPRINT"
- // "USER_IP"
// "XFF_IP"
EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"`
@@ -50184,11 +49884,6 @@
// snapshot to a disk.
DownloadBytes int64 `json:"downloadBytes,omitempty,string"`
- // EnableConfidentialCompute: Whether this snapshot is created from a
- // confidential compute mode disk. [Output Only]: This field is not set
- // by user, but from source disk.
- EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"`
-
// GuestOsFeatures: [Output Only] A list of features to enable on the
// guest operating system. Applicable only for bootable images. Read
// Enabling guest operating system features to see a list of available
@@ -50240,9 +49935,6 @@
// last character, which cannot be a dash.
Name string `json:"name,omitempty"`
- // SatisfiesPzi: Output only. Reserved for future use.
- SatisfiesPzi bool `json:"satisfiesPzi,omitempty"`
-
// SatisfiesPzs: [Output Only] Reserved for future use.
SatisfiesPzs bool `json:"satisfiesPzs,omitempty"`
@@ -52180,9 +51872,9 @@
}
// SslPolicy: Represents an SSL Policy resource. Use SSL policies to
-// control SSL features, such as versions and cipher suites, that are
-// offered by Application Load Balancers and proxy Network Load
-// Balancers. For more information, read SSL policies overview.
+// control the SSL features, such as versions and cipher suites, offered
+// by an HTTPS or SSL Proxy load balancer. For more information, read
+// SSL Policy Concepts.
type SslPolicy struct {
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
// format.
@@ -52757,24 +52449,26 @@
PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"`
// Purpose: The purpose of the resource. This field can be either
- // PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY,
- // PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for
+ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or
+ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for
// user-created subnets or subnets that are automatically created in
- // auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY
- // or REGIONAL_MANAGED_PROXY are user-created subnetworks that are
- // reserved for Envoy-based load balancers. A subnet with purpose set to
+ // auto mode networks. A subnet with purpose set to
+ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved
+ // for regional Envoy-based load balancers. A subnet with purpose set to
// PRIVATE_SERVICE_CONNECT is used to publish services using Private
- // Service Connect. If unspecified, the subnet purpose defaults to
+ // Service Connect. A subnet with purpose set to
+ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used
+ // only by regional internal HTTP(S) load balancers. Note that
+ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional
+ // Envoy load balancers. If unspecified, the subnet purpose defaults to
// PRIVATE. The enableFlowLogs field isn't supported if the subnet
- // purpose field is set to GLOBAL_MANAGED_PROXY or
- // REGIONAL_MANAGED_PROXY.
+ // purpose field is set to REGIONAL_MANAGED_PROXY.
//
// Possible values:
// "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Envoy-based
// Load Balancing.
// "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal
- // HTTP(S) Load Balancing. This is a legacy purpose, please use
- // REGIONAL_MANAGED_PROXY instead.
+ // HTTP(S) Load Balancing.
// "PRIVATE" - Regular user created or automatically created subnet.
// "PRIVATE_NAT" - Subnetwork used as source range for Private NAT
// Gateways.
@@ -52790,16 +52484,12 @@
// can be set only at resource creation time.
Region string `json:"region,omitempty"`
- // ReservedInternalRange: The URL of the reserved internal range.
- ReservedInternalRange string `json:"reservedInternalRange,omitempty"`
-
// Role: The role of subnetwork. Currently, this field is only used when
- // purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The
- // value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one
- // that is currently being used for Envoy-based load balancers in a
- // region. A BACKUP subnetwork is one that is ready to be promoted to
- // ACTIVE or is currently draining. This field can be updated with a
- // patch request.
+ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or
+ // BACKUP. An ACTIVE subnetwork is one that is currently being used for
+ // Envoy-based load balancers in a region. A BACKUP subnetwork is one
+ // that is ready to be promoted to ACTIVE or is currently draining. This
+ // field can be updated with a patch request.
//
// Possible values:
// "ACTIVE" - The ACTIVE subnet that is currently used.
@@ -53365,9 +53055,6 @@
// unique within the subnetwork.
RangeName string `json:"rangeName,omitempty"`
- // ReservedInternalRange: The URL of the reserved internal range.
- ReservedInternalRange string `json:"reservedInternalRange,omitempty"`
-
// ForceSendFields is a list of field names (e.g. "IpCidrRange") to
// unconditionally include in API requests. By default, fields with
// empty or default values are omitted from API requests. However, any
@@ -53675,7 +53362,7 @@
// PortSpecification: Specifies how a port is selected for health
// checking. Can be one of the following values: USE_FIXED_PORT:
// Specifies a port number explicitly using the port field in the health
- // check. Supported by backend services for passthrough load balancers
+ // check. Supported by backend services for pass-through load balancers
// and backend services for proxy load balancers. Not supported by
// target pools. The health check supports all backends supported by the
// backend service provided the backend can be health checked. For
@@ -53685,7 +53372,7 @@
// specifying the health check port by referring to the backend service.
// Only supported by backend services for proxy load balancers. Not
// supported by target pools. Not supported by backend services for
- // passthrough load balancers. Supports all backends that can be health
+ // pass-through load balancers. Supports all backends that can be health
// checked; for example, GCE_VM_IP_PORT network endpoint groups and
// instance group backends. For GCE_VM_IP_PORT network endpoint group
// backends, the health check uses the port number specified for each
@@ -56080,10 +55767,10 @@
}
// TargetPool: Represents a Target Pool resource. Target pools are used
-// with external passthrough Network Load Balancers. A target pool
-// references member instances, an associated legacy HttpHealthCheck
-// resource, and, optionally, a backup target pool. For more
-// information, read Using target pools.
+// for network TCP/UDP load balancing. A target pool references member
+// instances, an associated legacy HttpHealthCheck resource, and,
+// optionally, a backup target pool. For more information, read Using
+// target pools.
type TargetPool struct {
// BackupPool: The server-defined URL for the resource. This field is
// applicable only when the containing target pool is serving a
@@ -57099,10 +56786,10 @@
}
// TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL
-// proxy is a component of a Proxy Network Load Balancer. The forwarding
-// rule references the target SSL proxy, and the target proxy then
-// references a backend service. For more information, read Proxy
-// Network Load Balancer overview.
+// proxy is a component of a SSL Proxy load balancer. Global forwarding
+// rules reference a target SSL proxy, and the target proxy then
+// references an external backend service. For more information, read
+// Using Target Proxies.
type TargetSslProxy struct {
// CertificateMap: URL of a certificate map that identifies a
// certificate map associated with the given target proxy. This field
@@ -57616,10 +57303,10 @@
}
// TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP
-// proxy is a component of a Proxy Network Load Balancer. The forwarding
-// rule references the target TCP proxy, and the target proxy then
-// references a backend service. For more information, read Proxy
-// Network Load Balancer overview.
+// proxy is a component of a TCP Proxy load balancer. Global forwarding
+// rules reference target TCP proxy, and the target proxy then
+// references an external backend service. For more information, read
+// TCP Proxy Load Balancing overview.
type TargetTcpProxy struct {
// CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text
// format.
@@ -59989,24 +59676,26 @@
Network string `json:"network,omitempty"`
// Purpose: The purpose of the resource. This field can be either
- // PRIVATE, GLOBAL_MANAGED_PROXY, REGIONAL_MANAGED_PROXY,
- // PRIVATE_SERVICE_CONNECT, or PRIVATE is the default purpose for
+ // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or
+ // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for
// user-created subnets or subnets that are automatically created in
- // auto mode networks. Subnets with purpose set to GLOBAL_MANAGED_PROXY
- // or REGIONAL_MANAGED_PROXY are user-created subnetworks that are
- // reserved for Envoy-based load balancers. A subnet with purpose set to
+ // auto mode networks. A subnet with purpose set to
+ // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved
+ // for regional Envoy-based load balancers. A subnet with purpose set to
// PRIVATE_SERVICE_CONNECT is used to publish services using Private
- // Service Connect. If unspecified, the subnet purpose defaults to
+ // Service Connect. A subnet with purpose set to
+ // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used
+ // only by regional internal HTTP(S) load balancers. Note that
+ // REGIONAL_MANAGED_PROXY is the preferred setting for all regional
+ // Envoy load balancers. If unspecified, the subnet purpose defaults to
// PRIVATE. The enableFlowLogs field isn't supported if the subnet
- // purpose field is set to GLOBAL_MANAGED_PROXY or
- // REGIONAL_MANAGED_PROXY.
+ // purpose field is set to REGIONAL_MANAGED_PROXY.
//
// Possible values:
// "GLOBAL_MANAGED_PROXY" - Subnet reserved for Global Envoy-based
// Load Balancing.
// "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal
- // HTTP(S) Load Balancing. This is a legacy purpose, please use
- // REGIONAL_MANAGED_PROXY instead.
+ // HTTP(S) Load Balancing.
// "PRIVATE" - Regular user created or automatically created subnet.
// "PRIVATE_NAT" - Subnetwork used as source range for Private NAT
// Gateways.
@@ -60019,12 +59708,11 @@
Purpose string `json:"purpose,omitempty"`
// Role: The role of subnetwork. Currently, this field is only used when
- // purpose is set to GLOBAL_MANAGED_PROXY or REGIONAL_MANAGED_PROXY. The
- // value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one
- // that is currently being used for Envoy-based load balancers in a
- // region. A BACKUP subnetwork is one that is ready to be promoted to
- // ACTIVE or is currently draining. This field can be updated with a
- // patch request.
+ // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or
+ // BACKUP. An ACTIVE subnetwork is one that is currently being used for
+ // Envoy-based load balancers in a region. A BACKUP subnetwork is one
+ // that is ready to be promoted to ACTIVE or is currently draining. This
+ // field can be updated with a patch request.
//
// Possible values:
// "ACTIVE" - The ACTIVE subnet that is currently used.
@@ -68319,7 +68007,7 @@
// SetEdgeSecurityPolicy: Sets the edge security policy for the
// specified backend bucket.
//
-// - backendBucket: Name of the BackendBucket resource to which the
+// - backendBucket: Name of the BackendService resource to which the
// security policy should be set. The name should conform to RFC1035.
// - project: Project ID for this request.
func (r *BackendBucketsService) SetEdgeSecurityPolicy(project string, backendBucket string, securitypolicyreference *SecurityPolicyReference) *BackendBucketsSetEdgeSecurityPolicyCall {
@@ -68448,7 +68136,7 @@
// ],
// "parameters": {
// "backendBucket": {
- // "description": "Name of the BackendBucket resource to which the security policy should be set. The name should conform to RFC1035.",
+ // "description": "Name of the BackendService resource to which the security policy should be set. The name should conform to RFC1035.",
// "location": "path",
// "required": true,
// "type": "string"
@@ -97840,8 +97528,8 @@
// instance, the currentAction is CREATING. If a previous action failed,
// the list displays the errors for that failed action. The orderBy
// query parameter is not supported. The `pageToken` query parameter is
-// supported only if the group's `listManagedInstancesResults` field is
-// set to `PAGINATED`.
+// supported only in the alpha and beta API and only if the group's
+// `listManagedInstancesResults` field is set to `PAGINATED`.
//
// - instanceGroupManager: The name of the managed instance group.
// - project: Project ID for this request.
@@ -98027,7 +97715,7 @@
}
return ret, nil
// {
- // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
+ // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
// "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances",
// "httpMethod": "POST",
// "id": "compute.instanceGroupManagers.listManagedInstances",
@@ -107109,184 +106797,6 @@
}
}
-// method id "compute.instances.performMaintenance":
-
-type InstancesPerformMaintenanceCall struct {
- s *Service
- project string
- zone string
- instance string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// PerformMaintenance: Perform a manual maintenance on the instance.
-//
-// - instance: Name of the instance scoping this request.
-// - project: Project ID for this request.
-// - zone: The name of the zone for this request.
-func (r *InstancesService) PerformMaintenance(project string, zone string, instance string) *InstancesPerformMaintenanceCall {
- c := &InstancesPerformMaintenanceCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.zone = zone
- c.instance = instance
- return c
-}
-
-// RequestId sets the optional parameter "requestId": An optional
-// request ID to identify requests. Specify a unique request ID so that
-// if you must retry your request, the server will know to ignore the
-// request if it has already been completed. For example, consider a
-// situation where you make an initial request and the request times
-// out. If you make the request again with the same request ID, the
-// server can check if original operation with the same request ID was
-// received, and if so, will ignore the second request. This prevents
-// clients from accidentally creating duplicate commitments. The request
-// ID must be a valid UUID with the exception that zero UUID is not
-// supported ( 00000000-0000-0000-0000-000000000000).
-func (c *InstancesPerformMaintenanceCall) RequestId(requestId string) *InstancesPerformMaintenanceCall {
- c.urlParams_.Set("requestId", requestId)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *InstancesPerformMaintenanceCall) Fields(s ...googleapi.Field) *InstancesPerformMaintenanceCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *InstancesPerformMaintenanceCall) Context(ctx context.Context) *InstancesPerformMaintenanceCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *InstancesPerformMaintenanceCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *InstancesPerformMaintenanceCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/performMaintenance")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "zone": c.zone,
- "instance": c.instance,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.instances.performMaintenance" call.
-// Exactly one of *Operation or error will be non-nil. Any non-2xx
-// status code is an error. Response headers are in either
-// *Operation.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *InstancesPerformMaintenanceCall) Do(opts ...googleapi.CallOption) (*Operation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Operation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Perform a manual maintenance on the instance.",
- // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/performMaintenance",
- // "httpMethod": "POST",
- // "id": "compute.instances.performMaintenance",
- // "parameterOrder": [
- // "project",
- // "zone",
- // "instance"
- // ],
- // "parameters": {
- // "instance": {
- // "description": "Name of the instance scoping this request.",
- // "location": "path",
- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}",
- // "required": true,
- // "type": "string"
- // },
- // "project": {
- // "description": "Project ID for this request.",
- // "location": "path",
- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- // "required": true,
- // "type": "string"
- // },
- // "requestId": {
- // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).",
- // "location": "query",
- // "type": "string"
- // },
- // "zone": {
- // "description": "The name of the zone for this request.",
- // "location": "path",
- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "projects/{project}/zones/{zone}/instances/{instance}/performMaintenance",
- // "response": {
- // "$ref": "Operation"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/compute"
- // ]
- // }
-
-}
-
// method id "compute.instances.removeResourcePolicies":
type InstancesRemoveResourcePoliciesCall struct {
@@ -110653,14 +110163,6 @@
return c
}
-// WithExtendedNotifications sets the optional parameter
-// "withExtendedNotifications": Determines whether the customers receive
-// notifications before migration. Only applicable to SF vms.
-func (c *InstancesSimulateMaintenanceEventCall) WithExtendedNotifications(withExtendedNotifications bool) *InstancesSimulateMaintenanceEventCall {
- c.urlParams_.Set("withExtendedNotifications", fmt.Sprint(withExtendedNotifications))
- return c
-}
-
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
@@ -110778,11 +110280,6 @@
// "location": "query",
// "type": "string"
// },
- // "withExtendedNotifications": {
- // "description": "Determines whether the customers receive notifications before migration. Only applicable to SF vms.",
- // "location": "query",
- // "type": "boolean"
- // },
// "zone": {
// "description": "The name of the zone for this request.",
// "location": "path",
@@ -111202,11 +110699,9 @@
return c
}
-// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This
-// property is required if the instance has any attached Local SSD
-// disks. If false, Local SSD data will be preserved when the instance
-// is suspended. If true, the contents of any attached Local SSD disks
-// will be discarded.
+// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If
+// true, discard the contents of any attached localSSD partitions.
+// Default value is false.
func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall {
c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd))
return c
@@ -111327,7 +110822,7 @@
// ],
// "parameters": {
// "discardLocalSsd": {
- // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.",
+ // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.",
// "location": "query",
// "type": "boolean"
// },
@@ -111402,11 +110897,9 @@
return c
}
-// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This
-// property is required if the instance has any attached Local SSD
-// disks. If false, Local SSD data will be preserved when the instance
-// is suspended. If true, the contents of any attached Local SSD disks
-// will be discarded.
+// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If
+// true, discard the contents of any attached localSSD partitions.
+// Default value is false.
func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall {
c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd))
return c
@@ -111527,7 +111020,7 @@
// ],
// "parameters": {
// "discardLocalSsd": {
- // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.",
+ // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.",
// "location": "query",
// "type": "boolean"
// },
@@ -155063,8 +154556,9 @@
// group and instances that are scheduled to be created. The list
// includes any current actions that the group has scheduled for its
// instances. The orderBy query parameter is not supported. The
-// `pageToken` query parameter is supported only if the group's
-// `listManagedInstancesResults` field is set to `PAGINATED`.
+// `pageToken` query parameter is supported only in the alpha and beta
+// API and only if the group's `listManagedInstancesResults` field is
+// set to `PAGINATED`.
//
// - instanceGroupManager: The name of the managed instance group.
// - project: Project ID for this request.
@@ -155249,7 +154743,7 @@
}
return ret, nil
// {
- // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
+ // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.",
// "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances",
// "httpMethod": "POST",
// "id": "compute.regionInstanceGroupManagers.listManagedInstances",
@@ -173709,298 +173203,6 @@
}
-// method id "compute.regionZones.list":
-
-type RegionZonesListCall struct {
- s *Service
- project string
- region string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves the list of Zone resources under the specific region
-// available to the specified project.
-//
-// - project: Project ID for this request.
-// - region: Region for this request.
-func (r *RegionZonesService) List(project string, region string) *RegionZonesListCall {
- c := &RegionZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.project = project
- c.region = region
- return c
-}
-
-// Filter sets the optional parameter "filter": A filter expression that
-// filters resources listed in the response. Most Compute resources
-// support two types of filter expressions: expressions that support
-// regular expressions and expressions that follow API improvement
-// proposal AIP-160. These two types of filter expressions cannot be
-// mixed in one request. If you want to use AIP-160, your expression
-// must specify the field name, an operator, and the value that you want
-// to use for filtering. The value must be a string, a number, or a
-// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=`
-// or `:`. For example, if you are filtering Compute Engine instances,
-// you can exclude instances named `example-instance` by specifying
-// `name != example-instance`. The `:*` comparison can be used to test
-// whether a key has been defined. For example, to find all objects with
-// `owner` label use: ``` labels.owner:* ``` You can also filter nested
-// fields. For example, you could specify `scheduling.automaticRestart =
-// false` to include instances only if they are not scheduled for
-// automatic restarts. You can use filtering on nested fields to filter
-// based on resource labels. To filter on multiple expressions, provide
-// each separate expression within parentheses. For example: ```
-// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")
-// ``` By default, each expression is an `AND` expression. However, you
-// can include `AND` and `OR` expressions explicitly. For example: ```
-// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell")
-// AND (scheduling.automaticRestart = true) ``` If you want to use a
-// regular expression, use the `eq` (equal) or `ne` (not equal) operator
-// against a single un-parenthesized expression with or without quotes
-// or against multiple parenthesized expressions. Examples: `fieldname
-// eq unquoted literal` `fieldname eq 'single quoted literal'`
-// `fieldname eq "double quoted literal" `(fieldname1 eq literal)
-// (fieldname2 ne "literal")` The literal value is interpreted as a
-// regular expression using Google RE2 library syntax. The literal value
-// must match the entire field. For example, to filter for instances
-// that do not end with name "instance", you would use `name ne
-// .*instance`. You cannot combine constraints on multiple fields using
-// regular expressions.
-func (c *RegionZonesListCall) Filter(filter string) *RegionZonesListCall {
- c.urlParams_.Set("filter", filter)
- return c
-}
-
-// MaxResults sets the optional parameter "maxResults": The maximum
-// number of results per page that should be returned. If the number of
-// available results is larger than `maxResults`, Compute Engine returns
-// a `nextPageToken` that can be used to get the next page of results in
-// subsequent list requests. Acceptable values are `0` to `500`,
-// inclusive. (Default: `500`)
-func (c *RegionZonesListCall) MaxResults(maxResults int64) *RegionZonesListCall {
- c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
- return c
-}
-
-// OrderBy sets the optional parameter "orderBy": Sorts list results by
-// a certain order. By default, results are returned in alphanumerical
-// order based on the resource name. You can also sort results in
-// descending order based on the creation timestamp using
-// `orderBy="creationTimestamp desc". This sorts results based on the
-// `creationTimestamp` field in reverse chronological order (newest
-// result first). Use this to sort resources like operations so that the
-// newest operation is returned first. Currently, only sorting by `name`
-// or `creationTimestamp desc` is supported.
-func (c *RegionZonesListCall) OrderBy(orderBy string) *RegionZonesListCall {
- c.urlParams_.Set("orderBy", orderBy)
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": Specifies a page
-// token to use. Set `pageToken` to the `nextPageToken` returned by a
-// previous list request to get the next page of results.
-func (c *RegionZonesListCall) PageToken(pageToken string) *RegionZonesListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// ReturnPartialSuccess sets the optional parameter
-// "returnPartialSuccess": Opt-in for partial success behavior which
-// provides partial results in case of failure. The default value is
-// false.
-func (c *RegionZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionZonesListCall {
- c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *RegionZonesListCall) Fields(s ...googleapi.Field) *RegionZonesListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of In-None-Match.
-func (c *RegionZonesListCall) IfNoneMatch(entityTag string) *RegionZonesListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *RegionZonesListCall) Context(ctx context.Context) *RegionZonesListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *RegionZonesListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *RegionZonesListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/zones")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "project": c.project,
- "region": c.region,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "compute.regionZones.list" call.
-// Exactly one of *ZoneList or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *ZoneList.ServerResponse.Header or (if a response was returned at
-// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
-// to check whether the returned error was because
-// http.StatusNotModified was returned.
-func (c *RegionZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &ZoneList{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves the list of Zone resources under the specific region available to the specified project.",
- // "flatPath": "projects/{project}/regions/{region}/zones",
- // "httpMethod": "GET",
- // "id": "compute.regionZones.list",
- // "parameterOrder": [
- // "project",
- // "region"
- // ],
- // "parameters": {
- // "filter": {
- // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.",
- // "location": "query",
- // "type": "string"
- // },
- // "maxResults": {
- // "default": "500",
- // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)",
- // "format": "uint32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "orderBy": {
- // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.",
- // "location": "query",
- // "type": "string"
- // },
- // "pageToken": {
- // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.",
- // "location": "query",
- // "type": "string"
- // },
- // "project": {
- // "description": "Project ID for this request.",
- // "location": "path",
- // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))",
- // "required": true,
- // "type": "string"
- // },
- // "region": {
- // "description": "Region for this request.",
- // "location": "path",
- // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
- // "required": true,
- // "type": "string"
- // },
- // "returnPartialSuccess": {
- // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.",
- // "location": "query",
- // "type": "boolean"
- // }
- // },
- // "path": "projects/{project}/regions/{region}/zones",
- // "response": {
- // "$ref": "ZoneList"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/compute",
- // "https://www.googleapis.com/auth/compute.readonly"
- // ]
- // }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *RegionZonesListCall) Pages(ctx context.Context, f func(*ZoneList) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
// method id "compute.regions.get":
type RegionsGetCall struct {
@@ -202387,8 +201589,8 @@
// SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy
// specifies the server-side support for SSL features. This affects
-// connections between clients and the load balancer. They do not affect
-// the connection between the load balancer and the backends.
+// connections between clients and the SSL proxy load balancer. They do
+// not affect the connection between the load balancer and the backends.
//
// - project: Project ID for this request.
// - targetSslProxy: Name of the TargetSslProxy resource whose SSL
@@ -202510,7 +201712,7 @@
}
return ret, nil
// {
- // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.",
+ // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.",
// "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy",
// "httpMethod": "POST",
// "id": "compute.targetSslProxies.setSslPolicy",
diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
index 947e83a..5dfff4c 100644
--- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
+++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC.
+// Copyright 2023 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -90,9 +90,7 @@
const apiName = "iamcredentials"
const apiVersion = "v1"
const basePath = "https://iamcredentials.googleapis.com/"
-const basePathTemplate = "https://iamcredentials.UNIVERSE_DOMAIN/"
const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/"
-const defaultUniverseDomain = "googleapis.com"
// OAuth2 scopes used by this API.
const (
@@ -109,9 +107,7 @@
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
- opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
- opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index 285e6e0..3356fa9 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -27,7 +27,6 @@
type DialSettings struct {
Endpoint string
DefaultEndpoint string
- DefaultEndpointTemplate string
DefaultMTLSEndpoint string
Scopes []string
DefaultScopes []string
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 8ecad35..a130609 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.156.0"
+const Version = "0.153.0"
diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
index c15be9f..3fdee09 100644
--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
+++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
@@ -22,29 +22,10 @@
// It should only be used internally by generated clients.
//
// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint.
-//
-// Deprecated: WithDefaultEndpoint does not support setting the universe domain.
-// Use WithDefaultEndpointTemplate and WithDefaultUniverseDomain to compose the
-// default endpoint instead.
func WithDefaultEndpoint(url string) option.ClientOption {
return defaultEndpointOption(url)
}
-type defaultEndpointTemplateOption string
-
-func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) {
- settings.DefaultEndpointTemplate = string(o)
-}
-
-// WithDefaultEndpointTemplate provides a template for creating the endpoint
-// using a universe domain. See also WithDefaultUniverseDomain and
-// option.WithUniverseDomain.
-//
-// It should only be used internally by generated clients.
-func WithDefaultEndpointTemplate(url string) option.ClientOption {
- return defaultEndpointTemplateOption(url)
-}
-
type defaultMTLSEndpointOption string
func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) {
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 2500369..6c89799 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -26,14 +26,7 @@
"description": "Stores and retrieves potentially large, immutable data objects.",
"discoveryVersion": "v1",
"documentationLink": "https://developers.google.com/storage/docs/json_api/",
- "endpoints": [
- {
- "description": "Regional Endpoint",
- "endpointUrl": "https://storage.me-central2.rep.googleapis.com/",
- "location": "me-central2"
- }
- ],
- "etag": "\"3136323232353032373039383637313835303036\"",
+ "etag": "\"38383938373230313033363637363637353533\"",
"icons": {
"x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
"x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -99,7 +92,7 @@
},
"protocol": "rest",
"resources": {
- "anywhereCaches": {
+ "anywhereCache": {
"methods": {
"disable": {
"description": "Disables an Anywhere Cache instance.",
@@ -117,7 +110,7 @@
"type": "string"
},
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
@@ -149,7 +142,7 @@
"type": "string"
},
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
@@ -176,7 +169,7 @@
],
"parameters": {
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
@@ -204,13 +197,13 @@
],
"parameters": {
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
},
"pageSize": {
- "description": "Maximum number of items to return in a single page of responses. Maximum 1000.",
+ "description": "Maximum number of items return in a single page of responses. Maximum 1000.",
"format": "int32",
"location": "query",
"minimum": "0",
@@ -250,7 +243,7 @@
"type": "string"
},
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
@@ -282,7 +275,7 @@
"type": "string"
},
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
@@ -314,7 +307,7 @@
"type": "string"
},
"bucket": {
- "description": "Name of the parent bucket.",
+ "description": "Name of the partent bucket",
"location": "path",
"required": true,
"type": "string"
@@ -1387,240 +1380,6 @@
}
}
},
- "folders": {
- "methods": {
- "delete": {
- "description": "Permanently deletes a folder. Only applicable to buckets with hierarchical namespace enabled.",
- "httpMethod": "DELETE",
- "id": "storage.folders.delete",
- "parameterOrder": [
- "bucket",
- "folder"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the folder resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "folder": {
- "description": "Name of a folder.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "If set, only deletes the folder if its metageneration matches this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "If set, only deletes the folder if its metageneration does not match this value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/folders/{folder}",
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "get": {
- "description": "Returns metadata for the specified folder. Only applicable to buckets with hierarchical namespace enabled.",
- "httpMethod": "GET",
- "id": "storage.folders.get",
- "parameterOrder": [
- "bucket",
- "folder"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the folder resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "folder": {
- "description": "Name of a folder.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifMetagenerationMatch": {
- "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifMetagenerationNotMatch": {
- "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/folders/{folder}",
- "response": {
- "$ref": "Folder"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "insert": {
- "description": "Creates a new folder. Only applicable to buckets with hierarchical namespace enabled.",
- "httpMethod": "POST",
- "id": "storage.folders.insert",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the folder resides.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "recursive": {
- "description": "If true, any parent folder which doesn’t exist will be created automatically.",
- "location": "query",
- "type": "boolean"
- }
- },
- "path": "b/{bucket}/folders",
- "request": {
- "$ref": "Folder"
- },
- "response": {
- "$ref": "Folder"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "list": {
- "description": "Retrieves a list of folders matching the criteria. Only applicable to buckets with hierarchical namespace enabled.",
- "httpMethod": "GET",
- "id": "storage.folders.list",
- "parameterOrder": [
- "bucket"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which to look for folders.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "delimiter": {
- "description": "Returns results in a directory-like mode. The only supported value is '/'. If set, items will only contain folders that either exactly match the prefix, or are one level below the prefix.",
- "location": "query",
- "type": "string"
- },
- "endOffset": {
- "description": "Filter results to folders whose names are lexicographically before endOffset. If startOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
- "location": "query",
- "type": "string"
- },
- "pageSize": {
- "description": "Maximum number of items to return in a single page of responses.",
- "format": "int32",
- "location": "query",
- "minimum": "0",
- "type": "integer"
- },
- "pageToken": {
- "description": "A previously-returned page token representing part of the larger set of results to view.",
- "location": "query",
- "type": "string"
- },
- "prefix": {
- "description": "Filter results to folders whose paths begin with this prefix. If set, the value must either be an empty string or end with a '/'.",
- "location": "query",
- "type": "string"
- },
- "startOffset": {
- "description": "Filter results to folders whose names are lexicographically equal to or after startOffset. If endOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
- "location": "query",
- "type": "string"
- }
- },
- "path": "b/{bucket}/folders",
- "response": {
- "$ref": "Folders"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- },
- "rename": {
- "description": "Renames a source folder to a destination folder. Only applicable to buckets with hierarchical namespace enabled.",
- "httpMethod": "POST",
- "id": "storage.folders.rename",
- "parameterOrder": [
- "bucket",
- "sourceFolder",
- "destinationFolder"
- ],
- "parameters": {
- "bucket": {
- "description": "Name of the bucket in which the folders are in.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "destinationFolder": {
- "description": "Name of the destination folder.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "ifSourceMetagenerationMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "ifSourceMetagenerationNotMatch": {
- "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- "format": "int64",
- "location": "query",
- "type": "string"
- },
- "sourceFolder": {
- "description": "Name of the source folder.",
- "location": "path",
- "required": true,
- "type": "string"
- }
- },
- "path": "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}",
- "response": {
- "$ref": "GoogleLongrunningOperation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_write"
- ]
- }
- }
- },
"managedFolders": {
"methods": {
"delete": {
@@ -1799,7 +1558,7 @@
"type": "string"
},
"pageSize": {
- "description": "Maximum number of items to return in a single page of responses.",
+ "description": "Maximum number of items return in a single page of responses.",
"format": "int32",
"location": "query",
"minimum": "0",
@@ -4040,7 +3799,7 @@
}
}
},
- "revision": "20240105",
+ "revision": "20231117",
"rootUrl": "https://storage.googleapis.com/",
"schemas": {
"AnywhereCache": {
@@ -4094,10 +3853,6 @@
"description": "The modification time of the cache instance metadata in RFC 3339 format.",
"format": "date-time",
"type": "string"
- },
- "zone": {
- "description": "The zone in which the cache instance is running. For example, us-central1-a.",
- "type": "string"
}
},
"type": "object"
@@ -4248,16 +4003,6 @@
"description": "HTTP 1.1 Entity tag for the bucket.",
"type": "string"
},
- "hierarchicalNamespace": {
- "description": "The bucket's hierarchical namespace configuration.",
- "properties": {
- "enabled": {
- "description": "When set to true, hierarchical namespace is enabled for this bucket.",
- "type": "boolean"
- }
- },
- "type": "object"
- },
"iamConfiguration": {
"description": "The bucket's IAM configuration.",
"properties": {
@@ -4845,90 +4590,6 @@
},
"type": "object"
},
- "Folder": {
- "description": "A folder. Only available in buckets with hierarchical namespace enabled.",
- "id": "Folder",
- "properties": {
- "bucket": {
- "description": "The name of the bucket containing this folder.",
- "type": "string"
- },
- "id": {
- "description": "The ID of the folder, including the bucket name, folder name.",
- "type": "string"
- },
- "kind": {
- "default": "storage#folder",
- "description": "The kind of item this is. For folders, this is always storage#folder.",
- "type": "string"
- },
- "metadata": {
- "additionalProperties": {
- "description": "An individual metadata entry.",
- "type": "string"
- },
- "description": "User-provided metadata, in key/value pairs.",
- "type": "object"
- },
- "metageneration": {
- "description": "The version of the metadata for this folder. Used for preconditions and for detecting changes in metadata.",
- "format": "int64",
- "type": "string"
- },
- "name": {
- "description": "The name of the folder. Required if not specified by URL parameter.",
- "type": "string"
- },
- "pendingRenameInfo": {
- "description": "Only present if the folder is part of an ongoing rename folder operation. Contains information which can be used to query the operation status.",
- "properties": {
- "operationId": {
- "description": "The ID of the rename folder operation.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "selfLink": {
- "description": "The link to this folder.",
- "type": "string"
- },
- "timeCreated": {
- "description": "The creation time of the folder in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- },
- "updated": {
- "description": "The modification time of the folder metadata in RFC 3339 format.",
- "format": "date-time",
- "type": "string"
- }
- },
- "type": "object"
- },
- "Folders": {
- "description": "A list of folders.",
- "id": "Folders",
- "properties": {
- "items": {
- "description": "The list of items.",
- "items": {
- "$ref": "Folder"
- },
- "type": "array"
- },
- "kind": {
- "default": "storage#folders",
- "description": "The kind of item this is. For lists of folders, this is always storage#folders.",
- "type": "string"
- },
- "nextPageToken": {
- "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
- "type": "string"
- }
- },
- "type": "object"
- },
"GoogleLongrunningListOperationsResponse": {
"description": "The response message for storage.buckets.operations.list.",
"id": "GoogleLongrunningListOperationsResponse",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index c34ca98..c4331c0 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC.
+// Copyright 2023 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -98,9 +98,7 @@
const apiName = "storage"
const apiVersion = "v1"
const basePath = "https://storage.googleapis.com/storage/v1/"
-const basePathTemplate = "https://storage.UNIVERSE_DOMAIN/storage/v1/"
const mtlsBasePath = "https://storage.mtls.googleapis.com/storage/v1/"
-const defaultUniverseDomain = "googleapis.com"
// OAuth2 scopes used by this API.
const (
@@ -132,9 +130,7 @@
// NOTE: prepend, so we don't override user-specified scopes.
opts = append([]option.ClientOption{scopesOption}, opts...)
opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
- opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate))
opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
- opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain))
client, endpoint, err := htransport.NewClient(ctx, opts...)
if err != nil {
return nil, err
@@ -159,12 +155,11 @@
return nil, errors.New("client is nil")
}
s := &Service{client: client, BasePath: basePath}
- s.AnywhereCaches = NewAnywhereCachesService(s)
+ s.AnywhereCache = NewAnywhereCacheService(s)
s.BucketAccessControls = NewBucketAccessControlsService(s)
s.Buckets = NewBucketsService(s)
s.Channels = NewChannelsService(s)
s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
- s.Folders = NewFoldersService(s)
s.ManagedFolders = NewManagedFoldersService(s)
s.Notifications = NewNotificationsService(s)
s.ObjectAccessControls = NewObjectAccessControlsService(s)
@@ -179,7 +174,7 @@
BasePath string // API endpoint base URL
UserAgent string // optional additional User-Agent fragment
- AnywhereCaches *AnywhereCachesService
+ AnywhereCache *AnywhereCacheService
BucketAccessControls *BucketAccessControlsService
@@ -189,8 +184,6 @@
DefaultObjectAccessControls *DefaultObjectAccessControlsService
- Folders *FoldersService
-
ManagedFolders *ManagedFoldersService
Notifications *NotificationsService
@@ -211,12 +204,12 @@
return googleapi.UserAgent + " " + s.UserAgent
}
-func NewAnywhereCachesService(s *Service) *AnywhereCachesService {
- rs := &AnywhereCachesService{s: s}
+func NewAnywhereCacheService(s *Service) *AnywhereCacheService {
+ rs := &AnywhereCacheService{s: s}
return rs
}
-type AnywhereCachesService struct {
+type AnywhereCacheService struct {
s *Service
}
@@ -256,15 +249,6 @@
s *Service
}
-func NewFoldersService(s *Service) *FoldersService {
- rs := &FoldersService{s: s}
- return rs
-}
-
-type FoldersService struct {
- s *Service
-}
-
func NewManagedFoldersService(s *Service) *ManagedFoldersService {
rs := &ManagedFoldersService{s: s}
return rs
@@ -383,10 +367,6 @@
// RFC 3339 format.
UpdateTime string `json:"updateTime,omitempty"`
- // Zone: The zone in which the cache instance is running. For example,
- // us-central1-a.
- Zone string `json:"zone,omitempty"`
-
// ServerResponse contains the HTTP response code and headers from the
// server.
googleapi.ServerResponse `json:"-"`
@@ -501,10 +481,6 @@
// Etag: HTTP 1.1 Entity tag for the bucket.
Etag string `json:"etag,omitempty"`
- // HierarchicalNamespace: The bucket's hierarchical namespace
- // configuration.
- HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"`
-
// IamConfiguration: The bucket's IAM configuration.
IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"`
@@ -805,36 +781,6 @@
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
-// BucketHierarchicalNamespace: The bucket's hierarchical namespace
-// configuration.
-type BucketHierarchicalNamespace struct {
- // Enabled: When set to true, hierarchical namespace is enabled for this
- // bucket.
- Enabled bool `json:"enabled,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "Enabled") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Enabled") to include in
- // API requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) {
- type NoMethod BucketHierarchicalNamespace
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
// BucketIamConfiguration: The bucket's IAM configuration.
type BucketIamConfiguration struct {
// BucketPolicyOnly: The bucket's uniform bucket-level access
@@ -1847,143 +1793,6 @@
return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
}
-// Folder: A folder. Only available in buckets with hierarchical
-// namespace enabled.
-type Folder struct {
- // Bucket: The name of the bucket containing this folder.
- Bucket string `json:"bucket,omitempty"`
-
- // Id: The ID of the folder, including the bucket name, folder name.
- Id string `json:"id,omitempty"`
-
- // Kind: The kind of item this is. For folders, this is always
- // storage#folder.
- Kind string `json:"kind,omitempty"`
-
- // Metadata: User-provided metadata, in key/value pairs.
- Metadata map[string]string `json:"metadata,omitempty"`
-
- // Metageneration: The version of the metadata for this folder. Used for
- // preconditions and for detecting changes in metadata.
- Metageneration int64 `json:"metageneration,omitempty,string"`
-
- // Name: The name of the folder. Required if not specified by URL
- // parameter.
- Name string `json:"name,omitempty"`
-
- // PendingRenameInfo: Only present if the folder is part of an ongoing
- // rename folder operation. Contains information which can be used to
- // query the operation status.
- PendingRenameInfo *FolderPendingRenameInfo `json:"pendingRenameInfo,omitempty"`
-
- // SelfLink: The link to this folder.
- SelfLink string `json:"selfLink,omitempty"`
-
- // TimeCreated: The creation time of the folder in RFC 3339 format.
- TimeCreated string `json:"timeCreated,omitempty"`
-
- // Updated: The modification time of the folder metadata in RFC 3339
- // format.
- Updated string `json:"updated,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Bucket") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Bucket") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Folder) MarshalJSON() ([]byte, error) {
- type NoMethod Folder
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// FolderPendingRenameInfo: Only present if the folder is part of an
-// ongoing rename folder operation. Contains information which can be
-// used to query the operation status.
-type FolderPendingRenameInfo struct {
- // OperationId: The ID of the rename folder operation.
- OperationId string `json:"operationId,omitempty"`
-
- // ForceSendFields is a list of field names (e.g. "OperationId") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "OperationId") to include
- // in API requests with the JSON null value. By default, fields with
- // empty values are omitted from API requests. However, any field with
- // an empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *FolderPendingRenameInfo) MarshalJSON() ([]byte, error) {
- type NoMethod FolderPendingRenameInfo
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
-// Folders: A list of folders.
-type Folders struct {
- // Items: The list of items.
- Items []*Folder `json:"items,omitempty"`
-
- // Kind: The kind of item this is. For lists of folders, this is always
- // storage#folders.
- Kind string `json:"kind,omitempty"`
-
- // NextPageToken: The continuation token, used to page through large
- // result sets. Provide this value in a subsequent request to return the
- // next page of results.
- NextPageToken string `json:"nextPageToken,omitempty"`
-
- // ServerResponse contains the HTTP response code and headers from the
- // server.
- googleapi.ServerResponse `json:"-"`
-
- // ForceSendFields is a list of field names (e.g. "Items") to
- // unconditionally include in API requests. By default, fields with
- // empty or default values are omitted from API requests. However, any
- // non-pointer, non-interface field appearing in ForceSendFields will be
- // sent to the server regardless of whether the field is empty or not.
- // This may be used to include empty fields in Patch requests.
- ForceSendFields []string `json:"-"`
-
- // NullFields is a list of field names (e.g. "Items") to include in API
- // requests with the JSON null value. By default, fields with empty
- // values are omitted from API requests. However, any field with an
- // empty value appearing in NullFields will be sent to the server as
- // null. It is an error if a field in this list has a non-empty value.
- // This may be used to include null fields in Patch requests.
- NullFields []string `json:"-"`
-}
-
-func (s *Folders) MarshalJSON() ([]byte, error) {
- type NoMethod Folders
- raw := NoMethod(*s)
- return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
-}
-
// GoogleLongrunningListOperationsResponse: The response message for
// storage.buckets.operations.list.
type GoogleLongrunningListOperationsResponse struct {
@@ -3258,7 +3067,7 @@
// method id "storage.anywhereCaches.disable":
-type AnywhereCachesDisableCall struct {
+type AnywhereCacheDisableCall struct {
s *Service
bucket string
anywhereCacheId string
@@ -3270,9 +3079,9 @@
// Disable: Disables an Anywhere Cache instance.
//
// - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) Disable(bucket string, anywhereCacheId string) *AnywhereCachesDisableCall {
- c := &AnywhereCachesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) Disable(bucket string, anywhereCacheId string) *AnywhereCacheDisableCall {
+ c := &AnywhereCacheDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.anywhereCacheId = anywhereCacheId
return c
@@ -3281,7 +3090,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesDisableCall) Fields(s ...googleapi.Field) *AnywhereCachesDisableCall {
+func (c *AnywhereCacheDisableCall) Fields(s ...googleapi.Field) *AnywhereCacheDisableCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -3289,21 +3098,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesDisableCall) Context(ctx context.Context) *AnywhereCachesDisableCall {
+func (c *AnywhereCacheDisableCall) Context(ctx context.Context) *AnywhereCacheDisableCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesDisableCall) Header() http.Header {
+func (c *AnywhereCacheDisableCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCacheDisableCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -3334,7 +3143,7 @@
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCacheDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3380,7 +3189,7 @@
// "type": "string"
// },
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
@@ -3401,7 +3210,7 @@
// method id "storage.anywhereCaches.get":
-type AnywhereCachesGetCall struct {
+type AnywhereCacheGetCall struct {
s *Service
bucket string
anywhereCacheId string
@@ -3414,9 +3223,9 @@
// Get: Returns the metadata of an Anywhere Cache instance.
//
// - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) Get(bucket string, anywhereCacheId string) *AnywhereCachesGetCall {
- c := &AnywhereCachesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) Get(bucket string, anywhereCacheId string) *AnywhereCacheGetCall {
+ c := &AnywhereCacheGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.anywhereCacheId = anywhereCacheId
return c
@@ -3425,7 +3234,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesGetCall) Fields(s ...googleapi.Field) *AnywhereCachesGetCall {
+func (c *AnywhereCacheGetCall) Fields(s ...googleapi.Field) *AnywhereCacheGetCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -3435,7 +3244,7 @@
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
-func (c *AnywhereCachesGetCall) IfNoneMatch(entityTag string) *AnywhereCachesGetCall {
+func (c *AnywhereCacheGetCall) IfNoneMatch(entityTag string) *AnywhereCacheGetCall {
c.ifNoneMatch_ = entityTag
return c
}
@@ -3443,21 +3252,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesGetCall) Context(ctx context.Context) *AnywhereCachesGetCall {
+func (c *AnywhereCacheGetCall) Context(ctx context.Context) *AnywhereCacheGetCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesGetCall) Header() http.Header {
+func (c *AnywhereCacheGetCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCacheGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -3491,7 +3300,7 @@
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCacheGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3537,7 +3346,7 @@
// "type": "string"
// },
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
@@ -3560,7 +3369,7 @@
// method id "storage.anywhereCaches.insert":
-type AnywhereCachesInsertCall struct {
+type AnywhereCacheInsertCall struct {
s *Service
bucket string
anywherecache *AnywhereCache
@@ -3571,9 +3380,9 @@
// Insert: Creates an Anywhere Cache instance.
//
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCachesInsertCall {
- c := &AnywhereCachesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCacheInsertCall {
+ c := &AnywhereCacheInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.anywherecache = anywherecache
return c
@@ -3582,7 +3391,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesInsertCall) Fields(s ...googleapi.Field) *AnywhereCachesInsertCall {
+func (c *AnywhereCacheInsertCall) Fields(s ...googleapi.Field) *AnywhereCacheInsertCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -3590,21 +3399,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesInsertCall) Context(ctx context.Context) *AnywhereCachesInsertCall {
+func (c *AnywhereCacheInsertCall) Context(ctx context.Context) *AnywhereCacheInsertCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesInsertCall) Header() http.Header {
+func (c *AnywhereCacheInsertCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCacheInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -3639,7 +3448,7 @@
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+func (c *AnywhereCacheInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3678,7 +3487,7 @@
// ],
// "parameters": {
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
@@ -3702,7 +3511,7 @@
// method id "storage.anywhereCaches.list":
-type AnywhereCachesListCall struct {
+type AnywhereCacheListCall struct {
s *Service
bucket string
urlParams_ gensupport.URLParams
@@ -3714,16 +3523,16 @@
// List: Returns a list of Anywhere Cache instances of the bucket
// matching the criteria.
//
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) List(bucket string) *AnywhereCachesListCall {
- c := &AnywhereCachesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) List(bucket string) *AnywhereCacheListCall {
+ c := &AnywhereCacheListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
return c
}
// PageSize sets the optional parameter "pageSize": Maximum number of
-// items to return in a single page of responses. Maximum 1000.
-func (c *AnywhereCachesListCall) PageSize(pageSize int64) *AnywhereCachesListCall {
+// items return in a single page of responses. Maximum 1000.
+func (c *AnywhereCacheListCall) PageSize(pageSize int64) *AnywhereCacheListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
}
@@ -3731,7 +3540,7 @@
// PageToken sets the optional parameter "pageToken": A
// previously-returned page token representing part of the larger set of
// results to view.
-func (c *AnywhereCachesListCall) PageToken(pageToken string) *AnywhereCachesListCall {
+func (c *AnywhereCacheListCall) PageToken(pageToken string) *AnywhereCacheListCall {
c.urlParams_.Set("pageToken", pageToken)
return c
}
@@ -3739,7 +3548,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesListCall) Fields(s ...googleapi.Field) *AnywhereCachesListCall {
+func (c *AnywhereCacheListCall) Fields(s ...googleapi.Field) *AnywhereCacheListCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -3749,7 +3558,7 @@
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of In-None-Match.
-func (c *AnywhereCachesListCall) IfNoneMatch(entityTag string) *AnywhereCachesListCall {
+func (c *AnywhereCacheListCall) IfNoneMatch(entityTag string) *AnywhereCacheListCall {
c.ifNoneMatch_ = entityTag
return c
}
@@ -3757,21 +3566,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesListCall) Context(ctx context.Context) *AnywhereCachesListCall {
+func (c *AnywhereCacheListCall) Context(ctx context.Context) *AnywhereCacheListCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesListCall) Header() http.Header {
+func (c *AnywhereCacheListCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCacheListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -3804,7 +3613,7 @@
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) {
+func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3843,13 +3652,13 @@
// ],
// "parameters": {
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
// },
// "pageSize": {
- // "description": "Maximum number of items to return in a single page of responses. Maximum 1000.",
+ // "description": "Maximum number of items return in a single page of responses. Maximum 1000.",
// "format": "int32",
// "location": "query",
// "minimum": "0",
@@ -3879,7 +3688,7 @@
// Pages invokes f for each page of results.
// A non-nil error returned from f will halt the iteration.
// The provided context supersedes any context provided to the Context method.
-func (c *AnywhereCachesListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error {
+func (c *AnywhereCacheListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error {
c.ctx_ = ctx
defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
for {
@@ -3899,7 +3708,7 @@
// method id "storage.anywhereCaches.pause":
-type AnywhereCachesPauseCall struct {
+type AnywhereCachePauseCall struct {
s *Service
bucket string
anywhereCacheId string
@@ -3911,9 +3720,9 @@
// Pause: Pauses an Anywhere Cache instance.
//
// - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) Pause(bucket string, anywhereCacheId string) *AnywhereCachesPauseCall {
- c := &AnywhereCachesPauseCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) Pause(bucket string, anywhereCacheId string) *AnywhereCachePauseCall {
+ c := &AnywhereCachePauseCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.anywhereCacheId = anywhereCacheId
return c
@@ -3922,7 +3731,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesPauseCall) Fields(s ...googleapi.Field) *AnywhereCachesPauseCall {
+func (c *AnywhereCachePauseCall) Fields(s ...googleapi.Field) *AnywhereCachePauseCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -3930,21 +3739,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesPauseCall) Context(ctx context.Context) *AnywhereCachesPauseCall {
+func (c *AnywhereCachePauseCall) Context(ctx context.Context) *AnywhereCachePauseCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesPauseCall) Header() http.Header {
+func (c *AnywhereCachePauseCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachePauseCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -3975,7 +3784,7 @@
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCachePauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -4021,7 +3830,7 @@
// "type": "string"
// },
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
@@ -4042,7 +3851,7 @@
// method id "storage.anywhereCaches.resume":
-type AnywhereCachesResumeCall struct {
+type AnywhereCacheResumeCall struct {
s *Service
bucket string
anywhereCacheId string
@@ -4054,9 +3863,9 @@
// Resume: Resumes a paused or disabled Anywhere Cache instance.
//
// - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) Resume(bucket string, anywhereCacheId string) *AnywhereCachesResumeCall {
- c := &AnywhereCachesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) Resume(bucket string, anywhereCacheId string) *AnywhereCacheResumeCall {
+ c := &AnywhereCacheResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.anywhereCacheId = anywhereCacheId
return c
@@ -4065,7 +3874,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesResumeCall) Fields(s ...googleapi.Field) *AnywhereCachesResumeCall {
+func (c *AnywhereCacheResumeCall) Fields(s ...googleapi.Field) *AnywhereCacheResumeCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -4073,21 +3882,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesResumeCall) Context(ctx context.Context) *AnywhereCachesResumeCall {
+func (c *AnywhereCacheResumeCall) Context(ctx context.Context) *AnywhereCacheResumeCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesResumeCall) Header() http.Header {
+func (c *AnywhereCacheResumeCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCacheResumeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -4118,7 +3927,7 @@
// at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCacheResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -4164,7 +3973,7 @@
// "type": "string"
// },
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
@@ -4185,7 +3994,7 @@
// method id "storage.anywhereCaches.update":
-type AnywhereCachesUpdateCall struct {
+type AnywhereCacheUpdateCall struct {
s *Service
bucket string
anywhereCacheId string
@@ -4199,9 +4008,9 @@
// Cache instance.
//
// - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the parent bucket.
-func (r *AnywhereCachesService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCachesUpdateCall {
- c := &AnywhereCachesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the partent bucket.
+func (r *AnywhereCacheService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCacheUpdateCall {
+ c := &AnywhereCacheUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.bucket = bucket
c.anywhereCacheId = anywhereCacheId
c.anywherecache = anywherecache
@@ -4211,7 +4020,7 @@
// Fields allows partial responses to be retrieved. See
// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
-func (c *AnywhereCachesUpdateCall) Fields(s ...googleapi.Field) *AnywhereCachesUpdateCall {
+func (c *AnywhereCacheUpdateCall) Fields(s ...googleapi.Field) *AnywhereCacheUpdateCall {
c.urlParams_.Set("fields", googleapi.CombineFields(s))
return c
}
@@ -4219,21 +4028,21 @@
// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
-func (c *AnywhereCachesUpdateCall) Context(ctx context.Context) *AnywhereCachesUpdateCall {
+func (c *AnywhereCacheUpdateCall) Context(ctx context.Context) *AnywhereCacheUpdateCall {
c.ctx_ = ctx
return c
}
// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
-func (c *AnywhereCachesUpdateCall) Header() http.Header {
+func (c *AnywhereCacheUpdateCall) Header() http.Header {
if c.header_ == nil {
c.header_ = make(http.Header)
}
return c.header_
}
-func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCacheUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
for k, v := range c.header_ {
@@ -4269,7 +4078,7 @@
// was returned at all) in error.(*googleapi.Error).Header. Use
// googleapi.IsNotModified to check whether the returned error was
// because http.StatusNotModified was returned.
-func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+func (c *AnywhereCacheUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
gensupport.SetOptions(c.urlParams_, opts...)
res, err := c.doRequest("json")
if res != nil && res.StatusCode == http.StatusNotModified {
@@ -4315,7 +4124,7 @@
// "type": "string"
// },
// "bucket": {
- // "description": "Name of the parent bucket.",
+ // "description": "Name of the partent bucket",
// "location": "path",
// "required": true,
// "type": "string"
@@ -8507,932 +8316,6 @@
}
-// method id "storage.folders.delete":
-
-type FoldersDeleteCall struct {
- s *Service
- bucket string
- folder string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Delete: Permanently deletes a folder. Only applicable to buckets with
-// hierarchical namespace enabled.
-//
-// - bucket: Name of the bucket in which the folder resides.
-// - folder: Name of a folder.
-func (r *FoldersService) Delete(bucket string, folder string) *FoldersDeleteCall {
- c := &FoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.folder = folder
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": If set, only deletes the folder if its
-// metageneration matches this value.
-func (c *FoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersDeleteCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": If set, only deletes the folder if its
-// metageneration does not match this value.
-func (c *FoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersDeleteCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *FoldersDeleteCall) Fields(s ...googleapi.Field) *FoldersDeleteCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *FoldersDeleteCall) Context(ctx context.Context) *FoldersDeleteCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *FoldersDeleteCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("DELETE", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "folder": c.folder,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.folders.delete" call.
-func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if err != nil {
- return err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return gensupport.WrapError(err)
- }
- return nil
- // {
- // "description": "Permanently deletes a folder. Only applicable to buckets with hierarchical namespace enabled.",
- // "httpMethod": "DELETE",
- // "id": "storage.folders.delete",
- // "parameterOrder": [
- // "bucket",
- // "folder"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the folder resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "folder": {
- // "description": "Name of a folder.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "If set, only deletes the folder if its metageneration matches this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "If set, only deletes the folder if its metageneration does not match this value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/folders/{folder}",
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.folders.get":
-
-type FoldersGetCall struct {
- s *Service
- bucket string
- folder string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// Get: Returns metadata for the specified folder. Only applicable to
-// buckets with hierarchical namespace enabled.
-//
-// - bucket: Name of the bucket in which the folder resides.
-// - folder: Name of a folder.
-func (r *FoldersService) Get(bucket string, folder string) *FoldersGetCall {
- c := &FoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.folder = folder
- return c
-}
-
-// IfMetagenerationMatch sets the optional parameter
-// "ifMetagenerationMatch": Makes the return of the folder metadata
-// conditional on whether the folder's current metageneration matches
-// the given value.
-func (c *FoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersGetCall {
- c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
- return c
-}
-
-// IfMetagenerationNotMatch sets the optional parameter
-// "ifMetagenerationNotMatch": Makes the return of the folder metadata
-// conditional on whether the folder's current metageneration does not
-// match the given value.
-func (c *FoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersGetCall {
- c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *FoldersGetCall) Fields(s ...googleapi.Field) *FoldersGetCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of In-None-Match.
-func (c *FoldersGetCall) IfNoneMatch(entityTag string) *FoldersGetCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *FoldersGetCall) Context(ctx context.Context) *FoldersGetCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *FoldersGetCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "folder": c.folder,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.folders.get" call.
-// Exactly one of *Folder or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Folder.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Folder{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Returns metadata for the specified folder. Only applicable to buckets with hierarchical namespace enabled.",
- // "httpMethod": "GET",
- // "id": "storage.folders.get",
- // "parameterOrder": [
- // "bucket",
- // "folder"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the folder resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "folder": {
- // "description": "Name of a folder.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifMetagenerationMatch": {
- // "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifMetagenerationNotMatch": {
- // "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/folders/{folder}",
- // "response": {
- // "$ref": "Folder"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.folders.insert":
-
-type FoldersInsertCall struct {
- s *Service
- bucket string
- folder *Folder
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Insert: Creates a new folder. Only applicable to buckets with
-// hierarchical namespace enabled.
-//
-// - bucket: Name of the bucket in which the folder resides.
-func (r *FoldersService) Insert(bucket string, folder *Folder) *FoldersInsertCall {
- c := &FoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.folder = folder
- return c
-}
-
-// Recursive sets the optional parameter "recursive": If true, any
-// parent folder which doesn’t exist will be created automatically.
-func (c *FoldersInsertCall) Recursive(recursive bool) *FoldersInsertCall {
- c.urlParams_.Set("recursive", fmt.Sprint(recursive))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *FoldersInsertCall) Fields(s ...googleapi.Field) *FoldersInsertCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *FoldersInsertCall) Context(ctx context.Context) *FoldersInsertCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *FoldersInsertCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder)
- if err != nil {
- return nil, err
- }
- reqHeaders.Set("Content-Type", "application/json")
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.folders.insert" call.
-// Exactly one of *Folder or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Folder.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Folder{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Creates a new folder. Only applicable to buckets with hierarchical namespace enabled.",
- // "httpMethod": "POST",
- // "id": "storage.folders.insert",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the folder resides.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "recursive": {
- // "description": "If true, any parent folder which doesn’t exist will be created automatically.",
- // "location": "query",
- // "type": "boolean"
- // }
- // },
- // "path": "b/{bucket}/folders",
- // "request": {
- // "$ref": "Folder"
- // },
- // "response": {
- // "$ref": "Folder"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// method id "storage.folders.list":
-
-type FoldersListCall struct {
- s *Service
- bucket string
- urlParams_ gensupport.URLParams
- ifNoneMatch_ string
- ctx_ context.Context
- header_ http.Header
-}
-
-// List: Retrieves a list of folders matching the criteria. Only
-// applicable to buckets with hierarchical namespace enabled.
-//
-// - bucket: Name of the bucket in which to look for folders.
-func (r *FoldersService) List(bucket string) *FoldersListCall {
- c := &FoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- return c
-}
-
-// Delimiter sets the optional parameter "delimiter": Returns results in
-// a directory-like mode. The only supported value is '/'. If set, items
-// will only contain folders that either exactly match the prefix, or
-// are one level below the prefix.
-func (c *FoldersListCall) Delimiter(delimiter string) *FoldersListCall {
- c.urlParams_.Set("delimiter", delimiter)
- return c
-}
-
-// EndOffset sets the optional parameter "endOffset": Filter results to
-// folders whose names are lexicographically before endOffset. If
-// startOffset is also set, the folders listed will have names between
-// startOffset (inclusive) and endOffset (exclusive).
-func (c *FoldersListCall) EndOffset(endOffset string) *FoldersListCall {
- c.urlParams_.Set("endOffset", endOffset)
- return c
-}
-
-// PageSize sets the optional parameter "pageSize": Maximum number of
-// items to return in a single page of responses.
-func (c *FoldersListCall) PageSize(pageSize int64) *FoldersListCall {
- c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
- return c
-}
-
-// PageToken sets the optional parameter "pageToken": A
-// previously-returned page token representing part of the larger set of
-// results to view.
-func (c *FoldersListCall) PageToken(pageToken string) *FoldersListCall {
- c.urlParams_.Set("pageToken", pageToken)
- return c
-}
-
-// Prefix sets the optional parameter "prefix": Filter results to
-// folders whose paths begin with this prefix. If set, the value must
-// either be an empty string or end with a '/'.
-func (c *FoldersListCall) Prefix(prefix string) *FoldersListCall {
- c.urlParams_.Set("prefix", prefix)
- return c
-}
-
-// StartOffset sets the optional parameter "startOffset": Filter results
-// to folders whose names are lexicographically equal to or after
-// startOffset. If endOffset is also set, the folders listed will have
-// names between startOffset (inclusive) and endOffset (exclusive).
-func (c *FoldersListCall) StartOffset(startOffset string) *FoldersListCall {
- c.urlParams_.Set("startOffset", startOffset)
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *FoldersListCall) Fields(s ...googleapi.Field) *FoldersListCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// IfNoneMatch sets the optional parameter which makes the operation
-// fail if the object's ETag matches the given value. This is useful for
-// getting updates only after the object has changed since the last
-// request. Use googleapi.IsNotModified to check whether the response
-// error from Do is the result of In-None-Match.
-func (c *FoldersListCall) IfNoneMatch(entityTag string) *FoldersListCall {
- c.ifNoneMatch_ = entityTag
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *FoldersListCall) Context(ctx context.Context) *FoldersListCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *FoldersListCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- if c.ifNoneMatch_ != "" {
- reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
- }
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("GET", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.folders.list" call.
-// Exactly one of *Folders or error will be non-nil. Any non-2xx status
-// code is an error. Response headers are in either
-// *Folders.ServerResponse.Header or (if a response was returned at all)
-// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
-// check whether the returned error was because http.StatusNotModified
-// was returned.
-func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &Folders{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Retrieves a list of folders matching the criteria. Only applicable to buckets with hierarchical namespace enabled.",
- // "httpMethod": "GET",
- // "id": "storage.folders.list",
- // "parameterOrder": [
- // "bucket"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which to look for folders.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "delimiter": {
- // "description": "Returns results in a directory-like mode. The only supported value is '/'. If set, items will only contain folders that either exactly match the prefix, or are one level below the prefix.",
- // "location": "query",
- // "type": "string"
- // },
- // "endOffset": {
- // "description": "Filter results to folders whose names are lexicographically before endOffset. If startOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
- // "location": "query",
- // "type": "string"
- // },
- // "pageSize": {
- // "description": "Maximum number of items to return in a single page of responses.",
- // "format": "int32",
- // "location": "query",
- // "minimum": "0",
- // "type": "integer"
- // },
- // "pageToken": {
- // "description": "A previously-returned page token representing part of the larger set of results to view.",
- // "location": "query",
- // "type": "string"
- // },
- // "prefix": {
- // "description": "Filter results to folders whose paths begin with this prefix. If set, the value must either be an empty string or end with a '/'.",
- // "location": "query",
- // "type": "string"
- // },
- // "startOffset": {
- // "description": "Filter results to folders whose names are lexicographically equal to or after startOffset. If endOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
- // "location": "query",
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/folders",
- // "response": {
- // "$ref": "Folders"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/cloud-platform.read-only",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_only",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
-// Pages invokes f for each page of results.
-// A non-nil error returned from f will halt the iteration.
-// The provided context supersedes any context provided to the Context method.
-func (c *FoldersListCall) Pages(ctx context.Context, f func(*Folders) error) error {
- c.ctx_ = ctx
- defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
- for {
- x, err := c.Do()
- if err != nil {
- return err
- }
- if err := f(x); err != nil {
- return err
- }
- if x.NextPageToken == "" {
- return nil
- }
- c.PageToken(x.NextPageToken)
- }
-}
-
-// method id "storage.folders.rename":
-
-type FoldersRenameCall struct {
- s *Service
- bucket string
- sourceFolder string
- destinationFolder string
- urlParams_ gensupport.URLParams
- ctx_ context.Context
- header_ http.Header
-}
-
-// Rename: Renames a source folder to a destination folder. Only
-// applicable to buckets with hierarchical namespace enabled.
-//
-// - bucket: Name of the bucket in which the folders are in.
-// - destinationFolder: Name of the destination folder.
-// - sourceFolder: Name of the source folder.
-func (r *FoldersService) Rename(bucket string, sourceFolder string, destinationFolder string) *FoldersRenameCall {
- c := &FoldersRenameCall{s: r.s, urlParams_: make(gensupport.URLParams)}
- c.bucket = bucket
- c.sourceFolder = sourceFolder
- c.destinationFolder = destinationFolder
- return c
-}
-
-// IfSourceMetagenerationMatch sets the optional parameter
-// "ifSourceMetagenerationMatch": Makes the operation conditional on
-// whether the source object's current metageneration matches the given
-// value.
-func (c *FoldersRenameCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *FoldersRenameCall {
- c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
- return c
-}
-
-// IfSourceMetagenerationNotMatch sets the optional parameter
-// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
-// whether the source object's current metageneration does not match the
-// given value.
-func (c *FoldersRenameCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *FoldersRenameCall {
- c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
- return c
-}
-
-// Fields allows partial responses to be retrieved. See
-// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
-// for more information.
-func (c *FoldersRenameCall) Fields(s ...googleapi.Field) *FoldersRenameCall {
- c.urlParams_.Set("fields", googleapi.CombineFields(s))
- return c
-}
-
-// Context sets the context to be used in this call's Do method. Any
-// pending HTTP request will be aborted if the provided context is
-// canceled.
-func (c *FoldersRenameCall) Context(ctx context.Context) *FoldersRenameCall {
- c.ctx_ = ctx
- return c
-}
-
-// Header returns an http.Header that can be modified by the caller to
-// add HTTP headers to the request.
-func (c *FoldersRenameCall) Header() http.Header {
- if c.header_ == nil {
- c.header_ = make(http.Header)
- }
- return c.header_
-}
-
-func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) {
- reqHeaders := make(http.Header)
- reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
- for k, v := range c.header_ {
- reqHeaders[k] = v
- }
- reqHeaders.Set("User-Agent", c.s.userAgent())
- var body io.Reader = nil
- c.urlParams_.Set("alt", alt)
- c.urlParams_.Set("prettyPrint", "false")
- urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}")
- urls += "?" + c.urlParams_.Encode()
- req, err := http.NewRequest("POST", urls, body)
- if err != nil {
- return nil, err
- }
- req.Header = reqHeaders
- googleapi.Expand(req.URL, map[string]string{
- "bucket": c.bucket,
- "sourceFolder": c.sourceFolder,
- "destinationFolder": c.destinationFolder,
- })
- return gensupport.SendRequest(c.ctx_, c.s.client, req)
-}
-
-// Do executes the "storage.folders.rename" call.
-// Exactly one of *GoogleLongrunningOperation or error will be non-nil.
-// Any non-2xx status code is an error. Response headers are in either
-// *GoogleLongrunningOperation.ServerResponse.Header or (if a response
-// was returned at all) in error.(*googleapi.Error).Header. Use
-// googleapi.IsNotModified to check whether the returned error was
-// because http.StatusNotModified was returned.
-func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
- gensupport.SetOptions(c.urlParams_, opts...)
- res, err := c.doRequest("json")
- if res != nil && res.StatusCode == http.StatusNotModified {
- if res.Body != nil {
- res.Body.Close()
- }
- return nil, gensupport.WrapError(&googleapi.Error{
- Code: res.StatusCode,
- Header: res.Header,
- })
- }
- if err != nil {
- return nil, err
- }
- defer googleapi.CloseBody(res)
- if err := googleapi.CheckResponse(res); err != nil {
- return nil, gensupport.WrapError(err)
- }
- ret := &GoogleLongrunningOperation{
- ServerResponse: googleapi.ServerResponse{
- Header: res.Header,
- HTTPStatusCode: res.StatusCode,
- },
- }
- target := &ret
- if err := gensupport.DecodeResponse(target, res); err != nil {
- return nil, err
- }
- return ret, nil
- // {
- // "description": "Renames a source folder to a destination folder. Only applicable to buckets with hierarchical namespace enabled.",
- // "httpMethod": "POST",
- // "id": "storage.folders.rename",
- // "parameterOrder": [
- // "bucket",
- // "sourceFolder",
- // "destinationFolder"
- // ],
- // "parameters": {
- // "bucket": {
- // "description": "Name of the bucket in which the folders are in.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "destinationFolder": {
- // "description": "Name of the destination folder.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // },
- // "ifSourceMetagenerationMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "ifSourceMetagenerationNotMatch": {
- // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
- // "format": "int64",
- // "location": "query",
- // "type": "string"
- // },
- // "sourceFolder": {
- // "description": "Name of the source folder.",
- // "location": "path",
- // "required": true,
- // "type": "string"
- // }
- // },
- // "path": "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}",
- // "response": {
- // "$ref": "GoogleLongrunningOperation"
- // },
- // "scopes": [
- // "https://www.googleapis.com/auth/cloud-platform",
- // "https://www.googleapis.com/auth/devstorage.full_control",
- // "https://www.googleapis.com/auth/devstorage.read_write"
- // ]
- // }
-
-}
-
// method id "storage.managedFolders.delete":
type ManagedFoldersDeleteCall struct {
@@ -10116,7 +8999,7 @@
}
// PageSize sets the optional parameter "pageSize": Maximum number of
-// items to return in a single page of responses.
+// items return in a single page of responses.
func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall {
c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
return c
@@ -10250,7 +9133,7 @@
// "type": "string"
// },
// "pageSize": {
- // "description": "Maximum number of items to return in a single page of responses.",
+ // "description": "Maximum number of items return in a single page of responses.",
// "format": "int32",
// "location": "query",
// "minimum": "0",
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
index 10830f0..87a22f7 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -14,12 +14,10 @@
"net"
"os"
"strings"
- "sync"
"time"
"cloud.google.com/go/compute/metadata"
"go.opencensus.io/plugin/ocgrpc"
- "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"golang.org/x/oauth2"
"golang.org/x/time/rate"
"google.golang.org/api/internal"
@@ -28,7 +26,6 @@
grpcgoogle "google.golang.org/grpc/credentials/google"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/credentials/oauth"
- "google.golang.org/grpc/stats"
// Install grpclb, which is required for direct path.
_ "google.golang.org/grpc/balancer/grpclb"
@@ -46,29 +43,6 @@
// Log rate limiter
var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second}
-// Assign to var for unit test replacement
-var dialContext = grpc.DialContext
-
-// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
-// all dial connections to avoid the memory leak documented in
-// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
-//
-// TODO: If 4226 has been fixed in opentelemetry-go-contrib, replace this
-// singleton with inline usage for simplicity.
-var (
- initOtelStatsHandlerOnce sync.Once
- otelStatsHandler stats.Handler
-)
-
-// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
-// dial connections.
-func otelGRPCStatsHandler() stats.Handler {
- initOtelStatsHandlerOnce.Do(func() {
- otelStatsHandler = otelgrpc.NewClientHandler()
- })
- return otelStatsHandler
-}
-
// Dial returns a GRPC connection for use communicating with a Google cloud
// service, configured with the given ClientOptions.
func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
@@ -168,56 +142,52 @@
// when dialing an insecure connection?
if !o.NoAuth && !insecure {
if o.APIKey != "" {
- grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcAPIKey{
- apiKey: o.APIKey,
- requestReason: o.RequestReason,
- }))
- } else {
- creds, err := internal.Creds(ctx, o)
- if err != nil {
- return nil, err
- }
- grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{
- TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource},
+ log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.")
+ }
+ creds, err := internal.Creds(ctx, o)
+ if err != nil {
+ return nil, err
+ }
+
+ grpcOpts = append(grpcOpts,
+ grpc.WithPerRPCCredentials(grpcTokenSource{
+ TokenSource: oauth.TokenSource{creds.TokenSource},
quotaProject: internal.GetQuotaProject(creds, o.QuotaProject),
requestReason: o.RequestReason,
- }))
- // Attempt Direct Path:
- logRateLimiter.Do(func() {
- logDirectPathMisconfig(endpoint, creds.TokenSource, o)
- })
- if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
- // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
- grpcOpts = []grpc.DialOption{
- grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(
- grpcgoogle.DefaultCredentialsOptions{
- PerRPCCreds: oauth.TokenSource{TokenSource: creds.TokenSource},
- })),
- }
- if timeoutDialerOption != nil {
- grpcOpts = append(grpcOpts, timeoutDialerOption)
- }
- // Check if google-c2p resolver is enabled for DirectPath
- if isDirectPathXdsUsed(o) {
- // google-c2p resolver target must not have a port number
- if addr, _, err := net.SplitHostPort(endpoint); err == nil {
- endpoint = "google-c2p:///" + addr
- } else {
- endpoint = "google-c2p:///" + endpoint
- }
- } else {
- if !strings.HasPrefix(endpoint, "dns:///") {
- endpoint = "dns:///" + endpoint
- }
- grpcOpts = append(grpcOpts,
- // For now all DirectPath go clients will be using the following lb config, but in future
- // when different services need different configs, then we should change this to a
- // per-service config.
- grpc.WithDisableServiceConfig(),
- grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
- }
- // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
+ }),
+ )
+
+ // Attempt Direct Path:
+ logRateLimiter.Do(func() {
+ logDirectPathMisconfig(endpoint, creds.TokenSource, o)
+ })
+ if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
+ // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
+ grpcOpts = []grpc.DialOption{
+ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))}
+ if timeoutDialerOption != nil {
+ grpcOpts = append(grpcOpts, timeoutDialerOption)
}
+ // Check if google-c2p resolver is enabled for DirectPath
+ if isDirectPathXdsUsed(o) {
+ // google-c2p resolver target must not have a port number
+ if addr, _, err := net.SplitHostPort(endpoint); err == nil {
+ endpoint = "google-c2p:///" + addr
+ } else {
+ endpoint = "google-c2p:///" + endpoint
+ }
+ } else {
+ if !strings.HasPrefix(endpoint, "dns:///") {
+ endpoint = "dns:///" + endpoint
+ }
+ grpcOpts = append(grpcOpts,
+ // For now all DirectPath go clients will be using the following lb config, but in future
+ // when different services need different configs, then we should change this to a
+ // per-service config.
+ grpc.WithDisableServiceConfig(),
+ grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
+ }
+ // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
}
}
@@ -225,13 +195,12 @@
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
grpcOpts = addOCStatsHandler(grpcOpts, o)
- grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o)
grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
if o.UserAgent != "" {
grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
}
- return dialContext(ctx, endpoint, grpcOpts...)
+ return grpc.DialContext(ctx, endpoint, grpcOpts...)
}
func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
@@ -241,13 +210,6 @@
return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
}
-func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption {
- if settings.TelemetryDisabled {
- return opts
- }
- return append(opts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
-}
-
// grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource.
type grpcTokenSource struct {
oauth.TokenSource
@@ -275,31 +237,6 @@
return metadata, nil
}
-// grpcAPIKey supplies PerRPCCredentials from an API Key.
-type grpcAPIKey struct {
- apiKey string
-
- // Additional metadata attached as headers.
- requestReason string
-}
-
-// GetRequestMetadata gets the request metadata as a map from a grpcAPIKey.
-func (ts grpcAPIKey) GetRequestMetadata(ctx context.Context, uri ...string) (
- map[string]string, error) {
- metadata := map[string]string{
- "X-goog-api-key": ts.apiKey,
- }
- if ts.requestReason != "" {
- metadata["X-goog-request-reason"] = ts.requestReason
- }
- return metadata, nil
-}
-
-// RequireTransportSecurity indicates whether the credentials requires transport security.
-func (ts grpcAPIKey) RequireTransportSecurity() bool {
- return true
-}
-
func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool {
if !o.EnableDirectPath {
return false
diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go
index 7e322a1..a07362f 100644
--- a/vendor/google.golang.org/api/transport/http/dial.go
+++ b/vendor/google.golang.org/api/transport/http/dial.go
@@ -16,7 +16,6 @@
"time"
"go.opencensus.io/plugin/ochttp"
- "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
"golang.org/x/oauth2"
"google.golang.org/api/googleapi/transport"
@@ -70,9 +69,6 @@
requestReason: settings.RequestReason,
}
var trans http.RoundTripper = paramTransport
- // Give OpenTelemetry precedence over OpenCensus in case user configuration
- // causes both to write the same header (`X-Cloud-Trace-Context`).
- trans = addOpenTelemetryTransport(trans, settings)
trans = addOCTransport(trans, settings)
switch {
case settings.NoAuth:
@@ -207,13 +203,6 @@
}
}
-func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
- if settings.TelemetryDisabled {
- return trans
- }
- return otelhttp.NewTransport(trans)
-}
-
func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
if settings.TelemetryDisabled {
return trans
diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml
new file mode 100644
index 0000000..6d03f4d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go_import_path: google.golang.org/appengine
+
+install:
+ - ./travis_install.sh
+
+script:
+ - ./travis_test.sh
+
+matrix:
+ include:
+ - go: 1.9.x
+ env: GOAPP=true
+ - go: 1.10.x
+ env: GOAPP=false
+ - go: 1.11.x
+ env: GO111MODULE=on
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
index 2896936..ffc2985 100644
--- a/vendor/google.golang.org/appengine/CONTRIBUTING.md
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -19,12 +19,14 @@
## Running system tests
+Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
+
Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
-Run tests with `go test`:
+Run tests with `goapp test`:
```
-go test -v google.golang.org/appengine/...
+goapp test -v google.golang.org/appengine/...
```
## Contributor License Agreements
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
index 5ccddd9..9fdbacd 100644
--- a/vendor/google.golang.org/appengine/README.md
+++ b/vendor/google.golang.org/appengine/README.md
@@ -1,6 +1,6 @@
# Go App Engine packages
-[](https://github.com/golang/appengine/actions/workflows/ci.yml)
+[](https://travis-ci.org/golang/appengine)
This repository supports the Go runtime on *App Engine standard*.
It provides APIs for interacting with App Engine services.
@@ -51,7 +51,7 @@
Most App Engine services are available with exactly the same API.
A few APIs were cleaned up, and there are some differences:
-* `appengine.Context` has been replaced with the `Context` type from `context`.
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
* `appengine.Datacenter` now takes a `context.Context` argument.
@@ -72,7 +72,7 @@
* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
Use the standard `net` package instead.
-## Key Encode/Decode compatibility to help with datastore library migrations
+## Key Encode/Decode compatibiltiy to help with datastore library migrations
Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore.
The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type.
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
index 35ba9c8..8c96976 100644
--- a/vendor/google.golang.org/appengine/appengine.go
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -9,10 +9,10 @@
package appengine // import "google.golang.org/appengine"
import (
- "context"
"net/http"
"github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
"google.golang.org/appengine/internal"
)
@@ -35,18 +35,18 @@
//
// Main is designed so that the app's main package looks like this:
//
-// package main
+// package main
//
-// import (
-// "google.golang.org/appengine"
+// import (
+// "google.golang.org/appengine"
//
-// _ "myapp/package0"
-// _ "myapp/package1"
-// )
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
//
-// func main() {
-// appengine.Main()
-// }
+// func main() {
+// appengine.Main()
+// }
//
// The "myapp/packageX" packages are expected to register HTTP handlers
// in their init functions.
@@ -54,9 +54,6 @@
internal.Main()
}
-// Middleware wraps an http handler so that it can make GAE API calls
-var Middleware func(http.Handler) http.Handler = internal.Middleware
-
// IsDevAppServer reports whether the App Engine app is running in the
// development App Server.
func IsDevAppServer() bool {
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
index 6e1d041..f4b645a 100644
--- a/vendor/google.golang.org/appengine/appengine_vm.go
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -2,19 +2,19 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build !appengine
// +build !appengine
package appengine
import (
- "context"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
)
// BackgroundContext returns a context not associated with a request.
-//
-// Deprecated: App Engine no longer has a special background context.
-// Just use context.Background().
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
func BackgroundContext() context.Context {
- return context.Background()
+ return internal.BackgroundContext()
}
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
index 1202fc1..b8dcf8f 100644
--- a/vendor/google.golang.org/appengine/identity.go
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -5,9 +5,10 @@
package appengine
import (
- "context"
"time"
+ "golang.org/x/net/context"
+
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/app_identity"
modpb "google.golang.org/appengine/internal/modules"
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
index 0569f5d..721053c 100644
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -2,14 +2,12 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build !appengine
// +build !appengine
package internal
import (
"bytes"
- "context"
"errors"
"fmt"
"io/ioutil"
@@ -26,6 +24,7 @@
"time"
"github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
basepb "google.golang.org/appengine/internal/base"
logpb "google.golang.org/appengine/internal/log"
@@ -33,7 +32,8 @@
)
const (
- apiPath = "/rpc_http"
+ apiPath = "/rpc_http"
+ defaultTicketSuffix = "/default.20150612t184001.0"
)
var (
@@ -65,22 +65,21 @@
IdleConnTimeout: 90 * time.Second,
},
}
+
+ defaultTicketOnce sync.Once
+ defaultTicket string
+ backgroundContextOnce sync.Once
+ backgroundContext netcontext.Context
)
-func apiURL(ctx context.Context) *url.URL {
+func apiURL() *url.URL {
host, port := "appengine.googleapis.internal", "10001"
if h := os.Getenv("API_HOST"); h != "" {
host = h
}
- if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil {
- host = hostOverride.(string)
- }
if p := os.Getenv("API_PORT"); p != "" {
port = p
}
- if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil {
- port = portOverride.(string)
- }
return &url.URL{
Scheme: "http",
Host: host + ":" + port,
@@ -88,97 +87,82 @@
}
}
-// Middleware wraps an http handler so that it can make GAE API calls
-func Middleware(next http.Handler) http.Handler {
- return handleHTTPMiddleware(executeRequestSafelyMiddleware(next))
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ }
+ r = r.WithContext(withContext(r.Context(), c))
+ c.req = r
+
+ stopFlushing := make(chan int)
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ flushed := make(chan struct{})
+ go func() {
+ defer close(flushed)
+ // Force a log flush, because with very short requests we
+ // may not ever flush logs.
+ c.flushLog(true)
+ }()
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+ // Wait for the last flush to complete before returning,
+ // otherwise the security ticket will not be valid.
+ <-flushed
}
-func handleHTTPMiddleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- c := &aeContext{
- req: r,
- outHeader: w.Header(),
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
}
- r = r.WithContext(withContext(r.Context(), c))
- c.req = r
+ }()
- stopFlushing := make(chan int)
-
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
-
- if logToLogservice() {
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
- }
-
- next.ServeHTTP(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
-
- flushed := make(chan struct{})
- if logToLogservice() {
- stopFlushing <- 1 // any logging beyond this point will be dropped
-
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- go func() {
- defer close(flushed)
- // Force a log flush, because with very short requests we
- // may not ever flush logs.
- c.flushLog(true)
- }()
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
- }
-
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
- if logToLogservice() {
- // Wait for the last flush to complete before returning,
- // otherwise the security ticket will not be valid.
- <-flushed
- }
- })
-}
-
-func executeRequestSafelyMiddleware(next http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- c := w.(*aeContext)
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
-
- next.ServeHTTP(w, r)
- })
+ http.DefaultServeMux.ServeHTTP(c, r)
}
func renderPanic(x interface{}) string {
@@ -220,9 +204,9 @@
return string(buf)
}
-// aeContext represents the aeContext of an in-flight HTTP request.
+// context represents the context of an in-flight HTTP request.
// It implements the appengine.Context and http.ResponseWriter interfaces.
-type aeContext struct {
+type context struct {
req *http.Request
outCode int
@@ -234,6 +218,8 @@
lines []*logpb.UserAppLogLine
flushes int
}
+
+ apiURL *url.URL
}
var contextKey = "holds a *context"
@@ -241,8 +227,8 @@
// jointContext joins two contexts in a superficial way.
// It takes values and timeouts from a base context, and only values from another context.
type jointContext struct {
- base context.Context
- valuesOnly context.Context
+ base netcontext.Context
+ valuesOnly netcontext.Context
}
func (c jointContext) Deadline() (time.Time, bool) {
@@ -266,54 +252,94 @@
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
-func fromContext(ctx context.Context) *aeContext {
- c, _ := ctx.Value(&contextKey).(*aeContext)
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
return c
}
-func withContext(parent context.Context, c *aeContext) context.Context {
- ctx := context.WithValue(parent, &contextKey, c)
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
ctx = withNamespace(ctx, ns)
}
return ctx
}
-func toContext(c *aeContext) context.Context {
- return withContext(context.Background(), c)
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
}
-func IncomingHeaders(ctx context.Context) http.Header {
+func IncomingHeaders(ctx netcontext.Context) http.Header {
if c := fromContext(ctx); c != nil {
return c.req.Header
}
return nil
}
-func ReqContext(req *http.Request) context.Context {
+func ReqContext(req *http.Request) netcontext.Context {
return req.Context()
}
-func WithContext(parent context.Context, req *http.Request) context.Context {
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
return jointContext{
base: parent,
valuesOnly: req.Context(),
}
}
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL.
-// It should only be used by aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request {
- ctx := req.Context()
- ctx = withAPIHostOverride(ctx, apiURL.Hostname())
- ctx = withAPIPortOverride(ctx, apiURL.Port())
- ctx = WithAppIDOverride(ctx, appID)
+// DefaultTicket returns a ticket used for background context or dev_appserver.
+func DefaultTicket() string {
+ defaultTicketOnce.Do(func() {
+ if IsDevAppServer() {
+ defaultTicket = "testapp" + defaultTicketSuffix
+ return
+ }
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+ })
+ return defaultTicket
+}
- // use the unregistered request as a placeholder so that withContext can read the headers
- c := &aeContext{req: req}
- c.req = req.WithContext(withContext(ctx, c))
- return c.req
+func BackgroundContext() netcontext.Context {
+ backgroundContextOnce.Do(func() {
+ // Compute background security ticket.
+ ticket := DefaultTicket()
+
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ }
+ backgroundContext = toContext(c)
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go c.logFlusher(make(chan int))
+ })
+
+ return backgroundContext
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ }
+ ctx := withContext(decorate(req.Context()), c)
+ req = req.WithContext(ctx)
+ c.req = req
+ return req, func() {}
}
var errTimeout = &CallError{
@@ -322,7 +348,7 @@
Timeout: true,
}
-func (c *aeContext) Header() http.Header { return c.outHeader }
+func (c *context) Header() http.Header { return c.outHeader }
// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
// codes do not permit a response body (nor response entity headers such as
@@ -339,7 +365,7 @@
return true
}
-func (c *aeContext) Write(b []byte) (int, error) {
+func (c *context) Write(b []byte) (int, error) {
if c.outCode == 0 {
c.WriteHeader(http.StatusOK)
}
@@ -350,7 +376,7 @@
return len(b), nil
}
-func (c *aeContext) WriteHeader(code int) {
+func (c *context) WriteHeader(code int) {
if c.outCode != 0 {
logf(c, 3, "WriteHeader called multiple times on request.") // error level
return
@@ -358,11 +384,10 @@
c.outCode = code
}
-func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) {
- apiURL := apiURL(ctx)
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
hreq := &http.Request{
Method: "POST",
- URL: apiURL,
+ URL: c.apiURL,
Header: http.Header{
apiEndpointHeader: apiEndpointHeaderValue,
apiMethodHeader: apiMethodHeaderValue,
@@ -371,16 +396,13 @@
},
Body: ioutil.NopCloser(bytes.NewReader(body)),
ContentLength: int64(len(body)),
- Host: apiURL.Host,
+ Host: c.apiURL.Host,
}
- c := fromContext(ctx)
- if c != nil {
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
- }
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
- }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
}
tr := apiHTTPClient.Transport.(*http.Transport)
@@ -422,7 +444,7 @@
return hrespBody, nil
}
-func Call(ctx context.Context, service, method string, in, out proto.Message) error {
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
@@ -441,11 +463,15 @@
}
c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errNotAppEngineContext
+ }
// Apply transaction modifications if we're in a transaction.
if t := transactionFromContext(ctx); t != nil {
if t.finished {
- return errors.New("transaction aeContext has expired")
+ return errors.New("transaction context has expired")
}
applyTransaction(in, &t.transaction)
}
@@ -461,13 +487,20 @@
return err
}
- ticket := ""
- if c != nil {
- ticket = c.req.Header.Get(ticketHeader)
- if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
- ticket = dri
+ ticket := c.req.Header.Get(ticketHeader)
+ // Use a test ticket under test environment.
+ if ticket == "" {
+ if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+ ticket = appid.(string) + defaultTicketSuffix
}
}
+ // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
+ if ticket == "" {
+ ticket = DefaultTicket()
+ }
+ if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
+ ticket = dri
+ }
req := &remotepb.Request{
ServiceName: &service,
Method: &method,
@@ -479,7 +512,7 @@
return err
}
- hrespBody, err := post(ctx, hreqBody, timeout)
+ hrespBody, err := c.post(hreqBody, timeout)
if err != nil {
return err
}
@@ -516,11 +549,11 @@
return proto.Unmarshal(res.Response, out)
}
-func (c *aeContext) Request() *http.Request {
+func (c *context) Request() *http.Request {
return c.req
}
-func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) {
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
// Truncate long log lines.
// TODO(dsymonds): Check if this is still necessary.
const lim = 8 << 10
@@ -542,20 +575,18 @@
4: "CRITICAL",
}
-func logf(c *aeContext, level int64, format string, args ...interface{}) {
+func logf(c *context, level int64, format string, args ...interface{}) {
if c == nil {
- panic("not an App Engine aeContext")
+ panic("not an App Engine context")
}
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- if logToLogservice() {
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- }
- // Log to stdout if not deployed
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ // Only duplicate log to stderr if not running on App Engine second generation
if !IsSecondGen() {
log.Print(logLevelName[level] + ": " + s)
}
@@ -563,7 +594,7 @@
// flushLog attempts to flush any pending logs to the appserver.
// It should not be called concurrently.
-func (c *aeContext) flushLog(force bool) (flushed bool) {
+func (c *context) flushLog(force bool) (flushed bool) {
c.pendingLogs.Lock()
// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
n, rem := 0, 30<<20
@@ -624,7 +655,7 @@
forceFlushInterval = 60 * time.Second
)
-func (c *aeContext) logFlusher(stop <-chan int) {
+func (c *context) logFlusher(stop <-chan int) {
lastFlush := time.Now()
tick := time.NewTicker(flushInterval)
for {
@@ -642,12 +673,6 @@
}
}
-func ContextForTesting(req *http.Request) context.Context {
- return toContext(&aeContext{req: req})
-}
-
-func logToLogservice() bool {
- // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json
- // where $LOG_DIR is /var/log in prod and some tmpdir in dev
- return os.Getenv("LOG_TO_LOGSERVICE") != "0"
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{req: req})
}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
index 87c33c7..f0f40b2 100644
--- a/vendor/google.golang.org/appengine/internal/api_classic.go
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -2,13 +2,11 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build appengine
// +build appengine
package internal
import (
- "context"
"errors"
"fmt"
"net/http"
@@ -19,19 +17,20 @@
basepb "appengine_internal/base"
"github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
)
var contextKey = "holds an appengine.Context"
// fromContext returns the App Engine context or nil if ctx is not
// derived from an App Engine context.
-func fromContext(ctx context.Context) appengine.Context {
+func fromContext(ctx netcontext.Context) appengine.Context {
c, _ := ctx.Value(&contextKey).(appengine.Context)
return c
}
// This is only for classic App Engine adapters.
-func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) {
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
c := fromContext(ctx)
if c == nil {
return nil, errNotAppEngineContext
@@ -39,8 +38,8 @@
return c, nil
}
-func withContext(parent context.Context, c appengine.Context) context.Context {
- ctx := context.WithValue(parent, &contextKey, c)
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
s := &basepb.StringProto{}
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
@@ -51,7 +50,7 @@
return ctx
}
-func IncomingHeaders(ctx context.Context) http.Header {
+func IncomingHeaders(ctx netcontext.Context) http.Header {
if c := fromContext(ctx); c != nil {
if req, ok := c.Request().(*http.Request); ok {
return req.Header
@@ -60,11 +59,11 @@
return nil
}
-func ReqContext(req *http.Request) context.Context {
- return WithContext(context.Background(), req)
+func ReqContext(req *http.Request) netcontext.Context {
+ return WithContext(netcontext.Background(), req)
}
-func WithContext(parent context.Context, req *http.Request) context.Context {
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
c := appengine.NewContext(req)
return withContext(parent, c)
}
@@ -84,11 +83,11 @@
}
func (t *testingContext) Request() interface{} { return t.req }
-func ContextForTesting(req *http.Request) context.Context {
- return withContext(context.Background(), &testingContext{req: req})
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
}
-func Call(ctx context.Context, service, method string, in, out proto.Message) error {
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
if ns := NamespaceFromContext(ctx); ns != "" {
if fn, ok := NamespaceMods[service]; ok {
fn(in, ns)
@@ -145,8 +144,8 @@
return err
}
-func Middleware(next http.Handler) http.Handler {
- panic("Middleware called; this should be impossible")
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
}
func logf(c appengine.Context, level int64, format string, args ...interface{}) {
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
index 5b95c13..e0c0b21 100644
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -5,26 +5,20 @@
package internal
import (
- "context"
"errors"
"os"
"github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
)
-type ctxKey string
-
-func (c ctxKey) String() string {
- return "appengine context key: " + string(c)
-}
-
var errNotAppEngineContext = errors.New("not an App Engine context")
-type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
var callOverrideKey = "holds []CallOverrideFunc"
-func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context {
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
// We avoid appending to any existing call override
// so we don't risk overwriting a popped stack below.
var cofs []CallOverrideFunc
@@ -32,10 +26,10 @@
cofs = append(cofs, uf...)
}
cofs = append(cofs, f)
- return context.WithValue(ctx, &callOverrideKey, cofs)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}
-func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) {
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
if len(cofs) == 0 {
return nil, nil, false
@@ -43,7 +37,7 @@
// We found a list of overrides; grab the last, and reconstitute a
// context that will hide it.
f := cofs[len(cofs)-1]
- ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
return f, ctx, true
}
@@ -51,35 +45,23 @@
var logOverrideKey = "holds a logOverrideFunc"
-func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context {
- return context.WithValue(ctx, &logOverrideKey, f)
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
}
var appIDOverrideKey = "holds a string, being the full app ID"
-func WithAppIDOverride(ctx context.Context, appID string) context.Context {
- return context.WithValue(ctx, &appIDOverrideKey, appID)
-}
-
-var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST")
-
-func withAPIHostOverride(ctx context.Context, apiHost string) context.Context {
- return context.WithValue(ctx, apiHostOverrideKey, apiHost)
-}
-
-var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT")
-
-func withAPIPortOverride(ctx context.Context, apiPort string) context.Context {
- return context.WithValue(ctx, apiPortOverrideKey, apiPort)
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
}
var namespaceKey = "holds the namespace string"
-func withNamespace(ctx context.Context, ns string) context.Context {
- return context.WithValue(ctx, &namespaceKey, ns)
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
}
-func NamespaceFromContext(ctx context.Context) string {
+func NamespaceFromContext(ctx netcontext.Context) string {
// If there's no namespace, return the empty string.
ns, _ := ctx.Value(&namespaceKey).(string)
return ns
@@ -88,14 +70,14 @@
// FullyQualifiedAppID returns the fully-qualified application ID.
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx context.Context) string {
+func FullyQualifiedAppID(ctx netcontext.Context) string {
if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
return id
}
return fullyQualifiedAppID(ctx)
}
-func Logf(ctx context.Context, level int64, format string, args ...interface{}) {
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
f(level, format, args...)
return
@@ -108,7 +90,7 @@
}
// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx context.Context, namespace string) context.Context {
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
return withNamespace(ctx, namespace)
}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
index 0f95aa9..9b4134e 100644
--- a/vendor/google.golang.org/appengine/internal/identity.go
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -5,8 +5,9 @@
package internal
import (
- "context"
"os"
+
+ netcontext "golang.org/x/net/context"
)
var (
@@ -22,7 +23,7 @@
// AppID is the implementation of the wrapper function of the same name in
// ../identity.go. See that file for commentary.
-func AppID(c context.Context) string {
+func AppID(c netcontext.Context) string {
return appID(FullyQualifiedAppID(c))
}
@@ -34,7 +35,7 @@
return appengineStandard || IsSecondGen()
}
-// IsSecondGen is the implementation of the wrapper function of the same name in
+// IsStandard is the implementation of the wrapper function of the same name in
// ../appengine.go. See that file for commentary.
func IsSecondGen() bool {
// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
index 5ad3548..4e979f4 100644
--- a/vendor/google.golang.org/appengine/internal/identity_classic.go
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -2,22 +2,21 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build appengine
// +build appengine
package internal
import (
- "context"
-
"appengine"
+
+ netcontext "golang.org/x/net/context"
)
func init() {
appengineStandard = true
}
-func DefaultVersionHostname(ctx context.Context) string {
+func DefaultVersionHostname(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
@@ -25,12 +24,12 @@
return appengine.DefaultVersionHostname(c)
}
-func Datacenter(_ context.Context) string { return appengine.Datacenter() }
-func ServerSoftware() string { return appengine.ServerSoftware() }
-func InstanceID() string { return appengine.InstanceID() }
-func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
-func RequestID(ctx context.Context) string {
+func RequestID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
@@ -38,14 +37,14 @@
return appengine.RequestID(c)
}
-func ModuleName(ctx context.Context) string {
+func ModuleName(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
}
return appengine.ModuleName(c)
}
-func VersionID(ctx context.Context) string {
+func VersionID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
@@ -53,7 +52,7 @@
return appengine.VersionID(c)
}
-func fullyQualifiedAppID(ctx context.Context) string {
+func fullyQualifiedAppID(ctx netcontext.Context) string {
c := fromContext(ctx)
if c == nil {
panic(errNotAppEngineContext)
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
index 4201b6b..d5e2e7b 100644
--- a/vendor/google.golang.org/appengine/internal/identity_flex.go
+++ b/vendor/google.golang.org/appengine/internal/identity_flex.go
@@ -2,7 +2,6 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build appenginevm
// +build appenginevm
package internal
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
index 18ddda3..5d80672 100644
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -2,17 +2,17 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build !appengine
// +build !appengine
package internal
import (
- "context"
"log"
"net/http"
"os"
"strings"
+
+ netcontext "golang.org/x/net/context"
)
// These functions are implementations of the wrapper functions
@@ -24,7 +24,7 @@
hDatacenter = "X-AppEngine-Datacenter"
)
-func ctxHeaders(ctx context.Context) http.Header {
+func ctxHeaders(ctx netcontext.Context) http.Header {
c := fromContext(ctx)
if c == nil {
return nil
@@ -32,15 +32,15 @@
return c.Request().Header
}
-func DefaultVersionHostname(ctx context.Context) string {
+func DefaultVersionHostname(ctx netcontext.Context) string {
return ctxHeaders(ctx).Get(hDefaultVersionHostname)
}
-func RequestID(ctx context.Context) string {
+func RequestID(ctx netcontext.Context) string {
return ctxHeaders(ctx).Get(hRequestLogId)
}
-func Datacenter(ctx context.Context) string {
+func Datacenter(ctx netcontext.Context) string {
if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
return dc
}
@@ -71,7 +71,7 @@
// TODO(dsymonds): Remove the metadata fetches.
-func ModuleName(_ context.Context) string {
+func ModuleName(_ netcontext.Context) string {
if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
return s
}
@@ -81,7 +81,7 @@
return string(mustGetMetadata("instance/attributes/gae_backend_name"))
}
-func VersionID(_ context.Context) string {
+func VersionID(_ netcontext.Context) string {
if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
return s1 + "." + s2
}
@@ -112,7 +112,7 @@
return string(mustGetMetadata("instance/attributes/gae_project"))
}
-func fullyQualifiedAppID(_ context.Context) string {
+func fullyQualifiedAppID(_ netcontext.Context) string {
if s := os.Getenv("GAE_APPLICATION"); s != "" {
return s
}
@@ -130,5 +130,5 @@
}
func IsDevAppServer() bool {
- return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev"
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
index afd0ae8..1e76531 100644
--- a/vendor/google.golang.org/appengine/internal/main.go
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -2,7 +2,6 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build appengine
// +build appengine
package internal
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
index 86a8caf..ddb79a3 100644
--- a/vendor/google.golang.org/appengine/internal/main_vm.go
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -2,7 +2,6 @@
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
-//go:build !appengine
// +build !appengine
package internal
@@ -30,7 +29,7 @@
if IsDevAppServer() {
host = "127.0.0.1"
}
- if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil {
+ if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
log.Fatalf("http.ListenAndServe: %v", err)
}
}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
index 2ae8ab9..9006ae6 100644
--- a/vendor/google.golang.org/appengine/internal/transaction.go
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -7,11 +7,11 @@
// This file implements hooks for applying datastore transactions.
import (
- "context"
"errors"
"reflect"
"github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
basepb "google.golang.org/appengine/internal/base"
pb "google.golang.org/appengine/internal/datastore"
@@ -38,13 +38,13 @@
var transactionKey = "used for *Transaction"
-func transactionFromContext(ctx context.Context) *transaction {
+func transactionFromContext(ctx netcontext.Context) *transaction {
t, _ := ctx.Value(&transactionKey).(*transaction)
return t
}
-func withTransaction(ctx context.Context, t *transaction) context.Context {
- return context.WithValue(ctx, &transactionKey, t)
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
}
type transaction struct {
@@ -54,7 +54,7 @@
var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
if transactionFromContext(c) != nil {
return nil, errors.New("nested transactions are not supported")
}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
index 6f169be..21860ca 100644
--- a/vendor/google.golang.org/appengine/namespace.go
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -5,10 +5,11 @@
package appengine
import (
- "context"
"fmt"
"regexp"
+ "golang.org/x/net/context"
+
"google.golang.org/appengine/internal"
)
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
index fcf3ad0..05642a9 100644
--- a/vendor/google.golang.org/appengine/timeout.go
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -4,7 +4,7 @@
package appengine
-import "context"
+import "golang.org/x/net/context"
// IsTimeoutError reports whether err is a timeout error.
func IsTimeoutError(err error) bool {
diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh
new file mode 100644
index 0000000..785b62f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/travis_install.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+set -e
+
+if [[ $GO111MODULE == "on" ]]; then
+ go get .
+else
+ go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine)
+fi
+
+if [[ $GOAPP == "true" ]]; then
+ mkdir /tmp/sdk
+ curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip"
+ unzip -q /tmp/sdk.zip -d /tmp/sdk
+ # NOTE: Set the following env vars in the test script:
+ # export PATH="$PATH:/tmp/sdk/go_appengine"
+ # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
+fi
+
diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh
new file mode 100644
index 0000000..d4390f0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/travis_test.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+go version
+go test -v google.golang.org/appengine/...
+go test -v -race google.golang.org/appengine/...
+if [[ $GOAPP == "true" ]]; then
+ export PATH="$PATH:/tmp/sdk/go_appengine"
+ export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py
+ goapp version
+ goapp test -v google.golang.org/appengine/...
+fi
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
index 6c0d724..6ffe1e6 100644
--- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -7,7 +7,6 @@
package urlfetch // import "google.golang.org/appengine/urlfetch"
import (
- "context"
"errors"
"fmt"
"io"
@@ -19,6 +18,7 @@
"time"
"github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/urlfetch"
@@ -44,10 +44,11 @@
var _ http.RoundTripper = (*Transport)(nil)
// Client returns an *http.Client using a default urlfetch Transport. This
-// client will check the validity of SSL certificates.
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
//
-// Any deadline of the provided context will be used for requests through this client.
-// If the client does not have a deadline, then an App Engine default of 60 second is used.
+// Any deadline of the provided context will be used for requests through this client;
+// if the client does not have a deadline then a 5 second default is used.
func Client(ctx context.Context) *http.Client {
return &http.Client{
Transport: &Transport{
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 0000000..a4411c2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,454 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal/balancer/gracefulswitch"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/resolver"
+)
+
+type ccbMode int
+
+const (
+ ccbModeActive = iota
+ ccbModeIdle
+ ccbModeClosed
+ ccbModeExitingIdle
+)
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen synchronously and in order.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn
+ opts balancer.BuildOptions
+
+ // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
+ // mutually exclusive manner as they are scheduled in the serializer. Fields
+ // accessed *only* in these serializer callbacks, can therefore be accessed
+ // without a mutex.
+ balancer *gracefulswitch.Balancer
+ curBalancerName string
+
+ // mu guards access to the below fields. Access to the serializer and its
+ // cancel function needs to be mutex protected because they are overwritten
+ // when the wrapper exits idle mode.
+ mu sync.Mutex
+ serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
+ serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
+ mode ccbMode // Tracks the current mode of the wrapper.
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
+// is not created until the switchTo() method is invoked.
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
+ ctx, cancel := context.WithCancel(context.Background())
+ ccb := &ccBalancerWrapper{
+ cc: cc,
+ opts: bopts,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
+ return ccb
+}
+
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+ ccb.mu.Lock()
+ errCh := make(chan error, 1)
+ // Here and everywhere else where Schedule() is called, it is done with the
+ // lock held. But the lock guards only the scheduling part. The actual
+ // callback is called asynchronously without the lock being held.
+ ok := ccb.serializer.Schedule(func(_ context.Context) {
+ errCh <- ccb.balancer.UpdateClientConnState(*ccs)
+ })
+ if !ok {
+ // If we are unable to schedule a function with the serializer, it
+ // indicates that it has been closed. A serializer is only closed when
+ // the wrapper is closed or is in idle.
+ ccb.mu.Unlock()
+ return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
+ }
+ ccb.mu.Unlock()
+
+ // We get here only if the above call to Schedule succeeds, in which case it
+ // is guaranteed that the scheduled function will run. Therefore it is safe
+ // to block on this channel.
+ err := <-errCh
+ if logger.V(2) && err != nil {
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ }
+ return err
+}
+
+// updateSubConnState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
+ ccb.mu.Lock()
+ ccb.serializer.Schedule(func(_ context.Context) {
+ // Even though it is optional for balancers, gracefulswitch ensures
+ // opts.StateListener is set, so this cannot ever be nil.
+ sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+ })
+ ccb.mu.Unlock()
+}
+
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+ ccb.mu.Lock()
+ ccb.serializer.Schedule(func(_ context.Context) {
+ ccb.balancer.ResolverError(err)
+ })
+ ccb.mu.Unlock()
+}
+
+// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
+// LB policy identified by name.
+//
+// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
+// first good update from the name resolver, it determines the LB policy to use
+// and invokes the switchTo() method. Upon receipt of every subsequent update
+// from the name resolver, it invokes this method.
+//
+// the ccBalancerWrapper keeps track of the current LB policy name, and skips
+// the graceful balancer switching process if the name does not change.
+func (ccb *ccBalancerWrapper) switchTo(name string) {
+ ccb.mu.Lock()
+ ccb.serializer.Schedule(func(_ context.Context) {
+ // TODO: Other languages use case-sensitive balancer registries. We should
+ // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
+ if strings.EqualFold(ccb.curBalancerName, name) {
+ return
+ }
+ ccb.buildLoadBalancingPolicy(name)
+ })
+ ccb.mu.Unlock()
+}
+
+// buildLoadBalancingPolicy performs the following:
+// - retrieve a balancer builder for the given name. Use the default LB
+// policy, pick_first, if no LB policy with name is found in the registry.
+// - instruct the gracefulswitch balancer to switch to the above builder. This
+// will actually build the new balancer.
+// - update the `curBalancerName` field
+//
+// Must be called from a serializer callback.
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
+ builder := balancer.Get(name)
+ if builder == nil {
+ channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+ builder = newPickfirstBuilder()
+ } else {
+ channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+ }
+
+ if err := ccb.balancer.SwitchTo(builder); err != nil {
+ channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+ return
+ }
+ ccb.curBalancerName = builder.Name()
+}
+
+func (ccb *ccBalancerWrapper) close() {
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
+ ccb.closeBalancer(ccbModeClosed)
+}
+
+// enterIdleMode is invoked by grpc when the channel enters idle mode upon
+// expiry of idle_timeout. This call blocks until the balancer is closed.
+func (ccb *ccBalancerWrapper) enterIdleMode() {
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
+ ccb.closeBalancer(ccbModeIdle)
+}
+
+// closeBalancer is invoked when the channel is being closed or when it enters
+// idle mode upon expiry of idle_timeout.
+func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
+ ccb.mu.Lock()
+ if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
+ ccb.mu.Unlock()
+ return
+ }
+
+ ccb.mode = m
+ done := ccb.serializer.Done()
+ b := ccb.balancer
+ ok := ccb.serializer.Schedule(func(_ context.Context) {
+ // Close the serializer to ensure that no more calls from gRPC are sent
+ // to the balancer.
+ ccb.serializerCancel()
+ // Empty the current balancer name because we don't have a balancer
+ // anymore and also so that we act on the next call to switchTo by
+ // creating a new balancer specified by the new resolver.
+ ccb.curBalancerName = ""
+ })
+ if !ok {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+
+ // Give enqueued callbacks a chance to finish before closing the balancer.
+ <-done
+ b.Close()
+}
+
+// exitIdleMode is invoked by grpc when the channel exits idle mode either
+// because of an RPC or because of an invocation of the Connect() API. This
+// recreates the balancer that was closed previously when entering idle mode.
+//
+// If the channel is not in idle mode, we know for a fact that we are here as a
+// result of the user calling the Connect() method on the ClientConn. In this
+// case, we can simply forward the call to the underlying balancer, instructing
+// it to reconnect to the backends.
+func (ccb *ccBalancerWrapper) exitIdleMode() {
+ ccb.mu.Lock()
+ if ccb.mode == ccbModeClosed {
+ // Request to exit idle is a no-op when wrapper is already closed.
+ ccb.mu.Unlock()
+ return
+ }
+
+ if ccb.mode == ccbModeIdle {
+ // Recreate the serializer which was closed when we entered idle.
+ ctx, cancel := context.WithCancel(context.Background())
+ ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
+ ccb.serializerCancel = cancel
+ }
+
+ // The ClientConn guarantees that mutual exclusion between close() and
+ // exitIdleMode(), and since we just created a new serializer, we can be
+ // sure that the below function will be scheduled.
+ done := make(chan struct{})
+ ccb.serializer.Schedule(func(_ context.Context) {
+ defer close(done)
+
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+
+ if ccb.mode != ccbModeIdle {
+ ccb.balancer.ExitIdle()
+ return
+ }
+
+ // Gracefulswitch balancer does not support a switchTo operation after
+ // being closed. Hence we need to create a new one here.
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+ ccb.mode = ccbModeActive
+ channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
+
+ })
+ ccb.mu.Unlock()
+
+ <-done
+}
+
+func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
+ ccb.mu.Lock()
+ defer ccb.mu.Unlock()
+ return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ if ccb.isIdleOrClosed() {
+ return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
+ }
+
+ if len(addrs) == 0 {
+ return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+ }
+ ac, err := ccb.cc.newAddrConn(addrs, opts)
+ if err != nil {
+ channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+ return nil, err
+ }
+ acbw := &acBalancerWrapper{
+ ccb: ccb,
+ ac: ac,
+ producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
+ stateListener: opts.StateListener,
+ }
+ ac.acbw = acbw
+ return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ // The graceful switch balancer will never call this.
+ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ if ccb.isIdleOrClosed() {
+ return
+ }
+
+ acbw, ok := sc.(*acBalancerWrapper)
+ if !ok {
+ return
+ }
+ acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+ if ccb.isIdleOrClosed() {
+ return
+ }
+
+ // Update picker before updating state. Even though the ordering here does
+ // not matter, it can lead to multiple calls of Pick in the common start-up
+ // case where we wait for ready and then perform an RPC. If the picker is
+ // updated later, we could call the "connecting" picker when the state is
+ // updated, and then call the "ready" picker after the picker gets updated.
+ ccb.cc.blockingpicker.updatePicker(s.Picker)
+ ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+ if ccb.isIdleOrClosed() {
+ return
+ }
+
+ ccb.cc.resolveNow(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+ return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+ ac *addrConn // read-only
+ ccb *ccBalancerWrapper // read-only
+ stateListener func(balancer.SubConnState)
+
+ mu sync.Mutex
+ producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+func (acbw *acBalancerWrapper) String() string {
+ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+ acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+ go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+ ccb := acbw.ccb
+ if ccb.isIdleOrClosed() {
+ // It it safe to ignore this call when the balancer is closed or in idle
+ // because the ClientConn takes care of closing the connections.
+ //
+ // Not returning early from here when the balancer is closed or in idle
+ // leads to a deadlock though, because of the following sequence of
+ // calls when holding cc.mu:
+ // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
+ // ccb.RemoveAddrConn --> cc.removeAddrConn
+ return
+ }
+
+ ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
+// ready, blocks until it is or ctx expires. Returns an error when the context
+// expires or the addrConn is shut down.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ transport, err := acbw.ac.getTransport(ctx)
+ if err != nil {
+ return nil, err
+ }
+ return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC. If the addrConn is not ready, returns
+// errSubConnNotReady.
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
+ cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+ if err != nil {
+ return err
+ }
+ if err := cs.SendMsg(args); err != nil {
+ return err
+ }
+ return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+ producer balancer.Producer
+ refs int // number of current refs to the producer
+ close func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+ acbw.mu.Lock()
+ defer acbw.mu.Unlock()
+
+ // Look up existing producer from this builder.
+ pData := acbw.producers[pb]
+ if pData == nil {
+ // Not found; create a new one and add it to the producers map.
+ p, close := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: close}
+ acbw.producers[pb] = pData
+ }
+ // Account for this new reference.
+ pData.refs++
+
+ // Return a cleanup function wrapped in a OnceFunc to remove this reference
+ // and delete the refCountedProducer from the map if the total reference
+ // count goes to zero.
+ unref := func() {
+ acbw.mu.Lock()
+ pData.refs--
+ if pData.refs == 0 {
+ defer pData.close() // Run outside the acbw mutex
+ delete(acbw.producers, pb)
+ }
+ acbw.mu.Unlock()
+ }
+ return pData.producer, grpcsync.OnceFunc(unref)
+}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
deleted file mode 100644
index b5e30cf..0000000
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/internal/balancer/gracefulswitch"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/resolver"
-)
-
-// ccBalancerWrapper sits between the ClientConn and the Balancer.
-//
-// ccBalancerWrapper implements methods corresponding to the ones on the
-// balancer.Balancer interface. The ClientConn is free to call these methods
-// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
-// to the Balancer happen in order by performing them in the serializer, without
-// any mutexes held.
-//
-// ccBalancerWrapper also implements the balancer.ClientConn interface and is
-// passed to the Balancer implementations. It invokes unexported methods on the
-// ClientConn to handle these calls from the Balancer.
-//
-// It uses the gracefulswitch.Balancer internally to ensure that balancer
-// switches happen in a graceful manner.
-type ccBalancerWrapper struct {
- // The following fields are initialized when the wrapper is created and are
- // read-only afterwards, and therefore can be accessed without a mutex.
- cc *ClientConn
- opts balancer.BuildOptions
- serializer *grpcsync.CallbackSerializer
- serializerCancel context.CancelFunc
-
- // The following fields are only accessed within the serializer or during
- // initialization.
- curBalancerName string
- balancer *gracefulswitch.Balancer
-
- // The following field is protected by mu. Caller must take cc.mu before
- // taking mu.
- mu sync.Mutex
- closed bool
-}
-
-// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
-// underlying balancer is not created until the switchTo() method is invoked.
-func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
- ctx, cancel := context.WithCancel(cc.ctx)
- ccb := &ccBalancerWrapper{
- cc: cc,
- opts: balancer.BuildOptions{
- DialCreds: cc.dopts.copts.TransportCredentials,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- Authority: cc.authority,
- CustomUserAgent: cc.dopts.copts.UserAgent,
- ChannelzParentID: cc.channelzID,
- Target: cc.parsedTarget,
- },
- serializer: grpcsync.NewCallbackSerializer(ctx),
- serializerCancel: cancel,
- }
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
- return ccb
-}
-
-// updateClientConnState is invoked by grpc to push a ClientConnState update to
-// the underlying balancer. This is always executed from the serializer, so
-// it is safe to call into the balancer here.
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- errCh := make(chan error)
- ok := ccb.serializer.Schedule(func(ctx context.Context) {
- defer close(errCh)
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- err := ccb.balancer.UpdateClientConnState(*ccs)
- if logger.V(2) && err != nil {
- logger.Infof("error from balancer.UpdateClientConnState: %v", err)
- }
- errCh <- err
- })
- if !ok {
- return nil
- }
- return <-errCh
-}
-
-// resolverError is invoked by grpc to push a resolver error to the underlying
-// balancer. The call to the balancer is executed from the serializer.
-func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.serializer.Schedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- ccb.balancer.ResolverError(err)
- })
-}
-
-// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
-// LB policy identified by name.
-//
-// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
-// first good update from the name resolver, it determines the LB policy to use
-// and invokes the switchTo() method. Upon receipt of every subsequent update
-// from the name resolver, it invokes this method.
-//
-// the ccBalancerWrapper keeps track of the current LB policy name, and skips
-// the graceful balancer switching process if the name does not change.
-func (ccb *ccBalancerWrapper) switchTo(name string) {
- ccb.serializer.Schedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- // TODO: Other languages use case-sensitive balancer registries. We should
- // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
- if strings.EqualFold(ccb.curBalancerName, name) {
- return
- }
- ccb.buildLoadBalancingPolicy(name)
- })
-}
-
-// buildLoadBalancingPolicy performs the following:
-// - retrieve a balancer builder for the given name. Use the default LB
-// policy, pick_first, if no LB policy with name is found in the registry.
-// - instruct the gracefulswitch balancer to switch to the above builder. This
-// will actually build the new balancer.
-// - update the `curBalancerName` field
-//
-// Must be called from a serializer callback.
-func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
- builder := balancer.Get(name)
- if builder == nil {
- channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
- builder = newPickfirstBuilder()
- } else {
- channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
- }
-
- if err := ccb.balancer.SwitchTo(builder); err != nil {
- channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
- return
- }
- ccb.curBalancerName = builder.Name()
-}
-
-// close initiates async shutdown of the wrapper. cc.mu must be held when
-// calling this function. To determine the wrapper has finished shutting down,
-// the channel should block on ccb.serializer.Done() without cc.mu held.
-func (ccb *ccBalancerWrapper) close() {
- ccb.mu.Lock()
- ccb.closed = true
- ccb.mu.Unlock()
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
- ccb.serializer.Schedule(func(context.Context) {
- if ccb.balancer == nil {
- return
- }
- ccb.balancer.Close()
- ccb.balancer = nil
- })
- ccb.serializerCancel()
-}
-
-// exitIdle invokes the balancer's exitIdle method in the serializer.
-func (ccb *ccBalancerWrapper) exitIdle() {
- ccb.serializer.Schedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccb.balancer == nil {
- return
- }
- ccb.balancer.ExitIdle()
- })
-}
-
-func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- ccb.cc.mu.Lock()
- defer ccb.cc.mu.Unlock()
-
- ccb.mu.Lock()
- if ccb.closed {
- ccb.mu.Unlock()
- return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
- }
- ccb.mu.Unlock()
-
- if len(addrs) == 0 {
- return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
- }
- ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
- if err != nil {
- channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
- return nil, err
- }
- acbw := &acBalancerWrapper{
- ccb: ccb,
- ac: ac,
- producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
- stateListener: opts.StateListener,
- }
- ac.acbw = acbw
- return acbw, nil
-}
-
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- // The graceful switch balancer will never call this.
- logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
-}
-
-func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
- acbw, ok := sc.(*acBalancerWrapper)
- if !ok {
- return
- }
- acbw.UpdateAddresses(addrs)
-}
-
-func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
- ccb.cc.mu.Lock()
- defer ccb.cc.mu.Unlock()
-
- ccb.mu.Lock()
- if ccb.closed {
- ccb.mu.Unlock()
- return
- }
- ccb.mu.Unlock()
- // Update picker before updating state. Even though the ordering here does
- // not matter, it can lead to multiple calls of Pick in the common start-up
- // case where we wait for ready and then perform an RPC. If the picker is
- // updated later, we could call the "connecting" picker when the state is
- // updated, and then call the "ready" picker after the picker gets updated.
-
- // Note that there is no need to check if the balancer wrapper was closed,
- // as we know the graceful switch LB policy will not call cc if it has been
- // closed.
- ccb.cc.pickerWrapper.updatePicker(s.Picker)
- ccb.cc.csMgr.updateState(s.ConnectivityState)
-}
-
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
- ccb.cc.mu.RLock()
- defer ccb.cc.mu.RUnlock()
-
- ccb.mu.Lock()
- if ccb.closed {
- ccb.mu.Unlock()
- return
- }
- ccb.mu.Unlock()
- ccb.cc.resolveNowLocked(o)
-}
-
-func (ccb *ccBalancerWrapper) Target() string {
- return ccb.cc.target
-}
-
-// acBalancerWrapper is a wrapper on top of ac for balancers.
-// It implements balancer.SubConn interface.
-type acBalancerWrapper struct {
- ac *addrConn // read-only
- ccb *ccBalancerWrapper // read-only
- stateListener func(balancer.SubConnState)
-
- mu sync.Mutex
- producers map[balancer.ProducerBuilder]*refCountedProducer
-}
-
-// updateState is invoked by grpc to push a subConn state update to the
-// underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
- acbw.ccb.serializer.Schedule(func(ctx context.Context) {
- if ctx.Err() != nil || acbw.ccb.balancer == nil {
- return
- }
- // Even though it is optional for balancers, gracefulswitch ensures
- // opts.StateListener is set, so this cannot ever be nil.
- // TODO: delete this comment when UpdateSubConnState is removed.
- acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
- })
-}
-
-func (acbw *acBalancerWrapper) String() string {
- return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
-}
-
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
- acbw.ac.updateAddrs(addrs)
-}
-
-func (acbw *acBalancerWrapper) Connect() {
- go acbw.ac.connect()
-}
-
-func (acbw *acBalancerWrapper) Shutdown() {
- acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
-}
-
-// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
-// ready, blocks until it is or ctx expires. Returns an error when the context
-// expires or the addrConn is shut down.
-func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- transport, err := acbw.ac.getTransport(ctx)
- if err != nil {
- return nil, err
- }
- return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
-}
-
-// Invoke performs a unary RPC. If the addrConn is not ready, returns
-// errSubConnNotReady.
-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
- cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
- if err != nil {
- return err
- }
- if err := cs.SendMsg(args); err != nil {
- return err
- }
- return cs.RecvMsg(reply)
-}
-
-type refCountedProducer struct {
- producer balancer.Producer
- refs int // number of current refs to the producer
- close func() // underlying producer's close function
-}
-
-func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
-
- // Look up existing producer from this builder.
- pData := acbw.producers[pb]
- if pData == nil {
- // Not found; create a new one and add it to the producers map.
- p, close := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: close}
- acbw.producers[pb] = pData
- }
- // Account for this new reference.
- pData.refs++
-
- // Return a cleanup function wrapped in a OnceFunc to remove this reference
- // and delete the refCountedProducer from the map if the total reference
- // count goes to zero.
- unref := func() {
- acbw.mu.Lock()
- pData.refs--
- if pData.refs == 0 {
- defer pData.close() // Run outside the acbw mutex
- delete(acbw.producers, pb)
- }
- acbw.mu.Unlock()
- }
- return pData.producer, grpcsync.OnceFunc(unref)
-}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index e6f2625..429c389 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -33,7 +33,9 @@
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
@@ -46,9 +48,9 @@
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
_ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
- _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
)
const (
@@ -117,80 +119,6 @@
}, nil
}
-// newClient returns a new client in idle mode.
-func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
- cc := &ClientConn{
- target: target,
- conns: make(map[*addrConn]struct{}),
- dopts: defaultDialOptions(),
- czData: new(channelzData),
- }
-
- cc.retryThrottler.Store((*retryThrottler)(nil))
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
- cc.ctx, cc.cancel = context.WithCancel(context.Background())
-
- // Apply dial options.
- disableGlobalOpts := false
- for _, opt := range opts {
- if _, ok := opt.(*disableGlobalDialOptions); ok {
- disableGlobalOpts = true
- break
- }
- }
-
- if !disableGlobalOpts {
- for _, opt := range globalDialOptions {
- opt.apply(&cc.dopts)
- }
- }
-
- for _, opt := range opts {
- opt.apply(&cc.dopts)
- }
- chainUnaryClientInterceptors(cc)
- chainStreamClientInterceptors(cc)
-
- if err := cc.validateTransportCredentials(); err != nil {
- return nil, err
- }
-
- if cc.dopts.defaultServiceConfigRawJSON != nil {
- scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
- if scpr.Err != nil {
- return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
- }
- cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
- }
- cc.mkp = cc.dopts.copts.KeepaliveParams
-
- // Register ClientConn with channelz.
- cc.channelzRegistration(target)
-
- // TODO: Ideally it should be impossible to error from this function after
- // channelz registration. This will require removing some channelz logs
- // from the following functions that can error. Errors can be returned to
- // the user, and successful logs can be emitted here, after the checks have
- // passed and channelz is subsequently registered.
-
- // Determine the resolver to use.
- if err := cc.parseTargetAndFindResolver(); err != nil {
- channelz.RemoveEntry(cc.channelzID)
- return nil, err
- }
- if err = cc.determineAuthority(); err != nil {
- channelz.RemoveEntry(cc.channelzID)
- return nil, err
- }
-
- cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
- cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
-
- cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
- cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
- return cc, nil
-}
-
// DialContext creates a client connection to the given target. By default, it's
// a non-blocking dial (the function won't wait for connections to be
// established, and connecting happens in the background). To make it a blocking
@@ -208,21 +136,50 @@
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
- cc, err := newClient(target, opts...)
- if err != nil {
- return nil, err
+ cc := &ClientConn{
+ target: target,
+ conns: make(map[*addrConn]struct{}),
+ dopts: defaultDialOptions(),
+ czData: new(channelzData),
}
- // We start the channel off in idle mode, but kick it out of idle now,
- // instead of waiting for the first RPC. Other gRPC implementations do wait
- // for the first RPC to kick the channel out of idle. But doing so would be
- // a major behavior change for our users who are used to seeing the channel
- // active after Dial.
+ // We start the channel off in idle mode, but kick it out of idle at the end
+ // of this method, instead of waiting for the first RPC. Other gRPC
+ // implementations do wait for the first RPC to kick the channel out of
+ // idle. But doing so would be a major behavior change for our users who are
+ // used to seeing the channel active after Dial.
//
// Taking this approach of kicking it out of idle at the end of this method
// allows us to share the code between channel creation and exiting idle
// mode. This will also make it easy for us to switch to starting the
- // channel off in idle, i.e. by making newClient exported.
+ // channel off in idle, if at all we ever get to do that.
+ cc.idlenessState = ccIdlenessStateIdle
+
+ cc.retryThrottler.Store((*retryThrottler)(nil))
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+ cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ cc.exitIdleCond = sync.NewCond(&cc.mu)
+
+ disableGlobalOpts := false
+ for _, opt := range opts {
+ if _, ok := opt.(*disableGlobalDialOptions); ok {
+ disableGlobalOpts = true
+ break
+ }
+ }
+
+ if !disableGlobalOpts {
+ for _, opt := range globalDialOptions {
+ opt.apply(&cc.dopts)
+ }
+ }
+
+ for _, opt := range opts {
+ opt.apply(&cc.dopts)
+ }
+
+ chainUnaryClientInterceptors(cc)
+ chainStreamClientInterceptors(cc)
defer func() {
if err != nil {
@@ -230,14 +187,28 @@
}
}()
- // This creates the name resolver, load balancer, etc.
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ // Register ClientConn with channelz.
+ cc.channelzRegistration(target)
+
+ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
+
+ if err := cc.validateTransportCredentials(); err != nil {
return nil, err
}
- // Return now for non-blocking dials.
- if !cc.dopts.block {
- return cc, nil
+ if cc.dopts.defaultServiceConfigRawJSON != nil {
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+ if scpr.Err != nil {
+ return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+ }
+ cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+ }
+ cc.mkp = cc.dopts.copts.KeepaliveParams
+
+ if cc.dopts.copts.UserAgent != "" {
+ cc.dopts.copts.UserAgent += " " + grpcUA
+ } else {
+ cc.dopts.copts.UserAgent = grpcUA
}
if cc.dopts.timeout > 0 {
@@ -260,6 +231,49 @@
}
}()
+ if cc.dopts.bs == nil {
+ cc.dopts.bs = backoff.DefaultExponential
+ }
+
+ // Determine the resolver to use.
+ if err := cc.parseTargetAndFindResolver(); err != nil {
+ return nil, err
+ }
+ if err = cc.determineAuthority(); err != nil {
+ return nil, err
+ }
+
+ if cc.dopts.scChan != nil {
+ // Blocking wait for the initial service config.
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if ok {
+ cc.sc = &sc
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ }
+ if cc.dopts.scChan != nil {
+ go cc.scWatcher()
+ }
+
+ // This creates the name resolver, load balancer, blocking picker etc.
+ if err := cc.exitIdleMode(); err != nil {
+ return nil, err
+ }
+
+ // Configure idleness support with configured idle timeout or default idle
+ // timeout duration. Idleness can be explicitly disabled by the user, by
+ // setting the dial option to 0.
+ cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger})
+
+ // Return early for non-blocking dials.
+ if !cc.dopts.block {
+ return cc, nil
+ }
+
// A blocking dial blocks until the clientConn is ready.
for {
s := cc.GetState()
@@ -306,8 +320,8 @@
type idler ClientConn
-func (i *idler) EnterIdleMode() {
- (*ClientConn)(i).enterIdleMode()
+func (i *idler) EnterIdleMode() error {
+ return (*ClientConn)(i).enterIdleMode()
}
func (i *idler) ExitIdleMode() error {
@@ -315,71 +329,117 @@
}
// exitIdleMode moves the channel out of idle mode by recreating the name
-// resolver and load balancer. This should never be called directly; use
-// cc.idlenessMgr.ExitIdleMode instead.
-func (cc *ClientConn) exitIdleMode() (err error) {
+// resolver and load balancer.
+func (cc *ClientConn) exitIdleMode() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return errConnClosing
}
+ if cc.idlenessState != ccIdlenessStateIdle {
+ channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
+ cc.mu.Unlock()
+ return nil
+ }
+
+ defer func() {
+ // When Close() and exitIdleMode() race against each other, one of the
+ // following two can happen:
+ // - Close() wins the race and runs first. exitIdleMode() runs after, and
+ // sees that the ClientConn is already closed and hence returns early.
+ // - exitIdleMode() wins the race and runs first and recreates the balancer
+ // and releases the lock before recreating the resolver. If Close() runs
+ // in this window, it will wait for exitIdleMode to complete.
+ //
+ // We achieve this synchronization using the below condition variable.
+ cc.mu.Lock()
+ cc.idlenessState = ccIdlenessStateActive
+ cc.exitIdleCond.Signal()
+ cc.mu.Unlock()
+ }()
+
+ cc.idlenessState = ccIdlenessStateExitingIdle
+ exitedIdle := false
+ if cc.blockingpicker == nil {
+ cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+ } else {
+ cc.blockingpicker.exitIdleMode()
+ exitedIdle = true
+ }
+
+ var credsClone credentials.TransportCredentials
+ if creds := cc.dopts.copts.TransportCredentials; creds != nil {
+ credsClone = creds.Clone()
+ }
+ if cc.balancerWrapper == nil {
+ cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
+ DialCreds: credsClone,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ Authority: cc.authority,
+ CustomUserAgent: cc.dopts.copts.UserAgent,
+ ChannelzParentID: cc.channelzID,
+ Target: cc.parsedTarget,
+ })
+ } else {
+ cc.balancerWrapper.exitIdleMode()
+ }
+ cc.firstResolveEvent = grpcsync.NewEvent()
cc.mu.Unlock()
// This needs to be called without cc.mu because this builds a new resolver
- // which might update state or report error inline, which would then need to
- // acquire cc.mu.
- if err := cc.resolverWrapper.start(); err != nil {
+ // which might update state or report error inline which needs to be handled
+ // by cc.updateResolverState() which also grabs cc.mu.
+ if err := cc.initResolverWrapper(credsClone); err != nil {
return err
}
- cc.addTraceEvent("exiting idle mode")
+ if exitedIdle {
+ cc.addTraceEvent("exiting idle mode")
+ }
return nil
}
-// initIdleStateLocked initializes common state to how it should be while idle.
-func (cc *ClientConn) initIdleStateLocked() {
- cc.resolverWrapper = newCCResolverWrapper(cc)
- cc.balancerWrapper = newCCBalancerWrapper(cc)
- cc.firstResolveEvent = grpcsync.NewEvent()
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer and any subchannels.
+func (cc *ClientConn) enterIdleMode() error {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ if cc.conns == nil {
+ return ErrClientConnClosing
+ }
+ if cc.idlenessState != ccIdlenessStateActive {
+ channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
+ return nil
+ }
+
// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
// of setting it to nil here, we recreate the map. This also means that we
// don't have to do this when exiting idle mode.
- cc.conns = make(map[*addrConn]struct{})
-}
-
-// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
-// name resolver, load balancer, and any subchannels. This should never be
-// called directly; use cc.idlenessMgr.EnterIdleMode instead.
-func (cc *ClientConn) enterIdleMode() {
- cc.mu.Lock()
-
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
-
conns := cc.conns
+ cc.conns = make(map[*addrConn]struct{})
- rWrapper := cc.resolverWrapper
- rWrapper.close()
- cc.pickerWrapper.reset()
- bWrapper := cc.balancerWrapper
- bWrapper.close()
+ // TODO: Currently, we close the resolver wrapper upon entering idle mode
+ // and create a new one upon exiting idle mode. This means that the
+ // `cc.resolverWrapper` field would be overwritten everytime we exit idle
+ // mode. While this means that we need to hold `cc.mu` when accessing
+ // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
+ // try to do the same for the balancer and picker wrappers too.
+ cc.resolverWrapper.close()
+ cc.blockingpicker.enterIdleMode()
+ cc.balancerWrapper.enterIdleMode()
cc.csMgr.updateState(connectivity.Idle)
+ cc.idlenessState = ccIdlenessStateIdle
cc.addTraceEvent("entering idle mode")
- cc.initIdleStateLocked()
+ go func() {
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
+ }()
- cc.mu.Unlock()
-
- // Block until the name resolver and LB policy are closed.
- <-rWrapper.serializer.Done()
- <-bWrapper.serializer.Done()
-
- // Close all subchannels after the LB policy is closed.
- for ac := range conns {
- ac.tearDown(errConnIdling)
- }
+ return nil
}
// validateTransportCredentials performs a series of checks on the configured
@@ -589,35 +649,66 @@
dopts dialOptions // Default and user specified dial options.
channelzID *channelz.Identifier // Channelz identifier for the channel.
resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
- idlenessMgr *idle.Manager
+ balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
+ idlenessMgr idle.Manager
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
csMgr *connectivityStateManager
- pickerWrapper *pickerWrapper
+ blockingpicker *pickerWrapper
safeConfigSelector iresolver.SafeConfigSelector
czData *channelzData
retryThrottler atomic.Value // Updated from service config.
+ // firstResolveEvent is used to track whether the name resolver sent us at
+ // least one update. RPCs block on this event.
+ firstResolveEvent *grpcsync.Event
+
// mu protects the following fields.
// TODO: split mu so the same mutex isn't used for everything.
mu sync.RWMutex
- resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close.
- balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
+ resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close.
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
- // firstResolveEvent is used to track whether the name resolver sent us at
- // least one update. RPCs block on this event. May be accessed without mu
- // if we know we cannot be asked to enter idle mode while accessing it (e.g.
- // when the idle manager has already been closed, or if we are already
- // entering idle mode).
- firstResolveEvent *grpcsync.Event
+ idlenessState ccIdlenessState // Tracks idleness state of the channel.
+ exitIdleCond *sync.Cond // Signalled when channel exits idle.
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
}
+// ccIdlenessState tracks the idleness state of the channel.
+//
+// Channels start off in `active` and move to `idle` after a period of
+// inactivity. When moving back to `active` upon an incoming RPC, they
+// transition through `exiting_idle`. This state is useful for synchronization
+// with Close().
+//
+// This state tracking is mostly for self-protection. The idlenessManager is
+// expected to keep track of the state as well, and is expected not to call into
+// the ClientConn unnecessarily.
+type ccIdlenessState int8
+
+const (
+ ccIdlenessStateActive ccIdlenessState = iota
+ ccIdlenessStateIdle
+ ccIdlenessStateExitingIdle
+)
+
+func (s ccIdlenessState) String() string {
+ switch s {
+ case ccIdlenessStateActive:
+ return "active"
+ case ccIdlenessStateIdle:
+ return "idle"
+ case ccIdlenessStateExitingIdle:
+ return "exitingIdle"
+ default:
+ return "unknown"
+ }
+}
+
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
@@ -657,15 +748,29 @@
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
- cc.addTraceEvent(err.Error())
- return
- }
+ cc.exitIdleMode()
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
- cc.mu.Lock()
- cc.balancerWrapper.exitIdle()
- cc.mu.Unlock()
+ cc.balancerWrapper.exitIdleMode()
+}
+
+func (cc *ClientConn) scWatcher() {
+ for {
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if !ok {
+ return
+ }
+ cc.mu.Lock()
+ // TODO: load balance policy runtime change is ignored.
+ // We may revisit this decision in the future.
+ cc.sc = &sc
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
+ cc.mu.Unlock()
+ case <-cc.ctx.Done():
+ return
+ }
+ }
}
// waitForResolvedAddrs blocks until the resolver has provided addresses or the
@@ -699,11 +804,11 @@
internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
return cc.csMgr.pubSub.Subscribe(s)
}
- internal.EnterIdleModeForTesting = func(cc *ClientConn) {
- cc.idlenessMgr.EnterIdleModeForTesting()
+ internal.EnterIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.enterIdleMode()
}
internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
- return cc.idlenessMgr.ExitIdleMode()
+ return cc.exitIdleMode()
}
}
@@ -719,8 +824,9 @@
}
}
-func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
defer cc.firstResolveEvent.Fire()
+ cc.mu.Lock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
@@ -766,7 +872,7 @@
if cc.sc == nil {
// Apply the failing LB only if we haven't received valid service config
// from the name resolver in the past.
- cc.applyFailingLBLocked(s.ServiceConfig)
+ cc.applyFailingLB(s.ServiceConfig)
cc.mu.Unlock()
return ret
}
@@ -788,13 +894,15 @@
return ret
}
-// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// applyFailingLB is akin to configuring an LB policy on the channel which
// always fails RPCs. Here, an actual LB policy is not configured, but an always
// erroring picker is configured, which returns errors with information about
// what was invalid in the received service config. A config selector with no
// service config is configured, and the connectivity state of the channel is
// set to TransientFailure.
-func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+//
+// Caller must hold cc.mu.
+func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
var err error
if sc.Err != nil {
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
@@ -802,10 +910,14 @@
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
}
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
- cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+ cc.blockingpicker.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
}
+func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+ cc.balancerWrapper.updateSubConnState(sc, s, err)
+}
+
// Makes a copy of the input addresses slice and clears out the balancer
// attributes field. Addresses are passed during subconn creation and address
// update operations. In both cases, we will clear the balancer attributes by
@@ -820,14 +932,10 @@
return out
}
-// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
+// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
- if cc.conns == nil {
- return nil, ErrClientConnClosing
- }
-
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
@@ -839,6 +947,12 @@
stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ // Track ac in cc. This needs to be done before any getTransport(...) is called.
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ if cc.conns == nil {
+ return nil, ErrClientConnClosing
+ }
var err error
ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
@@ -854,7 +968,6 @@
},
})
- // Track ac in cc. This needs to be done before any getTransport(...) is called.
cc.conns[ac] = struct{}{}
return ac, nil
}
@@ -1061,7 +1174,7 @@
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
- return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
+ return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
Ctx: ctx,
FullMethodName: method,
})
@@ -1103,12 +1216,12 @@
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
cc.mu.RLock()
- cc.resolverWrapper.resolveNow(o)
+ r := cc.resolverWrapper
cc.mu.RUnlock()
-}
-
-func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
- cc.resolverWrapper.resolveNow(o)
+ if r == nil {
+ return
+ }
+ go r.resolveNow(o)
}
// ResetConnectBackoff wakes up all subchannels in transient failure and causes
@@ -1140,32 +1253,40 @@
<-cc.csMgr.pubSub.Done()
}()
- // Prevent calls to enter/exit idle immediately, and ensure we are not
- // currently entering/exiting idle mode.
- cc.idlenessMgr.Close()
-
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return ErrClientConnClosing
}
+ for cc.idlenessState == ccIdlenessStateExitingIdle {
+ cc.exitIdleCond.Wait()
+ }
+
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
- // We can safely unlock and continue to access all fields now as
- // cc.conns==nil, preventing any further operations on cc.
+ pWrapper := cc.blockingpicker
+ rWrapper := cc.resolverWrapper
+ bWrapper := cc.balancerWrapper
+ idlenessMgr := cc.idlenessMgr
cc.mu.Unlock()
- cc.resolverWrapper.close()
// The order of closing matters here since the balancer wrapper assumes the
// picker is closed before it is closed.
- cc.pickerWrapper.close()
- cc.balancerWrapper.close()
-
- <-cc.resolverWrapper.serializer.Done()
- <-cc.balancerWrapper.serializer.Done()
+ if pWrapper != nil {
+ pWrapper.close()
+ }
+ if bWrapper != nil {
+ bWrapper.close()
+ }
+ if rWrapper != nil {
+ rWrapper.close()
+ }
+ if idlenessMgr != nil {
+ idlenessMgr.Close()
+ }
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
@@ -1186,7 +1307,7 @@
cc *ClientConn
dopts dialOptions
- acbw *acBalancerWrapper
+ acbw balancer.SubConn
scopts balancer.NewSubConnOptions
// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
@@ -1224,7 +1345,7 @@
} else {
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.acbw.updateState(s, lastErr)
+ ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1728,7 +1849,7 @@
if err != nil {
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
} else {
- channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget)
+ channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
@@ -1886,3 +2007,32 @@
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
return nil
}
+
+// initResolverWrapper creates a ccResolverWrapper, which builds the name
+// resolver. This method grabs the lock to assign the newly built resolver
+// wrapper to the cc.resolverWrapper field.
+func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
+ rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
+ target: cc.parsedTarget,
+ builder: cc.resolverBuilder,
+ bOpts: resolver.BuildOptions{
+ DisableServiceConfig: cc.dopts.disableServiceConfig,
+ DialCreds: creds,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ },
+ channelzID: cc.channelzID,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to build resolver: %v", err)
+ }
+ // Resolver implementations may report state update or error inline when
+ // built (or right after), and this is handled in cc.updateResolverState.
+ // Also, an error from the resolver might lead to a re-resolution request
+ // from the balancer, which is handled in resolveNow() where
+ // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
+ cc.mu.Lock()
+ cc.resolverWrapper = rw
+ cc.mu.Unlock()
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 08476ad..11b1061 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -25,13 +25,7 @@
"strconv"
)
-// A Code is a status code defined according to the [gRPC documentation].
-//
-// Only the codes defined as consts in this package are valid codes. Do not use
-// other code values. Behavior of other codes is implementation-specific and
-// interoperability between implementations is not guaranteed.
-//
-// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
+// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
type Code uint32
const (
diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go
index afcdb8a..579adf2 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/alts.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go
@@ -43,7 +43,7 @@
const (
// hypervisorHandshakerServiceAddress represents the default ALTS gRPC
// handshaker service address in the hypervisor.
- hypervisorHandshakerServiceAddress = "dns:///metadata.google.internal.:8080"
+ hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080"
// defaultTimeout specifies the server handshake timeout.
defaultTimeout = 30.0 * time.Second
// The following constants specify the minimum and maximum acceptable
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 5dafd34..877b7cd 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -44,25 +44,10 @@
return "tls"
}
-// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
-func cipherSuiteLookup(cipherSuiteID uint16) string {
- for _, s := range tls.CipherSuites() {
- if s.ID == cipherSuiteID {
- return s.Name
- }
- }
- for _, s := range tls.InsecureCipherSuites() {
- if s.ID == cipherSuiteID {
- return s.Name
- }
- }
- return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
-}
-
// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup(t.State.CipherSuite),
+ StandardName: cipherSuiteLookup[t.State.CipherSuite],
}
// Currently there's no way to get LocalCertificate info from tls package.
if len(t.State.PeerCertificates) > 0 {
@@ -153,39 +138,10 @@
return nil
}
-// The following cipher suites are forbidden for use with HTTP/2 by
-// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
-var tls12ForbiddenCipherSuites = map[uint16]struct{}{
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: {},
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: {},
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {},
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {},
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {},
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {},
-}
-
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
- // If the user did not configure a MinVersion and did not configure a
- // MaxVersion < 1.2, use MinVersion=1.2, which is required by
- // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
- if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
- tc.config.MinVersion = tls.VersionTLS12
- }
- // If the user did not configure CipherSuites, use all "secure" cipher
- // suites reported by the TLS package, but remove some explicitly forbidden
- // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
- if tc.config.CipherSuites == nil {
- for _, cs := range tls.CipherSuites() {
- if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
- tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
- }
- }
- }
return tc
}
@@ -249,3 +205,32 @@
LocalCertificate []byte
RemoteCertificate []byte
}
+
+var cipherSuiteLookup = map[uint16]string{
+ tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+ tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
+ tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+ tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+ tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
+ tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
+ tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
+}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index ba24261..cfc9fd8 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -46,7 +46,6 @@
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
- internal.WithRecvBufferPool = withRecvBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -64,6 +63,7 @@
block bool
returnLastError bool
timeout time.Duration
+ scChan <-chan ServiceConfig
authority string
binaryLogger binarylog.Logger
copts transport.ConnectOptions
@@ -250,6 +250,19 @@
})
}
+// WithServiceConfig returns a DialOption which has a channel to read the
+// service configuration.
+//
+// Deprecated: service config should be received through name resolver or via
+// WithDefaultServiceConfig, as specified at
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
+// removed in a future 1.x release.
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.scChan = c
+ })
+}
+
// WithConnectParams configures the ClientConn to use the provided ConnectParams
// for creating and maintaining connections to servers.
//
@@ -400,17 +413,6 @@
// connections. If FailOnNonTempDialError() is set to true, and an error is
// returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
-//
-// Note: All supported releases of Go (as of December 2023) override the OS
-// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
-// with OS defaults for keepalive time and interval, use a net.Dialer that sets
-// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
-// option to true from the Control field. For a concrete example of how to do
-// this, see internal.NetDialerWithTCPKeepalive().
-//
-// For more information, please see [issue 23459] in the Go github repo.
-//
-// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.Dialer = f
@@ -485,7 +487,7 @@
// the RPCs.
func WithUserAgent(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s + " " + grpcUA
+ o.copts.UserAgent = s
})
}
@@ -635,16 +637,14 @@
func defaultDialOptions() dialOptions {
return dialOptions{
- copts: transport.ConnectOptions{
- ReadBufferSize: defaultReadBufSize,
- WriteBufferSize: defaultWriteBufSize,
- UseProxy: true,
- UserAgent: grpcUA,
- },
- bs: internalbackoff.DefaultExponential,
healthCheckFunc: internal.HealthCheckFunc,
- idleTimeout: 30 * time.Minute,
- recvBufferPool: nopBufferPool{},
+ copts: transport.ConnectOptions{
+ WriteBufferSize: defaultWriteBufSize,
+ ReadBufferSize: defaultReadBufSize,
+ UseProxy: true,
+ },
+ recvBufferPool: nopBufferPool{},
+ idleTimeout: 30 * time.Minute,
}
}
@@ -705,13 +705,11 @@
// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
// cases, the shared buffer pool will be ignored.
//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
- return withRecvBufferPool(bufferPool)
-}
-
-func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.recvBufferPool = bufferPool
})
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 11f9166..4399c3d 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -18,10 +18,7 @@
// Package buffer provides an implementation of an unbounded buffer.
package buffer
-import (
- "errors"
- "sync"
-)
+import "sync"
// Unbounded is an implementation of an unbounded buffer which does not use
// extra goroutines. This is typically used for passing updates from one entity
@@ -39,7 +36,6 @@
type Unbounded struct {
c chan any
closed bool
- closing bool
mu sync.Mutex
backlog []any
}
@@ -49,32 +45,32 @@
return &Unbounded{c: make(chan any, 1)}
}
-var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
-
// Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t any) error {
+func (b *Unbounded) Put(t any) {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closing {
- return errBufferClosed
+ if b.closed {
+ return
}
if len(b.backlog) == 0 {
select {
case b.c <- t:
- return nil
+ return
default:
}
}
b.backlog = append(b.backlog, t)
- return nil
}
-// Load sends the earliest buffered data, if any, onto the read channel returned
-// by Get(). Users are expected to call this every time they successfully read a
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@@ -82,8 +78,6 @@
b.backlog = b.backlog[1:]
default:
}
- } else if b.closing && !b.closed {
- close(b.c)
}
}
@@ -94,23 +88,18 @@
// send the next buffered value onto the channel if there is any.
//
// If the unbounded buffer is closed, the read channel returned by this method
-// is closed after all data is drained.
+// is closed.
func (b *Unbounded) Get() <-chan any {
return b.c
}
-// Close closes the unbounded buffer. No subsequent data may be Put(), and the
-// channel returned from Get() will be closed after all the data is read and
-// Load() is called for the final time.
+// Close closes the unbounded buffer.
func (b *Unbounded) Close() {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closing {
+ if b.closed {
return
}
- b.closing = true
- if len(b.backlog) == 0 {
- b.closed = true
- close(b.c)
- }
+ b.closed = true
+ close(b.c)
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index fc094f3..5395e77 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -31,7 +31,6 @@
"time"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
)
const (
@@ -59,12 +58,6 @@
}
}
-func init() {
- internal.ChannelzTurnOffForTesting = func() {
- atomic.StoreInt32(&curState, 0)
- }
-}
-
// IsOn returns whether channelz data collection is on.
func IsOn() bool {
return atomic.LoadInt32(&curState) == 1
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 685a3cb..3cf10dd 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -36,6 +36,9 @@
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
+ // PickFirstLBConfig is set if we should support configuration of the
+ // pick_first LB policy.
+ PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
// LeastRequestLB is set if we should support the least_request_experimental
// LB policy, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 29f234a..02b4b6a 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -50,7 +50,46 @@
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+ // XDSRingHash indicates whether ring hash support is enabled, which can be
+ // disabled by setting the environment variable
+ // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+ XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
+ // XDSClientSideSecurity is used to control processing of security
+ // configuration on the client-side.
+ //
+ // Note that there is no env var protection for the server-side because we
+ // have a brand new API on the server-side and users explicitly need to use
+ // the new API to get security integration on the server.
+ XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
+ // XDSAggregateAndDNS indicates whether processing of aggregated cluster and
+ // DNS cluster is enabled, which can be disabled by setting the environment
+ // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+ // to "false".
+ XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
+
+ // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+ // which can be disabled by setting the environment variable
+ // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+ XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
+ // XDSOutlierDetection indicates whether outlier detection support is
+ // enabled, which can be disabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
+ XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
+ // XDSFederation indicates whether federation support is enabled, which can
+ // be enabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
+ XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
+
+ // XDSRLS indicates whether processing of Cluster Specifier plugins and
+ // support for the RLS CLuster Specifier is enabled, which can be disabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+ // "false".
+ XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+ // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
+ // can be disabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
+ XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
deleted file mode 100644
index 7f7044e..0000000
--- a/vendor/google.golang.org/grpc/internal/experimental.go
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-var (
- // WithRecvBufferPool is implemented by the grpc package and returns a dial
- // option to configure a shared buffer pool for a grpc.ClientConn.
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
-
- // RecvBufferPool is implemented by the grpc package and returns a server
- // option to configure a shared buffer pool for a grpc.Server.
- RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
-)
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index f7f40a1..900917d 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -20,6 +20,7 @@
import (
"context"
+ "sync"
"google.golang.org/grpc/internal/buffer"
)
@@ -37,6 +38,8 @@
done chan struct{}
callbacks *buffer.Unbounded
+ closedMu sync.Mutex
+ closed bool
}
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
@@ -62,34 +65,56 @@
// callbacks to be executed by the serializer. It is not possible to add
// callbacks once the context passed to NewCallbackSerializer is cancelled.
func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- return cs.callbacks.Put(f) == nil
+ cs.closedMu.Lock()
+ defer cs.closedMu.Unlock()
+
+ if cs.closed {
+ return false
+ }
+ cs.callbacks.Put(f)
+ return true
}
func (cs *CallbackSerializer) run(ctx context.Context) {
- defer close(cs.done)
+ var backlog []func(context.Context)
- // TODO: when Go 1.21 is the oldest supported version, this loop and Close
- // can be replaced with:
- //
- // context.AfterFunc(ctx, cs.callbacks.Close)
+ defer close(cs.done)
for ctx.Err() == nil {
select {
case <-ctx.Done():
// Do nothing here. Next iteration of the for loop will not happen,
// since ctx.Err() would be non-nil.
- case cb := <-cs.callbacks.Get():
+ case callback, ok := <-cs.callbacks.Get():
+ if !ok {
+ return
+ }
cs.callbacks.Load()
- cb.(func(context.Context))(ctx)
+ callback.(func(ctx context.Context))(ctx)
}
}
- // Close the buffer to prevent new callbacks from being added.
+ // Fetch pending callbacks if any, and execute them before returning from
+ // this method and closing cs.done.
+ cs.closedMu.Lock()
+ cs.closed = true
+ backlog = cs.fetchPendingCallbacks()
cs.callbacks.Close()
+ cs.closedMu.Unlock()
+ for _, b := range backlog {
+ b(ctx)
+ }
+}
- // Run all pending callbacks.
- for cb := range cs.callbacks.Get() {
- cs.callbacks.Load()
- cb.(func(context.Context))(ctx)
+func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
+ var backlog []func(context.Context)
+ for {
+ select {
+ case b := <-cs.callbacks.Get():
+ backlog = append(backlog, b.(func(context.Context)))
+ cs.callbacks.Load()
+ default:
+ return backlog
+ }
}
}
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index fe49cb7..6c27247 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -26,6 +26,8 @@
"sync"
"sync/atomic"
"time"
+
+ "google.golang.org/grpc/grpclog"
)
// For overriding in unit tests.
@@ -37,12 +39,27 @@
// and exit from idle mode.
type Enforcer interface {
ExitIdleMode() error
- EnterIdleMode()
+ EnterIdleMode() error
}
-// Manager implements idleness detection and calls the configured Enforcer to
-// enter/exit idle mode when appropriate. Must be created by NewManager.
-type Manager struct {
+// Manager defines the functionality required to track RPC activity on a
+// channel.
+type Manager interface {
+ OnCallBegin() error
+ OnCallEnd()
+ Close()
+}
+
+type noopManager struct{}
+
+func (noopManager) OnCallBegin() error { return nil }
+func (noopManager) OnCallEnd() {}
+func (noopManager) Close() {}
+
+// manager implements the Manager interface. It uses atomic operations to
+// synchronize access to shared state and a mutex to guarantee mutual exclusion
+// in a critical section.
+type manager struct {
// State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
@@ -52,7 +69,8 @@
// Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that.
enforcer Enforcer // Functionality provided by grpc.ClientConn.
- timeout time.Duration
+ timeout int64 // Idle timeout duration nanos stored as an int64.
+ logger grpclog.LoggerV2
// idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions:
@@ -70,48 +88,57 @@
timer *time.Timer
}
-// NewManager creates a new idleness manager implementation for the
-// given idle timeout. It begins in idle mode.
-func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
- return &Manager{
- enforcer: enforcer,
- timeout: timeout,
- actuallyIdle: true,
- activeCallsCount: -math.MaxInt32,
- }
+// ManagerOptions is a collection of options used by
+// NewManager.
+type ManagerOptions struct {
+ Enforcer Enforcer
+ Timeout time.Duration
+ Logger grpclog.LoggerV2
}
-// resetIdleTimerLocked resets the idle timer to the given duration. Called
-// when exiting idle mode or when the timer fires and we need to reset it.
-func (m *Manager) resetIdleTimerLocked(d time.Duration) {
- if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout.
+func NewManager(opts ManagerOptions) Manager {
+ if opts.Timeout == 0 {
+ return noopManager{}
+ }
+
+ m := &manager{
+ enforcer: opts.Enforcer,
+ timeout: int64(opts.Timeout),
+ logger: opts.Logger,
+ }
+ m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
+ return m
+}
+
+// resetIdleTimer resets the idle timer to the given duration. This method
+// should only be called from the timer callback.
+func (m *manager) resetIdleTimer(d time.Duration) {
+ m.idleMu.Lock()
+ defer m.idleMu.Unlock()
+
+ if m.timer == nil {
+ // Only close sets timer to nil. We are done.
return
}
// It is safe to ignore the return value from Reset() because this method is
- // only ever called from the timer callback or when exiting idle mode.
- if m.timer != nil {
- m.timer.Stop()
- }
- m.timer = timeAfterFunc(d, m.handleIdleTimeout)
-}
-
-func (m *Manager) resetIdleTimer(d time.Duration) {
- m.idleMu.Lock()
- defer m.idleMu.Unlock()
- m.resetIdleTimerLocked(d)
+ // only ever called from the timer callback, which means the timer has
+ // already fired.
+ m.timer.Reset(d)
}
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
// configured idle timeout. The channel is considered inactive if there are no
// ongoing calls and no RPC activity since the last time the timer fired.
-func (m *Manager) handleIdleTimeout() {
+func (m *manager) handleIdleTimeout() {
if m.isClosed() {
return
}
if atomic.LoadInt32(&m.activeCallsCount) > 0 {
- m.resetIdleTimer(m.timeout)
+ m.resetIdleTimer(time.Duration(m.timeout))
return
}
@@ -121,12 +148,24 @@
// Set the timer to fire after a duration of idle timeout, calculated
// from the time the most recent RPC completed.
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
- m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
+ m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
return
}
- // Now that we've checked that there has been no activity, attempt to enter
- // idle mode, which is very likely to succeed.
+ // This CAS operation is extremely likely to succeed given that there has
+ // been no activity since the last time we were here. Setting the
+ // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
+ // channel is either in idle mode or is trying to get there.
+ if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+ // This CAS operation can fail if an RPC started after we checked for
+ // activity at the top of this method, or one was ongoing from before
+ // the last time we were here. In both case, reset the timer and return.
+ m.resetIdleTimer(time.Duration(m.timeout))
+ return
+ }
+
+ // Now that we've set the active calls count to -math.MaxInt32, it's time to
+ // actually move to idle mode.
if m.tryEnterIdleMode() {
// Successfully entered idle mode. No timer needed until we exit idle.
return
@@ -135,7 +174,8 @@
// Failed to enter idle mode due to a concurrent RPC that kept the channel
// active, or because of an error from the channel. Undo the attempt to
// enter idle, and reset the timer to try again later.
- m.resetIdleTimer(m.timeout)
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+ m.resetIdleTimer(time.Duration(m.timeout))
}
// tryEnterIdleMode instructs the channel to enter idle mode. But before
@@ -145,49 +185,36 @@
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *Manager) tryEnterIdleMode() bool {
- // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
- // that the channel is either in idle mode or is trying to get there.
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
- // This CAS operation can fail if an RPC started after we checked for
- // activity in the timer handler, or one was ongoing from before the
- // last time the timer fired, or if a test is attempting to enter idle
- // mode without checking. In all cases, abort going into idle mode.
- return false
- }
- // N.B. if we fail to enter idle mode after this, we must re-add
- // math.MaxInt32 to m.activeCallsCount.
-
+func (m *manager) tryEnterIdleMode() bool {
m.idleMu.Lock()
defer m.idleMu.Unlock()
if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
// We raced and lost to a new RPC. Very rare, but stop entering idle.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
- // A very short RPC could have come in (and also finished) after we
+ // An very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
- // No new RPCs have come in since we set the active calls count value to
- // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
- // unconditionally now.
- m.enforcer.EnterIdleMode()
+ // No new RPCs have come in since we last set the active calls count value
+ // -math.MaxInt32 in the timer callback. And since we have the lock, it is
+ // safe to enter idle mode now.
+ if err := m.enforcer.EnterIdleMode(); err != nil {
+ m.logger.Errorf("Failed to enter idle mode: %v", err)
+ return false
+ }
+
+ // Successfully entered idle mode.
m.actuallyIdle = true
return true
}
-func (m *Manager) EnterIdleModeForTesting() {
- m.tryEnterIdleMode()
-}
-
// OnCallBegin is invoked at the start of every RPC.
-func (m *Manager) OnCallBegin() error {
+func (m *manager) OnCallBegin() error {
if m.isClosed() {
return nil
}
@@ -200,7 +227,7 @@
// Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC.
- if err := m.ExitIdleMode(); err != nil {
+ if err := m.exitIdleMode(); err != nil {
// Undo the increment to calls count, and return an error causing the
// RPC to fail.
atomic.AddInt32(&m.activeCallsCount, -1)
@@ -211,30 +238,28 @@
return nil
}
-// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
-// internal state.
-func (m *Manager) ExitIdleMode() error {
- // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+// exitIdleMode instructs the channel to exit idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+func (m *manager) exitIdleMode() error {
m.idleMu.Lock()
defer m.idleMu.Unlock()
- if m.isClosed() || !m.actuallyIdle {
- // This can happen in three scenarios:
+ if !m.actuallyIdle {
+ // This can happen in two scenarios:
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
// here. The first one to get the lock would got the channel to exit idle.
- // - Channel is not in idle mode, and the user calls Connect which calls
- // m.ExitIdleMode.
//
- // In any case, there is nothing to do here.
+ // Either way, nothing to do here.
return nil
}
if err := m.enforcer.ExitIdleMode(); err != nil {
- return fmt.Errorf("failed to exit idle mode: %w", err)
+ return fmt.Errorf("channel failed to exit idle mode: %v", err)
}
// Undo the idle entry process. This also respects any new RPC attempts.
@@ -242,12 +267,12 @@
m.actuallyIdle = false
// Start a new timer to fire after the configured idle timeout.
- m.resetIdleTimerLocked(m.timeout)
+ m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
return nil
}
// OnCallEnd is invoked at the end of every RPC.
-func (m *Manager) OnCallEnd() {
+func (m *manager) OnCallEnd() {
if m.isClosed() {
return
}
@@ -262,17 +287,15 @@
atomic.AddInt32(&m.activeCallsCount, -1)
}
-func (m *Manager) isClosed() bool {
+func (m *manager) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
-func (m *Manager) Close() {
+func (m *manager) Close() {
atomic.StoreInt32(&m.closed, 1)
m.idleMu.Lock()
- if m.timer != nil {
- m.timer.Stop()
- m.timer = nil
- }
+ m.timer.Stop()
+ m.timer = nil
m.idleMu.Unlock()
}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 2549fe8..0d94c63 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -73,11 +73,6 @@
// xDS-enabled server invokes this method on a grpc.Server when a particular
// listener moves to "not-serving" mode.
DrainServerTransports any // func(*grpc.Server, string)
- // IsRegisteredMethod returns whether the passed in method is registered as
- // a method on the server.
- IsRegisteredMethod any // func(*grpc.Server, string) bool
- // ServerFromContext returns the server from the context.
- ServerFromContext any // func(context.Context) *grpc.Server
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
@@ -182,12 +177,10 @@
GRPCResolverSchemeExtraMetadata string = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
- EnterIdleModeForTesting any // func(*grpc.ClientConn)
+ EnterIdleModeForTesting any // func(*grpc.ClientConn) error
// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
-
- ChannelzTurnOffForTesting func()
)
// HealthChecker defines the signature of the client-side LB channel health checking function.
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index b66dcb2..99e1e5b 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -23,6 +23,7 @@
import (
"context"
"encoding/json"
+ "errors"
"fmt"
"net"
"os"
@@ -36,7 +37,6 @@
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/internal/resolver/dns/internal"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
@@ -47,11 +47,15 @@
var logger = grpclog.Component("dns")
+// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
+// single variable for testing the resolver?
+var (
+ newTimer = time.NewTimer
+ newTimerDNSResRate = time.NewTimer
+)
+
func init() {
resolver.Register(NewBuilder())
- internal.TimeAfterFunc = time.After
- internal.NewNetResolver = newNetResolver
- internal.AddressDialer = addressDialer
}
const (
@@ -66,6 +70,23 @@
txtAttribute = "grpc_config="
)
+var (
+ errMissingAddr = errors.New("dns resolver: missing address")
+
+ // Addresses ending with a colon that is supposed to be the separator
+ // between host and port is not allowed. E.g. "::" is a valid address as
+ // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
+ // a colon as the host and port separator
+ errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+var (
+ defaultResolver netResolver = net.DefaultResolver
+ // To prevent excessive re-resolution, we enforce a rate limit on DNS
+ // resolution requests.
+ minDNSResRate = 30 * time.Second
+)
+
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
return func(ctx context.Context, network, _ string) (net.Conn, error) {
var dialer net.Dialer
@@ -73,11 +94,7 @@
}
}
-var newNetResolver = func(authority string) (internal.NetResolver, error) {
- if authority == "" {
- return net.DefaultResolver, nil
- }
-
+var newNetResolver = func(authority string) (netResolver, error) {
host, port, err := parseTarget(authority, defaultDNSSvrPort)
if err != nil {
return nil, err
@@ -87,7 +104,7 @@
return &net.Resolver{
PreferGo: true,
- Dial: internal.AddressDialer(authorityWithPort),
+ Dial: addressDialer(authorityWithPort),
}, nil
}
@@ -125,9 +142,13 @@
disableServiceConfig: opts.DisableServiceConfig,
}
- d.resolver, err = internal.NewNetResolver(target.URL.Host)
- if err != nil {
- return nil, err
+ if target.URL.Host == "" {
+ d.resolver = defaultResolver
+ } else {
+ d.resolver, err = newNetResolver(target.URL.Host)
+ if err != nil {
+ return nil, err
+ }
}
d.wg.Add(1)
@@ -140,6 +161,12 @@
return "dns"
}
+type netResolver interface {
+ LookupHost(ctx context.Context, host string) (addrs []string, err error)
+ LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+ LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
// deadResolver is a resolver that does nothing.
type deadResolver struct{}
@@ -151,7 +178,7 @@
type dnsResolver struct {
host string
port string
- resolver internal.NetResolver
+ resolver netResolver
ctx context.Context
cancel context.CancelFunc
cc resolver.ClientConn
@@ -196,27 +223,29 @@
err = d.cc.UpdateState(*state)
}
- var waitTime time.Duration
+ var timer *time.Timer
if err == nil {
// Success resolving, wait for the next ResolveNow. However, also wait 30
// seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
- waitTime = internal.MinResolutionRate
+ timer = newTimerDNSResRate(minDNSResRate)
select {
case <-d.ctx.Done():
+ timer.Stop()
return
case <-d.rn:
}
} else {
// Poll on an error found in DNS Resolver or an error received from
// ClientConn.
- waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
backoffIndex++
}
select {
case <-d.ctx.Done():
+ timer.Stop()
return
- case <-internal.TimeAfterFunc(waitTime):
+ case <-timer.C:
}
}
}
@@ -358,7 +387,7 @@
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
func parseTarget(target, defaultPort string) (host, port string, err error) {
if target == "" {
- return "", "", internal.ErrMissingAddr
+ return "", "", errMissingAddr
}
if ip := net.ParseIP(target); ip != nil {
// target is an IPv4 or IPv6(without brackets) address
@@ -368,7 +397,7 @@
if port == "" {
// If the port field is empty (target ends with colon), e.g. "[::1]:",
// this is an error.
- return "", "", internal.ErrEndsWithColon
+ return "", "", errEndsWithColon
}
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
deleted file mode 100644
index c7fc557..0000000
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains functionality internal to the dns resolver package.
-package internal
-
-import (
- "context"
- "errors"
- "net"
- "time"
-)
-
-// NetResolver groups the methods on net.Resolver that are used by the DNS
-// resolver implementation. This allows the default net.Resolver instance to be
-// overidden from tests.
-type NetResolver interface {
- LookupHost(ctx context.Context, host string) (addrs []string, err error)
- LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
- LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
-
-var (
- // ErrMissingAddr is the error returned when building a DNS resolver when
- // the provided target name is empty.
- ErrMissingAddr = errors.New("dns resolver: missing address")
-
- // ErrEndsWithColon is the error returned when building a DNS resolver when
- // the provided target name ends with a colon that is supposed to be the
- // separator between host and port. E.g. "::" is a valid address as it is
- // an IPv6 address (host only) and "[::]:" is invalid as it ends with a
- // colon as the host and port separator
- ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-// The following vars are overridden from tests.
-var (
- // MinResolutionRate is the minimum rate at which re-resolutions are
- // allowed. This helps to prevent excessive re-resolution.
- MinResolutionRate = 30 * time.Second
-
- // TimeAfterFunc is used by the DNS resolver to wait for the given duration
- // to elapse. In non-test code, this is implemented by time.After. In test
- // code, this can be used to control the amount of time the resolver is
- // blocked waiting for the duration to elapse.
- TimeAfterFunc func(time.Duration) <-chan time.Time
-
- // NewNetResolver returns the net.Resolver instance for the given target.
- NewNetResolver func(string) (NetResolver, error)
-
- // AddressDialer is the dialer used to dial the DNS server. It accepts the
- // Host portion of the URL corresponding to the user's dial target and
- // returns a dial function.
- AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
-)
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go
deleted file mode 100644
index aeffd3e..0000000
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//go:build !unix
-
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "net"
-)
-
-// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
-func NetDialerWithTCPKeepalive() *net.Dialer {
- return &net.Dialer{}
-}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
deleted file mode 100644
index 078137b..0000000
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ /dev/null
@@ -1,54 +0,0 @@
-//go:build unix
-
-/*
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "net"
- "syscall"
- "time"
-
- "golang.org/x/sys/unix"
-)
-
-// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
-// the underlying connection with OS default values for keepalive parameters.
-//
-// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
-// appropriate Go version becomes less than our least supported Go version, we
-// should look into using the new API to make things more straightforward.
-func NetDialerWithTCPKeepalive() *net.Dialer {
- return &net.Dialer{
- // Setting a negative value here prevents the Go stdlib from overriding
- // the values of TCP keepalive time and interval. It also prevents the
- // Go stdlib from enabling TCP keepalives by default.
- KeepAlive: time.Duration(-1),
- // This method is called after the underlying network socket is created,
- // but before dialing the socket (or calling its connect() method). The
- // combination of unconditionally enabling TCP keepalives here, and
- // disabling the overriding of TCP keepalive parameters by setting the
- // KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
- Control: func(_, _ string, c syscall.RawConn) error {
- return c.Control(func(fd uintptr) {
- unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
- })
- },
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index a9d70e2..17f7a21 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -75,25 +75,11 @@
return nil, errors.New(msg)
}
- var localAddr net.Addr
- if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
- localAddr, _ = la.(net.Addr)
- }
- var authInfo credentials.AuthInfo
- if r.TLS != nil {
- authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
- }
- p := peer.Peer{
- Addr: strAddr(r.RemoteAddr),
- LocalAddr: localAddr,
- AuthInfo: authInfo,
- }
st := &serverHandlerTransport{
rw: w,
req: r,
closedCh: make(chan struct{}),
writes: make(chan func()),
- peer: p,
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
@@ -148,8 +134,6 @@
headerMD metadata.MD
- peer peer.Peer
-
closeOnce sync.Once
closedCh chan struct{} // closed on Close
@@ -181,13 +165,7 @@
})
}
-func (ht *serverHandlerTransport) Peer() *peer.Peer {
- return &peer.Peer{
- Addr: ht.peer.Addr,
- LocalAddr: ht.peer.LocalAddr,
- AuthInfo: ht.peer.AuthInfo,
- }
-}
+func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
// the empty string if unknown.
@@ -369,8 +347,10 @@
return err
}
-func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
+
+ ctx := ht.req.Context()
var cancel context.CancelFunc
if ht.timeoutSet {
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -390,19 +370,34 @@
ht.Close(errors.New("request is done processing"))
}()
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
+
s := &Stream{
- id: 0, // irrelevant
- ctx: ctx,
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
+ id: 0, // irrelevant
+ requestRead: func(int) {},
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ contentSubtype: ht.contentSubtype,
+ }
+ pr := &peer.Peer{
+ Addr: ht.RemoteAddr(),
+ }
+ if req.TLS != nil {
+ pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+ }
+ ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
+ s.ctx = peer.NewContext(ctx, pr)
+ for _, sh := range ht.stats {
+ s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+ inHeader := &stats.InHeader{
+ FullMethod: s.method,
+ RemoteAddr: ht.RemoteAddr(),
+ Compression: s.recvCompress,
+ }
+ sh.HandleRPC(s.ctx, inHeader)
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 59f6765..d6f5c49 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -36,7 +36,6 @@
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/grpclog"
@@ -44,7 +43,7 @@
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
istatus "google.golang.org/grpc/internal/status"
- isyscall "google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@@ -177,7 +176,7 @@
if networkType == "tcp" && useProxy {
return proxyDial(ctx, address, grpcUA)
}
- return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
+ return (&net.Dialer{}).DialContext(ctx, networkType, address)
}
func isTemporary(err error) bool {
@@ -263,7 +262,7 @@
}
keepaliveEnabled := false
if kp.Time != infinity {
- if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+ if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
keepaliveEnabled = true
@@ -494,9 +493,8 @@
func (t *http2Client) getPeer() *peer.Peer {
return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
- LocalAddr: t.localAddr,
+ Addr: t.remoteAddr,
+ AuthInfo: t.authInfo, // Can be nil
}
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 680c9eb..6fa1eb4 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -68,15 +68,18 @@
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
- done chan struct{}
- conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- loopyWriterDone chan struct{}
- peer peer.Peer
- inTapHandle tap.ServerInHandle
- framer *framer
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
+ ctx context.Context
+ done chan struct{}
+ conn net.Conn
+ loopy *loopyWriter
+ readerDone chan struct{} // sync point to enable testing.
+ writerDone chan struct{} // sync point to enable testing.
+ remoteAddr net.Addr
+ localAddr net.Addr
+ authInfo credentials.AuthInfo // auth info about the connection
+ inTapHandle tap.ServerInHandle
+ framer *framer
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
@@ -240,18 +243,16 @@
}
done := make(chan struct{})
- peer := peer.Peer{
- Addr: conn.RemoteAddr(),
- LocalAddr: conn.LocalAddr(),
- AuthInfo: authInfo,
- }
t := &http2Server{
+ ctx: setConnection(context.Background(), rawConn),
done: done,
conn: conn,
- peer: peer,
+ remoteAddr: conn.RemoteAddr(),
+ localAddr: conn.LocalAddr(),
+ authInfo: authInfo,
framer: framer,
readerDone: make(chan struct{}),
- loopyWriterDone: make(chan struct{}),
+ writerDone: make(chan struct{}),
maxStreams: config.MaxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
@@ -266,6 +267,8 @@
bufferPool: newBufferPool(),
}
t.logger = prefixLoggerForServerTransport(t)
+ // Add peer information to the http2server context.
+ t.ctx = peer.NewContext(t.ctx, t.getPeer())
t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
@@ -274,7 +277,15 @@
updateFlowControl: t.updateFlowControl,
}
}
- t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr))
+ for _, sh := range t.stats {
+ t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ })
+ connBegin := &stats.ConnBegin{}
+ sh.HandleConn(t.ctx, connBegin)
+ }
+ t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
if err != nil {
return nil, err
}
@@ -323,7 +334,7 @@
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
t.loopy.run()
- close(t.loopyWriterDone)
+ close(t.writerDone)
}()
go t.keepalive()
return t, nil
@@ -331,7 +342,7 @@
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -358,11 +369,10 @@
buf := newRecvBuffer()
s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- headerWireLength: int(frame.Header().Length),
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
}
var (
// if false, content-type was missing or invalid
@@ -501,9 +511,9 @@
s.state = streamReadDone
}
if timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
+ s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
} else {
- s.ctx, s.cancel = context.WithCancel(ctx)
+ s.ctx, s.cancel = context.WithCancel(t.ctx)
}
// Attach the received metadata to the context.
@@ -582,6 +592,18 @@
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
+ for _, sh := range t.stats {
+ s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+ inHeader := &stats.InHeader{
+ FullMethod: s.method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: s.recvCompress,
+ WireLength: int(frame.Header().Length),
+ Header: mdata.Copy(),
+ }
+ sh.HandleRPC(s.ctx, inHeader)
+ }
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
@@ -607,11 +629,8 @@
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
- defer func() {
- <-t.loopyWriterDone
- close(t.readerDone)
- }()
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
+ defer close(t.readerDone)
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
@@ -645,7 +664,7 @@
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if err := t.operateHeaders(ctx, frame, handle); err != nil {
+ if err := t.operateHeaders(frame, handle); err != nil {
t.Close(err)
break
}
@@ -1223,6 +1242,10 @@
for _, s := range streams {
s.cancel()
}
+ for _, sh := range t.stats {
+ connEnd := &stats.ConnEnd{}
+ sh.HandleConn(t.ctx, connEnd)
+ }
}
// deleteStream deletes the stream s from transport's active streams.
@@ -1288,6 +1311,10 @@
})
}
+func (t *http2Server) RemoteAddr() net.Addr {
+ return t.remoteAddr
+}
+
func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
@@ -1370,11 +1397,11 @@
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
LocalFlowControlWindow: int64(t.fc.getSize()),
SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.peer.LocalAddr,
- RemoteAddr: t.peer.Addr,
+ LocalAddr: t.localAddr,
+ RemoteAddr: t.remoteAddr,
// RemoteName :
}
- if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok {
+ if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
s.Security = au.GetSecurityValue()
}
s.RemoteFlowControlWindow = t.getOutFlowWindow()
@@ -1406,12 +1433,10 @@
}
}
-// Peer returns the peer of the transport.
-func (t *http2Server) Peer() *peer.Peer {
+func (t *http2Server) getPeer() *peer.Peer {
return &peer.Peer{
- Addr: t.peer.Addr,
- LocalAddr: t.peer.LocalAddr,
- AuthInfo: t.peer.AuthInfo, // Can be nil
+ Addr: t.remoteAddr,
+ AuthInfo: t.authInfo, // Can be nil
}
}
@@ -1436,6 +1461,6 @@
// SetConnection adds the connection to the context to be able to get
// information about the destination ip and port for an incoming RPC. This also
// allows any unary or streaming interceptors to see the connection.
-func SetConnection(ctx context.Context, conn net.Conn) context.Context {
+func setConnection(ctx context.Context, conn net.Conn) context.Context {
return context.WithValue(ctx, connectionKey{}, conn)
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 24fa103..4159619 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -28,8 +28,6 @@
"net/http"
"net/http/httputil"
"net/url"
-
- "google.golang.org/grpc/internal"
)
const proxyAuthHeaderKey = "Proxy-Authorization"
@@ -114,7 +112,7 @@
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
// is necessary, dials, does the HTTP CONNECT handshake, and returns the
// connection.
-func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
+func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
newAddr := addr
proxyURL, err := mapAddress(addr)
if err != nil {
@@ -124,15 +122,15 @@
newAddr = proxyURL.Host
}
- conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
+ conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
if err != nil {
- return nil, err
+ return
}
- if proxyURL == nil {
+ if proxyURL != nil {
// proxy is disabled if proxyURL is nil.
- return conn, err
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
}
- return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ return
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index b7b8fec..aac056e 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -37,7 +37,6 @@
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
@@ -266,8 +265,7 @@
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value). Not valid on server side.
- headerValid bool
- headerWireLength int // Only set on server side.
+ headerValid bool
// hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex
@@ -427,12 +425,6 @@
return s.ctx
}
-// SetContext sets the context of the stream. This will be deleted once the
-// stats handler callouts all move to gRPC layer.
-func (s *Stream) SetContext(ctx context.Context) {
- s.ctx = ctx
-}
-
// Method returns the method for the stream.
func (s *Stream) Method() string {
return s.method
@@ -445,12 +437,6 @@
return s.status
}
-// HeaderWireLength returns the size of the headers of the stream as received
-// from the wire. Valid only on the server.
-func (s *Stream) HeaderWireLength() int {
- return s.headerWireLength
-}
-
// SetHeader sets the header metadata. This can be called multiple times.
// Server side only.
// This should not be called in parallel to other data writes.
@@ -712,7 +698,7 @@
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(context.Context, func(*Stream))
+ HandleStreams(func(*Stream))
// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
@@ -731,8 +717,8 @@
// handlers will be terminated asynchronously.
Close(err error)
- // Peer returns the peer of the server transport.
- Peer() *peer.Peer
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
// Drain notifies the client this ServerTransport stops accepting new RPCs.
Drain(debugData string)
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 4944682..a2cdcaf 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -153,16 +153,14 @@
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
-// NewIncomingContext creates a new context with incoming md attached. md must
-// not be modified after calling this function.
+// NewIncomingContext creates a new context with incoming md attached.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached. If used
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata. md must not be modified after
-// calling this function.
+// overwrite any previously-appended metadata.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
}
@@ -205,8 +203,7 @@
}
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
-// key from the incoming metadata if it exists. Keys are matched in a case insensitive
-// manner.
+// key from the incoming metadata if it exists. Key must be lower-case.
//
// # Experimental
//
@@ -222,16 +219,17 @@
return copyOf(v)
}
for k, v := range md {
- // Case insenitive comparison: MD is a map, and there's no guarantee
- // that the MD attached to the context is created using our helper
- // functions.
- if strings.EqualFold(k, key) {
+ // We need to manually convert all keys to lower case, because MD is a
+ // map, and there's no guarantee that the MD attached to the context is
+ // created using our helper functions.
+ if strings.ToLower(k) == key {
return copyOf(v)
}
}
return nil
}
+// the returned slice must not be modified in place
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index a821ff9..e01d219 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -32,8 +32,6 @@
type Peer struct {
// Addr is the peer address.
Addr net.Addr
- // LocalAddr is the local address.
- LocalAddr net.Addr
// AuthInfo is the authentication information of the transport.
// It is nil if there is no transport security being used.
AuthInfo credentials.AuthInfo
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index bf56faa..236837f 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -37,6 +37,7 @@
type pickerWrapper struct {
mu sync.Mutex
done bool
+ idle bool
blockingCh chan struct{}
picker balancer.Picker
statsHandlers []stats.Handler // to record blocking picker calls
@@ -52,7 +53,11 @@
// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
pw.mu.Lock()
- if pw.done {
+ if pw.done || pw.idle {
+ // There is a small window where a picker update from the LB policy can
+ // race with the channel going to idle mode. If the picker is idle here,
+ // it is because the channel asked it to do so, and therefore it is sage
+ // to ignore the update from the LB policy.
pw.mu.Unlock()
return
}
@@ -205,15 +210,23 @@
close(pw.blockingCh)
}
-// reset clears the pickerWrapper and prepares it for being used again when idle
-// mode is exited.
-func (pw *pickerWrapper) reset() {
+func (pw *pickerWrapper) enterIdleMode() {
+ pw.mu.Lock()
+ defer pw.mu.Unlock()
+ if pw.done {
+ return
+ }
+ pw.idle = true
+}
+
+func (pw *pickerWrapper) exitIdleMode() {
pw.mu.Lock()
defer pw.mu.Unlock()
if pw.done {
return
}
pw.blockingCh = make(chan struct{})
+ pw.idle = false
}
// dropError is a wrapper error that indicates the LB policy wishes to drop the
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index 5128f93..2e9cf66 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -25,6 +25,7 @@
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
@@ -64,6 +65,19 @@
}
func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ if !envconfig.PickFirstLBConfig {
+ // Prior to supporting loadbalancing configuration, the pick_first LB
+ // policy did not implement the balancer.ConfigParser interface. This
+ // meant that if a non-empty configuration was passed to it, the service
+ // config unmarshaling code would throw a warning log, but would
+ // continue using the pick_first LB policy. The code below ensures the
+ // same behavior is retained if the env var is not set.
+ if string(js) != "{}" {
+ logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
+ }
+ return nil, nil
+ }
+
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
deleted file mode 100644
index 14aa6f2..0000000
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package dns implements a dns resolver to be installed as the default resolver
-// in grpc.
-//
-// Deprecated: this package is imported by grpc and should not need to be
-// imported directly by users.
-package dns
-
-import (
- "google.golang.org/grpc/internal/resolver/dns"
- "google.golang.org/grpc/resolver"
-)
-
-// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
-//
-// Deprecated: import grpc and use resolver.Get("dns") instead.
-func NewBuilder() resolver.Builder {
- return dns.NewBuilder()
-}
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
index f2efa2a..0a42623 100644
--- a/vendor/google.golang.org/grpc/resolver/manual/manual.go
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -78,12 +78,12 @@
func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
r.BuildCallback(target, cc, opts)
r.mu.Lock()
- defer r.mu.Unlock()
r.CC = cc
if r.lastSeenState != nil {
err := r.CC.UpdateState(*r.lastSeenState)
go r.UpdateStateCallback(err)
}
+ r.mu.Unlock()
return r, nil
}
@@ -105,22 +105,15 @@
// UpdateState calls CC.UpdateState.
func (r *Resolver) UpdateState(s resolver.State) {
r.mu.Lock()
- defer r.mu.Unlock()
- var err error
- if r.CC == nil {
- panic("cannot update state as grpc.Dial with resolver has not been called")
- }
- err = r.CC.UpdateState(s)
+ err := r.CC.UpdateState(s)
r.lastSeenState = &s
+ r.mu.Unlock()
r.UpdateStateCallback(err)
}
// ReportError calls CC.ReportError.
func (r *Resolver) ReportError(err error) {
r.mu.Lock()
- defer r.mu.Unlock()
- if r.CC == nil {
- panic("cannot report error as grpc.Dial with resolver has not been called")
- }
r.CC.ReportError(err)
+ r.mu.Unlock()
}
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
index ada5b9b..804be88 100644
--- a/vendor/google.golang.org/grpc/resolver/map.go
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -136,116 +136,3 @@
}
return ret
}
-
-type endpointNode struct {
- addrs map[string]struct{}
-}
-
-// Equal returns whether the unordered set of addrs are the same between the
-// endpoint nodes.
-func (en *endpointNode) Equal(en2 *endpointNode) bool {
- if len(en.addrs) != len(en2.addrs) {
- return false
- }
- for addr := range en.addrs {
- if _, ok := en2.addrs[addr]; !ok {
- return false
- }
- }
- return true
-}
-
-func toEndpointNode(endpoint Endpoint) endpointNode {
- en := make(map[string]struct{})
- for _, addr := range endpoint.Addresses {
- en[addr.Addr] = struct{}{}
- }
- return endpointNode{
- addrs: en,
- }
-}
-
-// EndpointMap is a map of endpoints to arbitrary values keyed on only the
-// unordered set of address strings within an endpoint. This map is not thread
-// safe, thus it is unsafe to access concurrently. Must be created via
-// NewEndpointMap; do not construct directly.
-type EndpointMap struct {
- endpoints map[*endpointNode]any
-}
-
-// NewEndpointMap creates a new EndpointMap.
-func NewEndpointMap() *EndpointMap {
- return &EndpointMap{
- endpoints: make(map[*endpointNode]any),
- }
-}
-
-// Get returns the value for the address in the map, if present.
-func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
- en := toEndpointNode(e)
- if endpoint := em.find(en); endpoint != nil {
- return em.endpoints[endpoint], true
- }
- return nil, false
-}
-
-// Set updates or adds the value to the address in the map.
-func (em *EndpointMap) Set(e Endpoint, value any) {
- en := toEndpointNode(e)
- if endpoint := em.find(en); endpoint != nil {
- em.endpoints[endpoint] = value
- return
- }
- em.endpoints[&en] = value
-}
-
-// Len returns the number of entries in the map.
-func (em *EndpointMap) Len() int {
- return len(em.endpoints)
-}
-
-// Keys returns a slice of all current map keys, as endpoints specifying the
-// addresses present in the endpoint keys, in which uniqueness is determined by
-// the unordered set of addresses. Thus, endpoint information returned is not
-// the full endpoint data (drops duplicated addresses and attributes) but can be
-// used for EndpointMap accesses.
-func (em *EndpointMap) Keys() []Endpoint {
- ret := make([]Endpoint, 0, len(em.endpoints))
- for en := range em.endpoints {
- var endpoint Endpoint
- for addr := range en.addrs {
- endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
- }
- ret = append(ret, endpoint)
- }
- return ret
-}
-
-// Values returns a slice of all current map values.
-func (em *EndpointMap) Values() []any {
- ret := make([]any, 0, len(em.endpoints))
- for _, val := range em.endpoints {
- ret = append(ret, val)
- }
- return ret
-}
-
-// find returns a pointer to the endpoint node in em if the endpoint node is
-// already present. If not found, nil is returned. The comparisons are done on
-// the unordered set of addresses within an endpoint.
-func (em EndpointMap) find(e endpointNode) *endpointNode {
- for endpoint := range em.endpoints {
- if e.Equal(endpoint) {
- return endpoint
- }
- }
- return nil
-}
-
-// Delete removes the specified endpoint from the map.
-func (em *EndpointMap) Delete(e Endpoint) {
- en := toEndpointNode(e)
- if entry := em.find(en); entry != nil {
- delete(em.endpoints, entry)
- }
-}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index bd1c7d0..11384e2 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -240,6 +240,11 @@
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
+ // NewServiceConfig is called by resolver to notify ClientConn a new
+ // service config. The service config should be provided as a json string.
+ //
+ // Deprecated: Use UpdateState instead.
+ NewServiceConfig(serviceConfig string)
// ParseServiceConfig parses the provided service config and returns an
// object that provides the parsed config.
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
@@ -281,11 +286,6 @@
return strings.TrimPrefix(endpoint, "/")
}
-// String returns a string representation of Target.
-func (t Target) String() string {
- return t.URL.String()
-}
-
// Builder creates a resolver that will be used to watch name resolution updates.
type Builder interface {
// Build creates a new resolver for the given target.
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
new file mode 100644
index 0000000..d683305
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -0,0 +1,247 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+// resolverStateUpdater wraps the single method used by ccResolverWrapper to
+// report a state update from the actual resolver implementation.
+type resolverStateUpdater interface {
+ updateResolverState(s resolver.State, err error) error
+}
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements resolver.ClientConn interface.
+type ccResolverWrapper struct {
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc resolverStateUpdater
+ channelzID *channelz.Identifier
+ ignoreServiceConfig bool
+ opts ccResolverWrapperOpts
+ serializer *grpcsync.CallbackSerializer // To serialize all incoming calls.
+ serializerCancel context.CancelFunc // To close the serializer, accessed only from close().
+
+ // All incoming (resolver --> gRPC) calls are guaranteed to execute in a
+ // mutually exclusive manner as they are scheduled on the serializer.
+ // Fields accessed *only* in these serializer callbacks, can therefore be
+ // accessed without a mutex.
+ curState resolver.State
+
+ // mu guards access to the below fields.
+ mu sync.Mutex
+ closed bool
+ resolver resolver.Resolver // Accessed only from outgoing calls.
+}
+
+// ccResolverWrapperOpts wraps the arguments to be passed when creating a new
+// ccResolverWrapper.
+type ccResolverWrapperOpts struct {
+ target resolver.Target // User specified dial target to resolve.
+ builder resolver.Builder // Resolver builder to use.
+ bOpts resolver.BuildOptions // Resolver build options to use.
+ channelzID *channelz.Identifier // Channelz identifier for the channel.
+}
+
+// newCCResolverWrapper uses the resolver.Builder to build a Resolver and
+// returns a ccResolverWrapper object which wraps the newly built resolver.
+func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ ccr := &ccResolverWrapper{
+ cc: cc,
+ channelzID: opts.channelzID,
+ ignoreServiceConfig: opts.bOpts.DisableServiceConfig,
+ opts: opts,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+
+ // Cannot hold the lock at build time because the resolver can send an
+ // update or error inline and these incoming calls grab the lock to schedule
+ // a callback in the serializer.
+ r, err := opts.builder.Build(opts.target, ccr, opts.bOpts)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ // Any error reported by the resolver at build time that leads to a
+ // re-resolution request from the balancer is dropped by grpc until we
+ // return from this function. So, we don't have to handle pending resolveNow
+ // requests here.
+ ccr.mu.Lock()
+ ccr.resolver = r
+ ccr.mu.Unlock()
+
+ return ccr, nil
+}
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+ ccr.mu.Lock()
+ defer ccr.mu.Unlock()
+
+ // ccr.resolver field is set only after the call to Build() returns. But in
+ // the process of building, the resolver may send an error update which when
+ // propagated to the balancer may result in a re-resolution request.
+ if ccr.closed || ccr.resolver == nil {
+ return
+ }
+ ccr.resolver.ResolveNow(o)
+}
+
+func (ccr *ccResolverWrapper) close() {
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ return
+ }
+
+ channelz.Info(logger, ccr.channelzID, "Closing the name resolver")
+
+ // Close the serializer to ensure that no more calls from the resolver are
+ // handled, before actually closing the resolver.
+ ccr.serializerCancel()
+ ccr.closed = true
+ r := ccr.resolver
+ ccr.mu.Unlock()
+
+ // Give enqueued callbacks a chance to finish.
+ <-ccr.serializer.Done()
+
+ // Spawn a goroutine to close the resolver (since it may block trying to
+ // cleanup all allocated resources) and return early.
+ go r.Close()
+}
+
+// serializerScheduleLocked is a convenience method to schedule a function to be
+// run on the serializer while holding ccr.mu.
+func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) {
+ ccr.mu.Lock()
+ ccr.serializer.Schedule(f)
+ ccr.mu.Unlock()
+}
+
+// UpdateState is called by resolver implementations to report new state to gRPC
+// which includes addresses and service config.
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+ errCh := make(chan error, 1)
+ if s.Endpoints == nil {
+ s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
+ for _, a := range s.Addresses {
+ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+ ep.Addresses[0].BalancerAttributes = nil
+ s.Endpoints = append(s.Endpoints, ep)
+ }
+ }
+ ok := ccr.serializer.Schedule(func(context.Context) {
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+ errCh <- balancer.ErrBadResolverState
+ return
+ }
+ errCh <- nil
+ })
+ if !ok {
+ // The only time when Schedule() fail to add the callback to the
+ // serializer is when the serializer is closed, and this happens only
+ // when the resolver wrapper is closed.
+ return nil
+ }
+ return <-errCh
+}
+
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
+func (ccr *ccResolverWrapper) ReportError(err error) {
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+ ccr.cc.updateResolverState(resolver.State{}, err)
+ })
+}
+
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
+ ccr.curState.Addresses = addrs
+ ccr.cc.updateResolverState(ccr.curState, nil)
+ })
+}
+
+// NewServiceConfig is called by the resolver implementation to send service
+// configs to gRPC.
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
+ ccr.serializerScheduleLocked(func(_ context.Context) {
+ channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc)
+ if ccr.ignoreServiceConfig {
+ channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config")
+ return
+ }
+ scpr := parseServiceConfig(sc)
+ if scpr.Err != nil {
+ channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err)
+ return
+ }
+ ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
+ ccr.curState.ServiceConfig = scpr
+ ccr.cc.updateResolverState(ccr.curState, nil)
+ })
+}
+
+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+ return parseServiceConfig(scJSON)
+}
+
+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ var updates []string
+ var oldSC, newSC *ServiceConfig
+ var oldOK, newOK bool
+ if ccr.curState.ServiceConfig != nil {
+ oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if s.ServiceConfig != nil {
+ newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
+ updates = append(updates, "service config updated")
+ }
+ if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+ updates = append(updates, "resolver returned an empty address list")
+ } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+ updates = append(updates, "resolver returned new addresses")
+ }
+ channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
deleted file mode 100644
index c79bab1..0000000
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "strings"
- "sync"
-
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConn interface.
-type ccResolverWrapper struct {
- // The following fields are initialized when the wrapper is created and are
- // read-only afterwards, and therefore can be accessed without a mutex.
- cc *ClientConn
- ignoreServiceConfig bool
- serializer *grpcsync.CallbackSerializer
- serializerCancel context.CancelFunc
-
- resolver resolver.Resolver // only accessed within the serializer
-
- // The following fields are protected by mu. Caller must take cc.mu before
- // taking mu.
- mu sync.Mutex
- curState resolver.State
- closed bool
-}
-
-// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used
-// after calling start, which builds the resolver.
-func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
- ctx, cancel := context.WithCancel(cc.ctx)
- return &ccResolverWrapper{
- cc: cc,
- ignoreServiceConfig: cc.dopts.disableServiceConfig,
- serializer: grpcsync.NewCallbackSerializer(ctx),
- serializerCancel: cancel,
- }
-}
-
-// start builds the name resolver using the resolver.Builder in cc and returns
-// any error encountered. It must always be the first operation performed on
-// any newly created ccResolverWrapper, except that close may be called instead.
-func (ccr *ccResolverWrapper) start() error {
- errCh := make(chan error)
- ccr.serializer.Schedule(func(ctx context.Context) {
- if ctx.Err() != nil {
- return
- }
- opts := resolver.BuildOptions{
- DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
- DialCreds: ccr.cc.dopts.copts.TransportCredentials,
- CredsBundle: ccr.cc.dopts.copts.CredsBundle,
- Dialer: ccr.cc.dopts.copts.Dialer,
- }
- var err error
- ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
- errCh <- err
- })
- return <-errCh
-}
-
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
- ccr.serializer.Schedule(func(ctx context.Context) {
- if ctx.Err() != nil || ccr.resolver == nil {
- return
- }
- ccr.resolver.ResolveNow(o)
- })
-}
-
-// close initiates async shutdown of the wrapper. To determine the wrapper has
-// finished shutting down, the channel should block on ccr.serializer.Done()
-// without cc.mu held.
-func (ccr *ccResolverWrapper) close() {
- channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver")
- ccr.mu.Lock()
- ccr.closed = true
- ccr.mu.Unlock()
-
- ccr.serializer.Schedule(func(context.Context) {
- if ccr.resolver == nil {
- return
- }
- ccr.resolver.Close()
- ccr.resolver = nil
- })
- ccr.serializerCancel()
-}
-
-// UpdateState is called by resolver implementations to report new state to gRPC
-// which includes addresses and service config.
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
- ccr.cc.mu.Lock()
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- ccr.cc.mu.Unlock()
- return nil
- }
- if s.Endpoints == nil {
- s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
- for _, a := range s.Addresses {
- ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
- ep.Addresses[0].BalancerAttributes = nil
- s.Endpoints = append(s.Endpoints, ep)
- }
- }
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- ccr.mu.Unlock()
- return ccr.cc.updateResolverStateAndUnlock(s, nil)
-}
-
-// ReportError is called by resolver implementations to report errors
-// encountered during name resolution to gRPC.
-func (ccr *ccResolverWrapper) ReportError(err error) {
- ccr.cc.mu.Lock()
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- ccr.cc.mu.Unlock()
- return
- }
- ccr.mu.Unlock()
- channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
- ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
-}
-
-// NewAddress is called by the resolver implementation to send addresses to
-// gRPC.
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- ccr.cc.mu.Lock()
- ccr.mu.Lock()
- if ccr.closed {
- ccr.mu.Unlock()
- ccr.cc.mu.Unlock()
- return
- }
- s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
- ccr.addChannelzTraceEvent(s)
- ccr.curState = s
- ccr.mu.Unlock()
- ccr.cc.updateResolverStateAndUnlock(s, nil)
-}
-
-// ParseServiceConfig is called by resolver implementations to parse a JSON
-// representation of the service config.
-func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
- return parseServiceConfig(scJSON)
-}
-
-// addChannelzTraceEvent adds a channelz trace event containing the new
-// state received from resolver implementations.
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
- var updates []string
- var oldSC, newSC *ServiceConfig
- var oldOK, newOK bool
- if ccr.curState.ServiceConfig != nil {
- oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
- }
- if s.ServiceConfig != nil {
- newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
- }
- if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
- updates = append(updates, "service config updated")
- }
- if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
- updates = append(updates, "resolver returned an empty address list")
- } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
- updates = append(updates, "resolver returned new addresses")
- }
- channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
-}
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 682fa18..8f60d42 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -70,10 +70,6 @@
internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
return srv.opts.creds
}
- internal.IsRegisteredMethod = func(srv *Server, method string) bool {
- return srv.isRegisteredMethod(method)
- }
- internal.ServerFromContext = serverFromContext
internal.DrainServerTransports = func(srv *Server, addr string) {
srv.drainServerTransports(addr)
}
@@ -85,7 +81,6 @@
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
- internal.RecvBufferPool = recvBufferPool
}
var statusOK = status.New(codes.OK, "")
@@ -144,8 +139,7 @@
channelzID *channelz.Identifier
czData *channelzData
- serverWorkerChannel chan func()
- serverWorkerChannelClose func()
+ serverWorkerChannel chan func()
}
type serverOptions struct {
@@ -584,13 +578,11 @@
// options are used: StatsHandler, EnableTracing, or binary logging. In such
// cases, the shared buffer pool will be ignored.
//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
- return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.recvBufferPool = bufferPool
})
@@ -624,14 +616,15 @@
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
s.serverWorkerChannel = make(chan func())
- s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
- close(s.serverWorkerChannel)
- })
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
go s.serverWorker()
}
}
+func (s *Server) stopServerWorkers() {
+ close(s.serverWorkerChannel)
+}
+
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@@ -813,18 +806,6 @@
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
-//
-// Note: All supported releases of Go (as of December 2023) override the OS
-// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
-// with OS defaults for keepalive time and interval, callers need to do the
-// following two things:
-// - pass a net.Listener created by calling the Listen method on a
-// net.ListenConfig with the `KeepAlive` field set to a negative value. This
-// will result in the Go standard library not overriding OS defaults for TCP
-// keepalive interval and time. But this will also result in the Go standard
-// library not enabling TCP keepalives by default.
-// - override the Accept method on the passed in net.Listener and set the
-// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("serving")
@@ -936,7 +917,7 @@
return
}
go func() {
- s.serveStreams(context.Background(), st, rawConn)
+ s.serveStreams(st)
s.removeConn(lisAddr, st)
}()
}
@@ -990,29 +971,18 @@
return st
}
-func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
- ctx = transport.SetConnection(ctx, rawConn)
- ctx = peer.NewContext(ctx, st.Peer())
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
- RemoteAddr: st.Peer().Addr,
- LocalAddr: st.Peer().LocalAddr,
- })
- sh.HandleConn(ctx, &stats.ConnBegin{})
- }
-
- defer func() {
- st.Close(errors.New("finished serving streams for the server transport"))
- for _, sh := range s.opts.statsHandlers {
- sh.HandleConn(ctx, &stats.ConnEnd{})
- }
- }()
+func (s *Server) serveStreams(st transport.ServerTransport) {
+ defer st.Close(errors.New("finished serving streams for the server transport"))
+ var wg sync.WaitGroup
streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
- st.HandleStreams(ctx, func(stream *transport.Stream) {
+ st.HandleStreams(func(stream *transport.Stream) {
+ wg.Add(1)
+
streamQuota.acquire()
f := func() {
defer streamQuota.release()
+ defer wg.Done()
s.handleStream(st, stream)
}
@@ -1026,6 +996,7 @@
}
go f()
})
+ wg.Wait()
}
var _ http.Handler = (*Server)(nil)
@@ -1069,7 +1040,7 @@
return
}
defer s.removeConn(listenerAddressForServeHTTP, st)
- s.serveStreams(r.Context(), st, nil)
+ s.serveStreams(st)
}
func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
@@ -1718,7 +1689,6 @@
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
ctx := stream.Context()
- ctx = contextWithServer(ctx, s)
var ti *traceInfo
if EnableTracing {
tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
@@ -1727,7 +1697,7 @@
tr: tr,
firstLine: firstLine{
client: false,
- remoteAddr: t.Peer().Addr,
+ remoteAddr: t.RemoteAddr(),
},
}
if dl, ok := ctx.Deadline(); ok {
@@ -1761,22 +1731,6 @@
service := sm[:pos]
method := sm[pos+1:]
- md, _ := metadata.FromIncomingContext(ctx)
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
- sh.HandleRPC(ctx, &stats.InHeader{
- FullMethod: stream.Method(),
- RemoteAddr: t.Peer().Addr,
- LocalAddr: t.Peer().LocalAddr,
- Compression: stream.RecvCompress(),
- WireLength: stream.HeaderWireLength(),
- Header: md,
- })
- }
- // To have calls in stream callouts work. Will delete once all stats handler
- // calls come from the gRPC layer.
- stream.SetContext(ctx)
-
srv, knownService := s.services[service]
if knownService {
if md, ok := srv.methods[method]; ok {
@@ -1866,68 +1820,62 @@
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
- s.stop(false)
+ s.quit.Fire()
+
+ defer func() {
+ s.serveWG.Wait()
+ s.done.Fire()
+ }()
+
+ s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
+
+ s.mu.Lock()
+ listeners := s.lis
+ s.lis = nil
+ conns := s.conns
+ s.conns = nil
+ // interrupt GracefulStop if Stop and GracefulStop are called concurrently.
+ s.cv.Broadcast()
+ s.mu.Unlock()
+
+ for lis := range listeners {
+ lis.Close()
+ }
+ for _, cs := range conns {
+ for st := range cs {
+ st.Close(errors.New("Server.Stop called"))
+ }
+ }
+ if s.opts.numServerWorkers > 0 {
+ s.stopServerWorkers()
+ }
+
+ s.mu.Lock()
+ if s.events != nil {
+ s.events.Finish()
+ s.events = nil
+ }
+ s.mu.Unlock()
}
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
- s.stop(true)
-}
-
-func (s *Server) stop(graceful bool) {
s.quit.Fire()
defer s.done.Fire()
s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
-
s.mu.Lock()
- s.closeListenersLocked()
- // Wait for serving threads to be ready to exit. Only then can we be sure no
- // new conns will be created.
- s.mu.Unlock()
- s.serveWG.Wait()
-
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if graceful {
- s.drainAllServerTransportsLocked()
- } else {
- s.closeServerTransportsLocked()
+ if s.conns == nil {
+ s.mu.Unlock()
+ return
}
- for len(s.conns) != 0 {
- s.cv.Wait()
+ for lis := range s.lis {
+ lis.Close()
}
- s.conns = nil
-
- if s.opts.numServerWorkers > 0 {
- // Closing the channel (only once, via grpcsync.OnceFunc) after all the
- // connections have been closed above ensures that there are no
- // goroutines executing the callback passed to st.HandleStreams (where
- // the channel is written to).
- s.serverWorkerChannelClose()
- }
-
- if s.events != nil {
- s.events.Finish()
- s.events = nil
- }
-}
-
-// s.mu must be held by the caller.
-func (s *Server) closeServerTransportsLocked() {
- for _, conns := range s.conns {
- for st := range conns {
- st.Close(errors.New("Server.Stop called"))
- }
- }
-}
-
-// s.mu must be held by the caller.
-func (s *Server) drainAllServerTransportsLocked() {
+ s.lis = nil
if !s.drain {
for _, conns := range s.conns {
for st := range conns {
@@ -1936,14 +1884,22 @@
}
s.drain = true
}
-}
-// s.mu must be held by the caller.
-func (s *Server) closeListenersLocked() {
- for lis := range s.lis {
- lis.Close()
+ // Wait for serving threads to be ready to exit. Only then can we be sure no
+ // new conns will be created.
+ s.mu.Unlock()
+ s.serveWG.Wait()
+ s.mu.Lock()
+
+ for len(s.conns) != 0 {
+ s.cv.Wait()
}
- s.lis = nil
+ s.conns = nil
+ if s.events != nil {
+ s.events.Finish()
+ s.events = nil
+ }
+ s.mu.Unlock()
}
// contentSubtype must be lowercase
@@ -1957,50 +1913,11 @@
}
codec := encoding.GetCodec(contentSubtype)
if codec == nil {
- logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
return encoding.GetCodec(proto.Name)
}
return codec
}
-type serverKey struct{}
-
-// serverFromContext gets the Server from the context.
-func serverFromContext(ctx context.Context) *Server {
- s, _ := ctx.Value(serverKey{}).(*Server)
- return s
-}
-
-// contextWithServer sets the Server in the context.
-func contextWithServer(ctx context.Context, server *Server) context.Context {
- return context.WithValue(ctx, serverKey{}, server)
-}
-
-// isRegisteredMethod returns whether the passed in method is registered as a
-// method on the server. /service/method and service/method will match if the
-// service and method are registered on the server.
-func (s *Server) isRegisteredMethod(serviceMethod string) bool {
- if serviceMethod != "" && serviceMethod[0] == '/' {
- serviceMethod = serviceMethod[1:]
- }
- pos := strings.LastIndex(serviceMethod, "/")
- if pos == -1 { // Invalid method name syntax.
- return false
- }
- service := serviceMethod[:pos]
- method := serviceMethod[pos+1:]
- srv, knownService := s.services[service]
- if knownService {
- if _, ok := srv.methods[method]; ok {
- return true
- }
- if _, ok := srv.streams[method]; ok {
- return true
- }
- }
- return false
-}
-
// SetHeader sets the header metadata to be sent from the server to the client.
// The context provided must be the context passed to the server's handler.
//
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index dc2cea5..6d2cadd 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.60.1"
+const Version = "1.59.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index 896dc38..bb480f1 100644
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -35,6 +35,7 @@
# Install the pinned versions as defined in module tools.
pushd ./test/tools
go install \
+ golang.org/x/lint/golint \
golang.org/x/tools/cmd/goimports \
honnef.co/go/tools/cmd/staticcheck \
github.com/client9/misspell/cmd/misspell
@@ -76,16 +77,12 @@
not grep 'func Test[^(]' *_test.go
not grep 'func Test[^(]' test/*.go
-# - Check for typos in test function names
-git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test'
-git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example'
-
# - Do not import x/net/context.
not git grep -l 'x/net/context' -- "*.go"
# - Do not import math/rand for real library code. Use internal/grpcrand for
# thread safety.
-git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test'
+git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test'
# - Do not use "interface{}"; use "any" instead.
git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
@@ -97,14 +94,15 @@
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"
# - Ensure all usages of grpc_testing package are renamed when importing.
-not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
# - Ensure all xds proto imports are renamed to *pb or *grpc.
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'
misspell -error .
-# - gofmt, goimports, go vet, go mod tidy.
+# - gofmt, goimports, golint (with exceptions for generated code), go vet,
+# go mod tidy.
# Perform these checks on each module inside gRPC.
for MOD_FILE in $(find . -name 'go.mod'); do
MOD_DIR=$(dirname ${MOD_FILE})
@@ -112,6 +110,7 @@
go vet -all ./... | fail_on_output
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
+ golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"
go mod tidy -compat=1.19
git status --porcelain 2>&1 | fail_on_output || \
@@ -120,73 +119,94 @@
done
# - Collection of static analysis checks
+#
+# TODO(dfawley): don't use deprecated functions in examples or first-party
+# plugins.
+# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
SC_OUT="$(mktemp)"
-staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
-
-# Error for anything other than checks that need exclusions.
-grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
-
-# Exclude underscore checks for generated code.
-grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)'
-
-# Error for duplicate imports not including grpc protos.
-grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
-channelz/grpc_channelz_v1"
-go-control-plane/envoy
-grpclb/grpc_lb_v1"
-health/grpc_health_v1"
-interop/grpc_testing"
-orca/v3"
-proto/grpc_gcp"
-proto/grpc_lookup_v1"
-reflection/grpc_reflection_v1"
-reflection/grpc_reflection_v1alpha"
-XXXXX PleaseIgnoreUnused'
-
-# Error for any package comments not in generated code.
-grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:"
-
-# Only ignore the following deprecated types/fields/functions and exclude
-# generated code.
-grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused
-XXXXX Protobuf related deprecation errors:
-"github.com/golang/protobuf
-.pb.go:
-: ptypes.
-proto.RegisterType
-XXXXX gRPC internal usage deprecation errors:
-"google.golang.org/grpc
-: grpc.
-: v1alpha.
-: v1alphareflectionpb.
-BalancerAttributes is deprecated:
-CredsBundle is deprecated:
-Metadata is deprecated: use Attributes instead.
-NewSubConn is deprecated:
-OverrideServerName is deprecated:
-RemoveSubConn is deprecated:
-SecurityVersion is deprecated:
+staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
+# Error if anything other than deprecation warnings are printed.
+not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
+# Only ignore the following deprecated types/fields/functions.
+not grep -Fv '.CredsBundle
+.HeaderMap
+.Metadata is deprecated: use Attributes
+.NewAddress
+.NewServiceConfig
+.Type is deprecated: use Attributes
+BuildVersion is deprecated
+balancer.ErrTransientFailure
+balancer.Picker
+extDesc.Filename is deprecated
+github.com/golang/protobuf/jsonpb is deprecated
+grpc.CallCustomCodec
+grpc.Code
+grpc.Compressor
+grpc.CustomCodec
+grpc.Decompressor
+grpc.MaxMsgSize
+grpc.MethodConfig
+grpc.NewGZIPCompressor
+grpc.NewGZIPDecompressor
+grpc.RPCCompressor
+grpc.RPCDecompressor
+grpc.ServiceConfig
+grpc.WithCompressor
+grpc.WithDecompressor
+grpc.WithDialer
+grpc.WithMaxMsgSize
+grpc.WithServiceConfig
+grpc.WithTimeout
+http.CloseNotifier
+info.SecurityVersion
+proto is deprecated
+proto.InternalMessageInfo is deprecated
+proto.EnumName is deprecated
+proto.ErrInternalBadWireType is deprecated
+proto.FileDescriptor is deprecated
+proto.Marshaler is deprecated
+proto.MessageType is deprecated
+proto.RegisterEnum is deprecated
+proto.RegisterFile is deprecated
+proto.RegisterType is deprecated
+proto.RegisterExtension is deprecated
+proto.RegisteredExtension is deprecated
+proto.RegisteredExtensions is deprecated
+proto.RegisterMapType is deprecated
+proto.Unmarshaler is deprecated
Target is deprecated: Use the Target field in the BuildOptions instead.
-UpdateAddresses is deprecated:
-UpdateSubConnState is deprecated:
-balancer.ErrTransientFailure is deprecated:
-grpc/reflection/v1alpha/reflection.proto
-XXXXX xDS deprecated fields we support
-.ExactMatch
-.PrefixMatch
-.SafeRegexMatch
-.SuffixMatch
-GetContainsMatch
-GetExactMatch
-GetMatchSubjectAltNames
-GetPrefixMatch
-GetSafeRegexMatch
-GetSuffixMatch
-GetTlsCertificateCertificateProviderInstance
-GetValidationContextCertificateProviderInstance
-XXXXX TODO: Remove the below deprecation usages:
-CloseNotifier
-Roots.Subjects
-XXXXX PleaseIgnoreUnused'
+xxx_messageInfo_
+' "${SC_OUT}"
+
+# - special golint on package comments.
+lint_package_comment_per_package() {
+ # Number of files in this go package.
+ fileCount=$(go list -f '{{len .GoFiles}}' $1)
+ if [ ${fileCount} -eq 0 ]; then
+ return 0
+ fi
+ # Number of package errors generated by golint.
+ lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment")
+ # golint complains about every file that's missing the package comment. If the
+ # number of files for this package is greater than the number of errors, there's
+ # at least one file with package comment, good. Otherwise, fail.
+ if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
+ echo "Package $1 (with ${fileCount} files) is missing package comment"
+ return 1
+ fi
+}
+lint_package_comment() {
+ set +ex
+
+ count=0
+ for i in $(go list ./...); do
+ lint_package_comment_per_package "$i"
+ ((count += $?))
+ done
+
+ set -ex
+ return $count
+}
+lint_package_comment
echo SUCCESS
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index f479023..5f28148 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -11,7 +11,6 @@
"strconv"
"strings"
- "google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/json"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
@@ -24,7 +23,7 @@
"google.golang.org/protobuf/reflect/protoregistry"
)
-// Unmarshal reads the given []byte into the given [proto.Message].
+// Unmarshal reads the given []byte into the given proto.Message.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
@@ -38,7 +37,7 @@
// required fields will not return an error.
AllowPartial bool
- // If DiscardUnknown is set, unknown fields and enum name values are ignored.
+ // If DiscardUnknown is set, unknown fields are ignored.
DiscardUnknown bool
// Resolver is used for looking up types when unmarshaling
@@ -48,13 +47,9 @@
protoregistry.MessageTypeResolver
protoregistry.ExtensionTypeResolver
}
-
- // RecursionLimit limits how deeply messages may be nested.
- // If zero, a default limit is applied.
- RecursionLimit int
}
-// Unmarshal reads the given []byte and populates the given [proto.Message]
+// Unmarshal reads the given []byte and populates the given proto.Message
// using options in the UnmarshalOptions object.
// It will clear the message first before setting the fields.
// If it returns an error, the given message may be partially set.
@@ -72,9 +67,6 @@
if o.Resolver == nil {
o.Resolver = protoregistry.GlobalTypes
}
- if o.RecursionLimit == 0 {
- o.RecursionLimit = protowire.DefaultRecursionLimit
- }
dec := decoder{json.NewDecoder(b), o}
if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
@@ -122,10 +114,6 @@
// unmarshalMessage unmarshals a message into the given protoreflect.Message.
func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
- d.opts.RecursionLimit--
- if d.opts.RecursionLimit < 0 {
- return errors.New("exceeded max recursion depth")
- }
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
return unmarshal(d, m)
}
@@ -278,9 +266,7 @@
if err != nil {
return err
}
- if val.IsValid() {
- m.Set(fd, val)
- }
+ m.Set(fd, val)
return nil
}
@@ -343,7 +329,7 @@
}
case protoreflect.EnumKind:
- if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
+ if v, ok := unmarshalEnum(tok, fd); ok {
return v, nil
}
@@ -488,7 +474,7 @@
return protoreflect.ValueOfBytes(b), true
}
-func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.String:
// Lookup EnumNumber based on name.
@@ -496,9 +482,6 @@
if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
return protoreflect.ValueOfEnum(enumVal.Number()), true
}
- if discardUnknown {
- return protoreflect.Value{}, true
- }
case json.Number:
if n, ok := tok.Int(32); ok {
@@ -559,9 +542,7 @@
if err != nil {
return err
}
- if val.IsValid() {
- list.Append(val)
- }
+ list.Append(val)
}
}
@@ -628,9 +609,8 @@
if err != nil {
return err
}
- if pval.IsValid() {
- mmap.Set(pkey, pval)
- }
+
+ mmap.Set(pkey, pval)
}
return nil
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
index ae71007..21d5d2c 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -6,6 +6,6 @@
// format. It follows the guide at
// https://protobuf.dev/programming-guides/proto3#json.
//
-// This package produces a different output than the standard [encoding/json]
+// This package produces a different output than the standard "encoding/json"
// package, which does not operate correctly on protocol buffer messages.
package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 3f75098..66b9587 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -31,7 +31,7 @@
return MarshalOptions{Multiline: true}.Format(m)
}
-// Marshal writes the given [proto.Message] in JSON format using default options.
+// Marshal writes the given proto.Message in JSON format using default options.
// Do not depend on the output being stable. It may change over time across
// different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
@@ -81,25 +81,6 @@
// ╚═══════╧════════════════════════════╝
EmitUnpopulated bool
- // EmitDefaultValues specifies whether to emit default-valued primitive fields,
- // empty lists, and empty maps. The fields affected are as follows:
- // ╔═══════╤════════════════════════════════════════╗
- // ║ JSON │ Protobuf field ║
- // ╠═══════╪════════════════════════════════════════╣
- // ║ false │ non-optional scalar boolean fields ║
- // ║ 0 │ non-optional scalar numeric fields ║
- // ║ "" │ non-optional scalar string/byte fields ║
- // ║ [] │ empty repeated fields ║
- // ║ {} │ empty map fields ║
- // ╚═══════╧════════════════════════════════════════╝
- //
- // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
- // i.e. presence-sensing fields that are omitted will remain omitted to preserve
- // presence-sensing.
- // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
- // a strict superset of the latter.
- EmitDefaultValues bool
-
// Resolver is used for looking up types when expanding google.protobuf.Any
// messages. If nil, this defaults to using protoregistry.GlobalTypes.
Resolver interface {
@@ -121,7 +102,7 @@
return string(b)
}
-// Marshal marshals the given [proto.Message] in the JSON format using options in
+// Marshal marshals the given proto.Message in the JSON format using options in
// MarshalOptions. Do not depend on the output being stable. It may change over
// time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
@@ -197,11 +178,7 @@
// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct {
- protoreflect.Message
-
- skipNull bool
-}
+type unpopulatedFieldRanger struct{ protoreflect.Message }
func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
fds := m.Descriptor().Fields()
@@ -215,9 +192,6 @@
isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
- if m.skipNull {
- continue
- }
v = protoreflect.Value{} // use invalid value to emit null
}
if !f(fd, v) {
@@ -243,11 +217,8 @@
defer e.EndObject()
var fields order.FieldRanger = m
- switch {
- case e.opts.EmitUnpopulated:
- fields = unpopulatedFieldRanger{Message: m, skipNull: false}
- case e.opts.EmitDefaultValues:
- fields = unpopulatedFieldRanger{Message: m, skipNull: true}
+ if e.opts.EmitUnpopulated {
+ fields = unpopulatedFieldRanger{m}
}
if typeURL != "" {
fields = typeURLFieldRanger{fields, typeURL}
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 25329b7..6c37d41 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -176,7 +176,7 @@
// Use another decoder to parse the unread bytes for @type field. This
// avoids advancing a read from current decoder because the current JSON
// object may contain the fields of the embedded type.
- dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}}
+ dec := decoder{d.Clone(), UnmarshalOptions{}}
tok, err := findTypeURL(dec)
switch err {
case errEmptyObject:
@@ -308,25 +308,48 @@
// array) in order to advance the read to the next JSON value. It relies on
// the decoder returning an error if the types are not in valid sequence.
func (d decoder) skipJSONValue() error {
- var open int
- for {
- tok, err := d.Read()
- if err != nil {
- return err
- }
- switch tok.Kind() {
- case json.ObjectClose, json.ArrayClose:
- open--
- case json.ObjectOpen, json.ArrayOpen:
- open++
- if open > d.opts.RecursionLimit {
- return errors.New("exceeded max recursion depth")
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ // Only need to continue reading for objects and arrays.
+ switch tok.Kind() {
+ case json.ObjectOpen:
+ for {
+ tok, err := d.Read()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ObjectClose:
+ return nil
+ case json.Name:
+ // Skip object field value.
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
}
}
- if open == 0 {
- return nil
+
+ case json.ArrayOpen:
+ for {
+ tok, err := d.Peek()
+ if err != nil {
+ return err
+ }
+ switch tok.Kind() {
+ case json.ArrayClose:
+ d.Read()
+ return nil
+ default:
+ // Skip array item.
+ if err := d.skipJSONValue(); err != nil {
+ return err
+ }
+ }
}
}
+ return nil
}
// unmarshalAnyValue unmarshals the given custom-type message from the JSON
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index a45f112..4921b2d 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -21,7 +21,7 @@
"google.golang.org/protobuf/reflect/protoregistry"
)
-// Unmarshal reads the given []byte into the given [proto.Message].
+// Unmarshal reads the given []byte into the given proto.Message.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func Unmarshal(b []byte, m proto.Message) error {
return UnmarshalOptions{}.Unmarshal(b, m)
@@ -51,7 +51,7 @@
}
}
-// Unmarshal reads the given []byte and populates the given [proto.Message]
+// Unmarshal reads the given []byte and populates the given proto.Message
// using options in the UnmarshalOptions object.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
@@ -739,9 +739,7 @@
case text.ListClose:
return nil
case text.MessageOpen:
- if err := d.skipMessageValue(); err != nil {
- return err
- }
+ return d.skipMessageValue()
default:
// Skip items. This will not validate whether skipped values are
// of the same type or not, same behavior as C++
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 95967e8..722a7b4 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -33,7 +33,7 @@
return MarshalOptions{Multiline: true}.Format(m)
}
-// Marshal writes the given [proto.Message] in textproto format using default
+// Marshal writes the given proto.Message in textproto format using default
// options. Do not depend on the output being stable. It may change over time
// across different versions of the program.
func Marshal(m proto.Message) ([]byte, error) {
@@ -97,7 +97,7 @@
return string(b)
}
-// Marshal writes the given [proto.Message] in textproto format using options in
+// Marshal writes the given proto.Message in textproto format using options in
// MarshalOptions object. Do not depend on the output being stable. It may
// change over time across different versions of the program.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc9..f4b4686 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -6,7 +6,7 @@
// See https://protobuf.dev/programming-guides/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
-// use the [google.golang.org/protobuf/proto] package instead.
+// use the "google.golang.org/protobuf/proto" package instead.
package protowire
import (
@@ -87,7 +87,7 @@
// ConsumeField parses an entire field record (both tag and value) and returns
// the field number, the wire type, and the total length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
//
// The total length includes the tag header and the end group marker (if the
// field is a group).
@@ -104,8 +104,8 @@
}
// ConsumeFieldValue parses a field value and returns its length.
-// This assumes that the field [Number] and wire [Type] have already been parsed.
-// This returns a negative length upon an error (see [ParseError]).
+// This assumes that the field Number and wire Type have already been parsed.
+// This returns a negative length upon an error (see ParseError).
//
// When parsing a group, the length includes the end group marker and
// the end group is verified to match the starting field number.
@@ -164,7 +164,7 @@
}
// ConsumeTag parses b as a varint-encoded tag, reporting its length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeTag(b []byte) (Number, Type, int) {
v, n := ConsumeVarint(b)
if n < 0 {
@@ -263,7 +263,7 @@
}
// ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeVarint(b []byte) (v uint64, n int) {
var y uint64
if len(b) <= 0 {
@@ -384,7 +384,7 @@
}
// ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeFixed32(b []byte) (v uint32, n int) {
if len(b) < 4 {
return 0, errCodeTruncated
@@ -412,7 +412,7 @@
}
// ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeFixed64(b []byte) (v uint64, n int) {
if len(b) < 8 {
return 0, errCodeTruncated
@@ -432,7 +432,7 @@
}
// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeBytes(b []byte) (v []byte, n int) {
m, n := ConsumeVarint(b)
if n < 0 {
@@ -456,7 +456,7 @@
}
// ConsumeString parses b as a length-prefixed bytes value, reporting its length.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeString(b []byte) (v string, n int) {
bb, n := ConsumeBytes(b)
return string(bb), n
@@ -471,7 +471,7 @@
// ConsumeGroup parses b as a group value until the trailing end group marker,
// and verifies that the end marker matches the provided num. The value v
// does not contain the end marker, while the length does contain the end marker.
-// This returns a negative length upon an error (see [ParseError]).
+// This returns a negative length upon an error (see ParseError).
func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
n = ConsumeFieldValue(num, StartGroupType, b)
if n < 0 {
@@ -495,8 +495,8 @@
return n + SizeTag(num)
}
-// DecodeTag decodes the field [Number] and wire [Type] from its unified form.
-// The [Number] is -1 if the decoded field number overflows int32.
+// DecodeTag decodes the field Number and wire Type from its unified form.
+// The Number is -1 if the decoded field number overflows int32.
// Other than overflow, this does not check for field number validity.
func DecodeTag(x uint64) (Number, Type) {
// NOTE: MessageSet allows for larger field numbers than normal.
@@ -506,7 +506,7 @@
return Number(x >> 3), Type(x & 7)
}
-// EncodeTag encodes the field [Number] and wire [Type] into its unified form.
+// EncodeTag encodes the field Number and wire Type into its unified form.
func EncodeTag(num Number, typ Type) uint64 {
return uint64(num)<<3 | uint64(typ&7)
}
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index a45625c..db5248e 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -83,13 +83,7 @@
case protoreflect.FileImports:
for i := 0; i < vs.Len(); i++ {
var rs records
- rv := reflect.ValueOf(vs.Get(i))
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Path"), "Path"},
- {rv.MethodByName("Package"), "Package"},
- {rv.MethodByName("IsPublic"), "IsPublic"},
- {rv.MethodByName("IsWeak"), "IsWeak"},
- }...)
+ rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
ss = append(ss, "{"+rs.Join()+"}")
}
return start + joinStrings(ss, allowMulti) + end
@@ -98,26 +92,34 @@
for i := 0; i < vs.Len(); i++ {
m := reflect.ValueOf(vs).MethodByName("Get")
v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
- ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil))
+ ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue))
}
return start + joinStrings(ss, allowMulti && isEnumValue) + end
}
}
-type methodAndName struct {
- method reflect.Value
- name string
+// descriptorAccessors is a list of accessors to print for each descriptor.
+//
+// Do not print all accessors since some contain redundant information,
+// while others are pointers that we do not want to follow since the descriptor
+// is actually a cyclic graph.
+//
+// Using a list allows us to print the accessors in a sensible order.
+var descriptorAccessors = map[reflect.Type][]string{
+ reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
+ reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
+ reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+ reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
+ reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
+ reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"},
+ reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"},
+ reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
}
func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
- io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil))
+ io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
}
-
-func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
- return formatDescOpt(t, isRoot, allowMulti, record)
-}
-
-func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
+func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
rv := reflect.ValueOf(t)
rt := rv.MethodByName("ProtoType").Type().In(0)
@@ -127,60 +129,26 @@
}
_, isFile := t.(protoreflect.FileDescriptor)
- rs := records{
- allowMulti: allowMulti,
- record: record,
- }
+ rs := records{allowMulti: allowMulti}
if t.IsPlaceholder() {
if isFile {
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Path"), "Path"},
- {rv.MethodByName("Package"), "Package"},
- {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
- }...)
+ rs.Append(rv, "Path", "Package", "IsPlaceholder")
} else {
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("FullName"), "FullName"},
- {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
- }...)
+ rs.Append(rv, "FullName", "IsPlaceholder")
}
} else {
switch {
case isFile:
- rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"})
+ rs.Append(rv, "Syntax")
case isRoot:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Syntax"), "Syntax"},
- {rv.MethodByName("FullName"), "FullName"},
- }...)
+ rs.Append(rv, "Syntax", "FullName")
default:
- rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"})
+ rs.Append(rv, "Name")
}
switch t := t.(type) {
case protoreflect.FieldDescriptor:
- accessors := []methodAndName{
- {rv.MethodByName("Number"), "Number"},
- {rv.MethodByName("Cardinality"), "Cardinality"},
- {rv.MethodByName("Kind"), "Kind"},
- {rv.MethodByName("HasJSONName"), "HasJSONName"},
- {rv.MethodByName("JSONName"), "JSONName"},
- {rv.MethodByName("HasPresence"), "HasPresence"},
- {rv.MethodByName("IsExtension"), "IsExtension"},
- {rv.MethodByName("IsPacked"), "IsPacked"},
- {rv.MethodByName("IsWeak"), "IsWeak"},
- {rv.MethodByName("IsList"), "IsList"},
- {rv.MethodByName("IsMap"), "IsMap"},
- {rv.MethodByName("MapKey"), "MapKey"},
- {rv.MethodByName("MapValue"), "MapValue"},
- {rv.MethodByName("HasDefault"), "HasDefault"},
- {rv.MethodByName("Default"), "Default"},
- {rv.MethodByName("ContainingOneof"), "ContainingOneof"},
- {rv.MethodByName("ContainingMessage"), "ContainingMessage"},
- {rv.MethodByName("Message"), "Message"},
- {rv.MethodByName("Enum"), "Enum"},
- }
- for _, s := range accessors {
- switch s.name {
+ for _, s := range descriptorAccessors[rt] {
+ switch s {
case "MapKey":
if k := t.MapKey(); k != nil {
rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
@@ -189,20 +157,20 @@
if v := t.MapValue(); v != nil {
switch v.Kind() {
case protoreflect.EnumKind:
- rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())})
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
case protoreflect.MessageKind, protoreflect.GroupKind:
- rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())})
+ rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
default:
- rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()})
+ rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
}
}
case "ContainingOneof":
if od := t.ContainingOneof(); od != nil {
- rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())})
+ rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
}
case "ContainingMessage":
if t.IsExtension() {
- rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())})
+ rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
}
case "Message":
if !t.IsMap() {
@@ -219,61 +187,13 @@
ss = append(ss, string(fs.Get(i).Name()))
}
if len(ss) > 0 {
- rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
+ rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
}
-
- case protoreflect.FileDescriptor:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Path"), "Path"},
- {rv.MethodByName("Package"), "Package"},
- {rv.MethodByName("Imports"), "Imports"},
- {rv.MethodByName("Messages"), "Messages"},
- {rv.MethodByName("Enums"), "Enums"},
- {rv.MethodByName("Extensions"), "Extensions"},
- {rv.MethodByName("Services"), "Services"},
- }...)
-
- case protoreflect.MessageDescriptor:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("IsMapEntry"), "IsMapEntry"},
- {rv.MethodByName("Fields"), "Fields"},
- {rv.MethodByName("Oneofs"), "Oneofs"},
- {rv.MethodByName("ReservedNames"), "ReservedNames"},
- {rv.MethodByName("ReservedRanges"), "ReservedRanges"},
- {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"},
- {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"},
- {rv.MethodByName("Messages"), "Messages"},
- {rv.MethodByName("Enums"), "Enums"},
- {rv.MethodByName("Extensions"), "Extensions"},
- }...)
-
- case protoreflect.EnumDescriptor:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Values"), "Values"},
- {rv.MethodByName("ReservedNames"), "ReservedNames"},
- {rv.MethodByName("ReservedRanges"), "ReservedRanges"},
- }...)
-
- case protoreflect.EnumValueDescriptor:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Number"), "Number"},
- }...)
-
- case protoreflect.ServiceDescriptor:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Methods"), "Methods"},
- }...)
-
- case protoreflect.MethodDescriptor:
- rs.Append(rv, []methodAndName{
- {rv.MethodByName("Input"), "Input"},
- {rv.MethodByName("Output"), "Output"},
- {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"},
- {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"},
- }...)
+ default:
+ rs.Append(rv, descriptorAccessors[rt]...)
}
- if m := rv.MethodByName("GoType"); m.IsValid() {
- rs.Append(rv, methodAndName{m, "GoType"})
+ if rv.MethodByName("GoType").IsValid() {
+ rs.Append(rv, "GoType")
}
}
return start + rs.Join() + end
@@ -282,34 +202,19 @@
type records struct {
recs [][2]string
allowMulti bool
-
- // record is a function that will be called for every Append() or
- // AppendRecs() call, to be used for testing with the
- // InternalFormatDescOptForTesting function.
- record func(string)
}
-func (rs *records) AppendRecs(fieldName string, newRecs [2]string) {
- if rs.record != nil {
- rs.record(fieldName)
- }
- rs.recs = append(rs.recs, newRecs)
-}
-
-func (rs *records) Append(v reflect.Value, accessors ...methodAndName) {
+func (rs *records) Append(v reflect.Value, accessors ...string) {
for _, a := range accessors {
- if rs.record != nil {
- rs.record(a.name)
- }
var rv reflect.Value
- if a.method.IsValid() {
- rv = a.method.Call(nil)[0]
+ if m := v.MethodByName(a); m.IsValid() {
+ rv = m.Call(nil)[0]
}
if v.Kind() == reflect.Struct && !rv.IsValid() {
- rv = v.FieldByName(a.name)
+ rv = v.FieldByName(a)
}
if !rv.IsValid() {
- panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name))
+ panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
}
if _, ok := rv.Interface().(protoreflect.Value); ok {
rv = rv.MethodByName("Interface").Call(nil)[0]
@@ -356,7 +261,7 @@
default:
s = fmt.Sprint(v)
}
- rs.recs = append(rs.recs, [2]string{a.name, s})
+ rs.recs = append(rs.recs, [2]string{a, s})
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 193c68e..7c3689b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -21,26 +21,11 @@
"google.golang.org/protobuf/reflect/protoregistry"
)
-// Edition is an Enum for proto2.Edition
-type Edition int32
-
-// These values align with the value of Enum in descriptor.proto which allows
-// direct conversion between the proto enum and this enum.
-const (
- EditionUnknown Edition = 0
- EditionProto2 Edition = 998
- EditionProto3 Edition = 999
- Edition2023 Edition = 1000
- EditionUnsupported Edition = 100000
-)
-
// The types in this file may have a suffix:
// • L0: Contains fields common to all descriptors (except File) and
// must be initialized up front.
// • L1: Contains fields specific to a descriptor and
-// must be initialized up front. If the associated proto uses Editions, the
-// Editions features must always be resolved. If not explicitly set, the
-// appropriate default must be resolved and set.
+// must be initialized up front.
// • L2: Contains fields that are lazily initialized when constructing
// from the raw file descriptor. When constructing as a literal, the L2
// fields must be initialized up front.
@@ -59,7 +44,6 @@
}
FileL1 struct {
Syntax protoreflect.Syntax
- Edition Edition // Only used if Syntax == Editions
Path string
Package protoreflect.FullName
@@ -67,35 +51,12 @@
Messages Messages
Extensions Extensions
Services Services
-
- EditionFeatures FileEditionFeatures
}
FileL2 struct {
Options func() protoreflect.ProtoMessage
Imports FileImports
Locations SourceLocations
}
-
- FileEditionFeatures struct {
- // IsFieldPresence is true if field_presence is EXPLICIT
- // https://protobuf.dev/editions/features/#field_presence
- IsFieldPresence bool
- // IsOpenEnum is true if enum_type is OPEN
- // https://protobuf.dev/editions/features/#enum_type
- IsOpenEnum bool
- // IsPacked is true if repeated_field_encoding is PACKED
- // https://protobuf.dev/editions/features/#repeated_field_encoding
- IsPacked bool
- // IsUTF8Validated is true if utf_validation is VERIFY
- // https://protobuf.dev/editions/features/#utf8_validation
- IsUTF8Validated bool
- // IsDelimitedEncoded is true if message_encoding is DELIMITED
- // https://protobuf.dev/editions/features/#message_encoding
- IsDelimitedEncoded bool
- // IsJSONCompliant is true if json_format is ALLOW
- // https://protobuf.dev/editions/features/#json_format
- IsJSONCompliant bool
- }
)
func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
@@ -249,9 +210,6 @@
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
-
- // Edition features.
- Presence bool
}
Oneof struct {
@@ -315,9 +273,6 @@
func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) }
func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) }
func (fd *Field) HasPresence() bool {
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
- return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
- }
return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
}
func (fd *Field) HasOptionalKeyword() bool {
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 8f94230..136f1b2 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -12,12 +12,6 @@
const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
-// Full and short names for google.protobuf.Edition.
-const (
- Edition_enum_fullname = "google.protobuf.Edition"
- Edition_enum_name = "Edition"
-)
-
// Names for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@@ -87,7 +81,7 @@
FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
- FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14
+ FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13
)
// Names for google.protobuf.DescriptorProto.
@@ -190,12 +184,10 @@
const (
ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration"
- ExtensionRangeOptions_Features_field_name protoreflect.Name = "features"
ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification"
ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration"
- ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features"
ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification"
)
@@ -203,7 +195,6 @@
const (
ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2
- ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50
ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3
)
@@ -221,26 +212,29 @@
// Field names for google.protobuf.ExtensionRangeOptions.Declaration.
const (
- ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
- ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
- ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
- ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
- ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
+ ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number"
+ ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
+ ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type"
+ ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated"
+ ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
+ ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
- ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
- ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
- ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
- ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
- ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
+ ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
+ ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
+ ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
+ ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated"
+ ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
+ ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
)
// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration.
const (
- ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
- ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
- ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
- ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
- ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
+ ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1
+ ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
+ ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3
+ ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4
+ ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
+ ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.FieldDescriptorProto.
@@ -484,7 +478,6 @@
FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace"
FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace"
FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package"
- FileOptions_Features_field_name protoreflect.Name = "features"
FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package"
@@ -507,7 +500,6 @@
FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
- FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features"
FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
)
@@ -533,7 +525,6 @@
FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41
FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44
FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45
- FileOptions_Features_field_number protoreflect.FieldNumber = 50
FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -556,7 +547,6 @@
MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
- MessageOptions_Features_field_name protoreflect.Name = "features"
MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
@@ -564,7 +554,6 @@
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
- MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features"
MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
)
@@ -575,7 +564,6 @@
MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11
- MessageOptions_Features_field_number protoreflect.FieldNumber = 12
MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -596,9 +584,8 @@
FieldOptions_Weak_field_name protoreflect.Name = "weak"
FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
FieldOptions_Retention_field_name protoreflect.Name = "retention"
+ FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_Targets_field_name protoreflect.Name = "targets"
- FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
- FieldOptions_Features_field_name protoreflect.Name = "features"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -610,9 +597,8 @@
FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
+ FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
- FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
- FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@@ -627,9 +613,8 @@
FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
+ FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
- FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
- FieldOptions_Features_field_number protoreflect.FieldNumber = 21
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -657,27 +642,6 @@
FieldOptions_OptionTargetType_enum_name = "OptionTargetType"
)
-// Names for google.protobuf.FieldOptions.EditionDefault.
-const (
- FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault"
- FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault"
-)
-
-// Field names for google.protobuf.FieldOptions.EditionDefault.
-const (
- FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition"
- FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value"
-
- FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition"
- FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value"
-)
-
-// Field numbers for google.protobuf.FieldOptions.EditionDefault.
-const (
- FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3
- FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
-)
-
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -686,16 +650,13 @@
// Field names for google.protobuf.OneofOptions.
const (
- OneofOptions_Features_field_name protoreflect.Name = "features"
OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
- OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features"
OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.OneofOptions.
const (
- OneofOptions_Features_field_number protoreflect.FieldNumber = 1
OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -710,13 +671,11 @@
EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
- EnumOptions_Features_field_name protoreflect.Name = "features"
EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts"
- EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features"
EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
)
@@ -725,7 +684,6 @@
EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6
- EnumOptions_Features_field_number protoreflect.FieldNumber = 7
EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -738,21 +696,15 @@
// Field names for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
- EnumValueOptions_Features_field_name protoreflect.Name = "features"
- EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
- EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
- EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.EnumValueOptions.
const (
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
- EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
- EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -764,18 +716,15 @@
// Field names for google.protobuf.ServiceOptions.
const (
- ServiceOptions_Features_field_name protoreflect.Name = "features"
ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated"
ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
- ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features"
ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated"
ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.ServiceOptions.
const (
- ServiceOptions_Features_field_number protoreflect.FieldNumber = 34
ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33
ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -790,12 +739,10 @@
const (
MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated"
MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level"
- MethodOptions_Features_field_name protoreflect.Name = "features"
MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated"
MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level"
- MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features"
MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option"
)
@@ -803,7 +750,6 @@
const (
MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33
MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34
- MethodOptions_Features_field_number protoreflect.FieldNumber = 35
MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -870,120 +816,6 @@
UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2
)
-// Names for google.protobuf.FeatureSet.
-const (
- FeatureSet_message_name protoreflect.Name = "FeatureSet"
- FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet"
-)
-
-// Field names for google.protobuf.FeatureSet.
-const (
- FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
- FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
- FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
- FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
- FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
- FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
-
- FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
- FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
- FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
- FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
- FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
- FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
-)
-
-// Field numbers for google.protobuf.FeatureSet.
-const (
- FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
- FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
- FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
- FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
- FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
- FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
-)
-
-// Full and short names for google.protobuf.FeatureSet.FieldPresence.
-const (
- FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence"
- FeatureSet_FieldPresence_enum_name = "FieldPresence"
-)
-
-// Full and short names for google.protobuf.FeatureSet.EnumType.
-const (
- FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType"
- FeatureSet_EnumType_enum_name = "EnumType"
-)
-
-// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding.
-const (
- FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding"
- FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding"
-)
-
-// Full and short names for google.protobuf.FeatureSet.Utf8Validation.
-const (
- FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation"
- FeatureSet_Utf8Validation_enum_name = "Utf8Validation"
-)
-
-// Full and short names for google.protobuf.FeatureSet.MessageEncoding.
-const (
- FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding"
- FeatureSet_MessageEncoding_enum_name = "MessageEncoding"
-)
-
-// Full and short names for google.protobuf.FeatureSet.JsonFormat.
-const (
- FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat"
- FeatureSet_JsonFormat_enum_name = "JsonFormat"
-)
-
-// Names for google.protobuf.FeatureSetDefaults.
-const (
- FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
- FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults"
-)
-
-// Field names for google.protobuf.FeatureSetDefaults.
-const (
- FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults"
- FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition"
- FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition"
-
- FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults"
- FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition"
- FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition"
-)
-
-// Field numbers for google.protobuf.FeatureSetDefaults.
-const (
- FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1
- FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4
- FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5
-)
-
-// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
-const (
- FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault"
- FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault"
-)
-
-// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
-const (
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
-
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
-)
-
-// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
-const (
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
-)
-
// Names for google.protobuf.SourceCodeInfo.
const (
SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo"
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
index f55dc01..1a509b6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -162,20 +162,11 @@
func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.BoolSlice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growBoolSlice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -741,20 +732,11 @@
func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growInt32Slice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -1156,20 +1138,11 @@
func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growInt32Slice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -1571,20 +1544,11 @@
func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growUint32Slice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -1986,20 +1950,11 @@
func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growInt64Slice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -2401,20 +2356,11 @@
func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growInt64Slice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -2816,20 +2762,11 @@
func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := 0
- for _, v := range b {
- if v < 0x80 {
- count++
- }
- }
- if count > 0 {
- p.growUint64Slice(count)
- }
- s := *sp
for len(b) > 0 {
var v uint64
var n int
@@ -3208,15 +3145,11 @@
func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int32Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := len(b) / protowire.SizeFixed32()
- if count > 0 {
- p.growInt32Slice(count)
- }
- s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@@ -3528,15 +3461,11 @@
func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint32Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := len(b) / protowire.SizeFixed32()
- if count > 0 {
- p.growUint32Slice(count)
- }
- s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@@ -3848,15 +3777,11 @@
func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float32Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := len(b) / protowire.SizeFixed32()
- if count > 0 {
- p.growFloat32Slice(count)
- }
- s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
@@ -4168,15 +4093,11 @@
func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Int64Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := len(b) / protowire.SizeFixed64()
- if count > 0 {
- p.growInt64Slice(count)
- }
- s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
@@ -4488,15 +4409,11 @@
func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Uint64Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := len(b) / protowire.SizeFixed64()
- if count > 0 {
- p.growUint64Slice(count)
- }
- s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
@@ -4808,15 +4725,11 @@
func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
sp := p.Float64Slice()
if wtyp == protowire.BytesType {
+ s := *sp
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
- count := len(b) / protowire.SizeFixed64()
- if count > 0 {
- p.growFloat64Slice(count)
- }
- s := *sp
for len(b) > 0 {
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 2ab2c62..61c483f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -206,18 +206,13 @@
// Obtain a list of oneof wrapper types.
var oneofWrappers []reflect.Type
- methods := make([]reflect.Method, 0, 2)
- if m, ok := t.MethodByName("XXX_OneofFuncs"); ok {
- methods = append(methods, m)
- }
- if m, ok := t.MethodByName("XXX_OneofWrappers"); ok {
- methods = append(methods, m)
- }
- for _, fn := range methods {
- for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
- for _, v := range vs {
- oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := t.MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ for _, v := range vs {
+ oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
+ }
}
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 629bacd..4f5fb67 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -192,17 +192,12 @@
// Derive a mapping of oneof wrappers to fields.
oneofWrappers := mi.OneofWrappers
- methods := make([]reflect.Method, 0, 2)
- if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
- methods = append(methods, m)
- }
- if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
- methods = append(methods, m)
- }
- for _, fn := range methods {
- for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
- oneofWrappers = vs
+ for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
+ if fn, ok := reflect.PtrTo(t).MethodByName(method); ok {
+ for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+ if vs, ok := v.Interface().([]interface{}); ok {
+ oneofWrappers = vs
+ }
}
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
index 517e944..4c491bd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -159,42 +159,6 @@
p.v.Elem().Set(v.v)
}
-func growSlice(p pointer, addCap int) {
- // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
- in := p.v.Elem()
- out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
- reflect.Copy(out, in)
- p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
func (ms *messageState) pointer() pointer { panic("not supported") }
func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 4b020e3..ee0e057 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -138,46 +138,6 @@
*(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p)
}
-func (p pointer) growBoolSlice(addCap int) {
- sp := p.BoolSlice()
- s := make([]bool, 0, addCap+len(*sp))
- s = s[:len(*sp)]
- copy(s, *sp)
- *sp = s
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- sp := p.Int32Slice()
- s := make([]int32, 0, addCap+len(*sp))
- s = s[:len(*sp)]
- copy(s, *sp)
- *sp = s
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- p.growInt32Slice(addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- p.growInt32Slice(addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- sp := p.Int64Slice()
- s := make([]int64, 0, addCap+len(*sp))
- s = s[:len(*sp)]
- copy(s, *sp)
- *sp = s
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- p.growInt64Slice(addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- p.growInt64Slice(addCap)
-}
-
// Static check that MessageState does not exceed the size of a pointer.
const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
similarity index 96%
rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index a008acd..61a84d3 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !purego && !appengine
+// +build !purego,!appengine
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
deleted file mode 100644
index 60166f2..0000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
-
-package strs
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) string {
- return unsafe.String(unsafe.SliceData(b), len(b))
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) []byte {
- return unsafe.Slice(unsafe.StringData(s), len(s))
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
- buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
- n := len(prefix) + len(".") + len(name)
- if len(prefix) == 0 {
- n -= len(".")
- }
- sb.grow(n)
- sb.buf = append(sb.buf, prefix...)
- sb.buf = append(sb.buf, '.')
- sb.buf = append(sb.buf, name...)
- return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
- sb.grow(len(b))
- sb.buf = append(sb.buf, b...)
- return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
- if cap(sb.buf)-len(sb.buf) >= n {
- return
- }
-
- // Unlike strings.Builder, we do not need to copy over the contents
- // of the old buffer since our builder provides no API for
- // retrieving previously created strings.
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
- return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index d8f48fa..0999f29 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,7 +51,7 @@
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 32
+ Minor = 31
Patch = 0
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index e5b03b5..48d4794 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -69,7 +69,7 @@
// UnmarshalState parses a wire-format message and places the result in m.
//
// This method permits fine-grained control over the unmarshaler.
-// Most users should use [Unmarshal] instead.
+// Most users should use Unmarshal instead.
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
if o.RecursionLimit == 0 {
o.RecursionLimit = protowire.DefaultRecursionLimit
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
index 80ed16a..ec71e71 100644
--- a/vendor/google.golang.org/protobuf/proto/doc.go
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -18,27 +18,27 @@
// This package contains functions to convert to and from the wire format,
// an efficient binary serialization of protocol buffers.
//
-// - [Size] reports the size of a message in the wire format.
+// • Size reports the size of a message in the wire format.
//
-// - [Marshal] converts a message to the wire format.
-// The [MarshalOptions] type provides more control over wire marshaling.
+// • Marshal converts a message to the wire format.
+// The MarshalOptions type provides more control over wire marshaling.
//
-// - [Unmarshal] converts a message from the wire format.
-// The [UnmarshalOptions] type provides more control over wire unmarshaling.
+// • Unmarshal converts a message from the wire format.
+// The UnmarshalOptions type provides more control over wire unmarshaling.
//
// # Basic message operations
//
-// - [Clone] makes a deep copy of a message.
+// • Clone makes a deep copy of a message.
//
-// - [Merge] merges the content of a message into another.
+// • Merge merges the content of a message into another.
//
-// - [Equal] compares two messages. For more control over comparisons
-// and detailed reporting of differences, see package
-// [google.golang.org/protobuf/testing/protocmp].
+// • Equal compares two messages. For more control over comparisons
+// and detailed reporting of differences, see package
+// "google.golang.org/protobuf/testing/protocmp".
//
-// - [Reset] clears the content of a message.
+// • Reset clears the content of a message.
//
-// - [CheckInitialized] reports whether all required fields in a message are set.
+// • CheckInitialized reports whether all required fields in a message are set.
//
// # Optional scalar constructors
//
@@ -46,9 +46,9 @@
// as pointers to a value. For example, an optional string field has the
// Go type *string.
//
-// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String]
-// take a value and return a pointer to a new instance of it,
-// to simplify construction of optional field values.
+// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String
+// take a value and return a pointer to a new instance of it,
+// to simplify construction of optional field values.
//
// Generated enum types usually have an Enum method which performs the
// same operation.
@@ -57,29 +57,29 @@
//
// # Extension accessors
//
-// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension]
-// access extension field values in a protocol buffer message.
+// • HasExtension, GetExtension, SetExtension, and ClearExtension
+// access extension field values in a protocol buffer message.
//
// Extension fields are only supported in proto2.
//
// # Related packages
//
-// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to
-// and from JSON.
+// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
+// and from JSON.
//
-// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to
-// and from the text format.
+// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to
+// and from the text format.
//
-// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a
-// reflection interface for protocol buffer data types.
+// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a
+// reflection interface for protocol buffer data types.
//
-// - Package [google.golang.org/protobuf/testing/protocmp] provides features
-// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp]
-// package.
+// • Package "google.golang.org/protobuf/testing/protocmp" provides features
+// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp"
+// package.
//
-// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic
-// message type, suitable for working with messages where the protocol buffer
-// type is only known at runtime.
+// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic
+// message type, suitable for working with messages where the protocol buffer
+// type is only known at runtime.
//
// This module contains additional packages for more specialized use cases.
// Consult the individual package documentation for details.
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 4fed202..bf7f816 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -129,7 +129,7 @@
// MarshalState returns the wire-format encoding of a message.
//
// This method permits fine-grained control over the marshaler.
-// Most users should use [Marshal] instead.
+// Most users should use Marshal instead.
func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
return o.marshal(in.Buf, in.Message)
}
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 17899a3..5f293cd 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -26,7 +26,7 @@
}
// ClearExtension clears an extension field such that subsequent
-// [HasExtension] calls return false.
+// HasExtension calls return false.
// It panics if m is invalid or if xt does not extend m.
func ClearExtension(m Message, xt protoreflect.ExtensionType) {
m.ProtoReflect().Clear(xt.TypeDescriptor())
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe57..d761ab3 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -21,7 +21,7 @@
// The unknown fields of src are appended to the unknown fields of dst.
//
// It is semantically equivalent to unmarshaling the encoded form of src
-// into dst with the [UnmarshalOptions.Merge] option specified.
+// into dst with the UnmarshalOptions.Merge option specified.
func Merge(dst, src Message) {
// TODO: Should nil src be treated as semantically equivalent to a
// untyped, read-only, empty message? What about a nil dst?
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
index 7543ee6..1f0d183 100644
--- a/vendor/google.golang.org/protobuf/proto/proto.go
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -15,20 +15,18 @@
// protobuf module that accept a Message, except where otherwise specified.
//
// This is the v2 interface definition for protobuf messages.
-// The v1 interface definition is [github.com/golang/protobuf/proto.Message].
+// The v1 interface definition is "github.com/golang/protobuf/proto".Message.
//
-// - To convert a v1 message to a v2 message,
-// use [google.golang.org/protobuf/protoadapt.MessageV2Of].
-// - To convert a v2 message to a v1 message,
-// use [google.golang.org/protobuf/protoadapt.MessageV1Of].
+// To convert a v1 message to a v2 message,
+// use "github.com/golang/protobuf/proto".MessageV2.
+// To convert a v2 message to a v1 message,
+// use "github.com/golang/protobuf/proto".MessageV1.
type Message = protoreflect.ProtoMessage
-// Error matches all errors produced by packages in the protobuf module
-// according to [errors.Is].
+// Error matches all errors produced by packages in the protobuf module.
//
-// Example usage:
-//
-// if errors.Is(err, proto.Error) { ... }
+// That is, errors.Is(err, Error) reports whether an error is produced
+// by this module.
var Error error
func init() {
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index baa0cc6..e4dfb12 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -3,11 +3,11 @@
// license that can be found in the LICENSE file.
// Package protodesc provides functionality for converting
-// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values.
+// FileDescriptorProto messages to/from protoreflect.FileDescriptor values.
//
// The google.protobuf.FileDescriptorProto is a protobuf message that describes
// the type information for a .proto file in a form that is easily serializable.
-// The [protoreflect.FileDescriptor] is a more structured representation of
+// The protoreflect.FileDescriptor is a more structured representation of
// the FileDescriptorProto message where references and remote dependencies
// can be directly followed.
package protodesc
@@ -24,11 +24,11 @@
"google.golang.org/protobuf/types/descriptorpb"
)
-// Resolver is the resolver used by [NewFile] to resolve dependencies.
+// Resolver is the resolver used by NewFile to resolve dependencies.
// The enums and messages provided must belong to some parent file,
// which is also registered.
//
-// It is implemented by [protoregistry.Files].
+// It is implemented by protoregistry.Files.
type Resolver interface {
FindFileByPath(string) (protoreflect.FileDescriptor, error)
FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
@@ -61,19 +61,19 @@
AllowUnresolvable bool
}
-// NewFile creates a new [protoreflect.FileDescriptor] from the provided
-// file descriptor message. See [FileOptions.New] for more information.
+// NewFile creates a new protoreflect.FileDescriptor from the provided
+// file descriptor message. See FileOptions.New for more information.
func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
return FileOptions{}.New(fd, r)
}
-// NewFiles creates a new [protoregistry.Files] from the provided
-// FileDescriptorSet message. See [FileOptions.NewFiles] for more information.
+// NewFiles creates a new protoregistry.Files from the provided
+// FileDescriptorSet message. See FileOptions.NewFiles for more information.
func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
return FileOptions{}.NewFiles(fd)
}
-// New creates a new [protoreflect.FileDescriptor] from the provided
+// New creates a new protoreflect.FileDescriptor from the provided
// file descriptor message. The file must represent a valid proto file according
// to protobuf semantics. The returned descriptor is a deep copy of the input.
//
@@ -93,15 +93,9 @@
f.L1.Syntax = protoreflect.Proto2
case "proto3":
f.L1.Syntax = protoreflect.Proto3
- case "editions":
- f.L1.Syntax = protoreflect.Editions
- f.L1.Edition = fromEditionProto(fd.GetEdition())
default:
return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
}
- if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) {
- return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
- }
f.L1.Path = fd.GetName()
if f.L1.Path == "" {
return nil, errors.New("file path must be populated")
@@ -114,9 +108,6 @@
opts = proto.Clone(opts).(*descriptorpb.FileOptions)
f.L2.Options = func() protoreflect.ProtoMessage { return opts }
}
- if f.L1.Syntax == protoreflect.Editions {
- initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
- }
f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
for _, i := range fd.GetPublicDependency() {
@@ -240,7 +231,7 @@
}
}
-// NewFiles creates a new [protoregistry.Files] from the provided
+// NewFiles creates a new protoregistry.Files from the provided
// FileDescriptorSet message. The descriptor set must include only
// valid files according to protobuf semantics. The returned descriptors
// are a deep copy of the input.
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index aff6fd4..37efda1 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -137,30 +137,6 @@
if fd.JsonName != nil {
f.L1.StringName.InitJSON(fd.GetJsonName())
}
-
- if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
- f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd)
- // We reuse the existing field because the old option `[packed =
- // true]` is mutually exclusive with the editions feature.
- if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
- f.L1.HasPacked = true
- f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd)
- }
-
- // We pretend this option is always explicitly set because the only
- // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8
- // or to return the appropriate default.
- // When using editions we either parse the option or resolve the
- // appropriate default here (instead of later when this option is
- // requested from the descriptor).
- // In proto2/proto3 syntax HasEnforceUTF8 might be false.
- f.L1.HasEnforceUTF8 = true
- f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd)
-
- if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) {
- f.L1.Kind = protoreflect.GroupKind
- }
- }
}
return fs, nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
deleted file mode 100644
index 7352926..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package protodesc
-
-import (
- _ "embed"
- "fmt"
- "os"
- "sync"
-
- "google.golang.org/protobuf/internal/filedesc"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/types/descriptorpb"
-)
-
-const (
- SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
- SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
-)
-
-//go:embed editions_defaults.binpb
-var binaryEditionDefaults []byte
-var defaults = &descriptorpb.FeatureSetDefaults{}
-var defaultsCacheMu sync.Mutex
-var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
-
-func init() {
- err := proto.Unmarshal(binaryEditionDefaults, defaults)
- if err != nil {
- fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err)
- os.Exit(1)
- }
-}
-
-func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition {
- return filedesc.Edition(epb)
-}
-
-func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
- switch ed {
- case filedesc.EditionUnknown:
- return descriptorpb.Edition_EDITION_UNKNOWN
- case filedesc.EditionProto2:
- return descriptorpb.Edition_EDITION_PROTO2
- case filedesc.EditionProto3:
- return descriptorpb.Edition_EDITION_PROTO3
- case filedesc.Edition2023:
- return descriptorpb.Edition_EDITION_2023
- default:
- panic(fmt.Sprintf("unknown value for edition: %v", ed))
- }
-}
-
-func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
- defaultsCacheMu.Lock()
- defer defaultsCacheMu.Unlock()
- if def, ok := defaultsCache[ed]; ok {
- return def
- }
- edpb := toEditionProto(ed)
- if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
- // This should never happen protodesc.(FileOptions).New would fail when
- // initializing the file descriptor.
- // This most likely means the embedded defaults were not updated.
- fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
- os.Exit(1)
- }
- fs := defaults.GetDefaults()[0].GetFeatures()
- // Using a linear search for now.
- // Editions are guaranteed to be sorted and thus we could use a binary search.
- // Given that there are only a handful of editions (with one more per year)
- // there is not much reason to use a binary search.
- for _, def := range defaults.GetDefaults() {
- if def.GetEdition() <= edpb {
- fs = def.GetFeatures()
- } else {
- break
- }
- }
- defaultsCache[ed] = fs
- return fs
-}
-
-func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
- fs := fieldDesc.GetOptions().GetFeatures()
- if fs == nil || fs.FieldPresence == nil {
- return fileDesc.L1.EditionFeatures.IsFieldPresence
- }
- return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
- fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT
-}
-
-func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
- fs := fieldDesc.GetOptions().GetFeatures()
- if fs == nil || fs.RepeatedFieldEncoding == nil {
- return fileDesc.L1.EditionFeatures.IsPacked
- }
- return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED
-}
-
-func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
- fs := fieldDesc.GetOptions().GetFeatures()
- if fs == nil || fs.Utf8Validation == nil {
- return fileDesc.L1.EditionFeatures.IsUTF8Validated
- }
- return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY
-}
-
-func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
- fs := fieldDesc.GetOptions().GetFeatures()
- if fs == nil || fs.MessageEncoding == nil {
- return fileDesc.L1.EditionFeatures.IsDelimitedEncoded
- }
- return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED
-}
-
-// initFileDescFromFeatureSet initializes editions related fields in fd based
-// on fs. If fs is nil it is assumed to be an empty featureset and all fields
-// will be initialized with the appropriate default. fd.L1.Edition must be set
-// before calling this function.
-func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
- dfs := getFeatureSetFor(fd.L1.Edition)
- if fs == nil {
- fs = &descriptorpb.FeatureSet{}
- }
-
- var fieldPresence descriptorpb.FeatureSet_FieldPresence
- if fp := fs.FieldPresence; fp != nil {
- fieldPresence = *fp
- } else {
- fieldPresence = *dfs.FieldPresence
- }
- fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
- fieldPresence == descriptorpb.FeatureSet_EXPLICIT
-
- var enumType descriptorpb.FeatureSet_EnumType
- if et := fs.EnumType; et != nil {
- enumType = *et
- } else {
- enumType = *dfs.EnumType
- }
- fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN
-
- var respeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding
- if rfe := fs.RepeatedFieldEncoding; rfe != nil {
- respeatedFieldEncoding = *rfe
- } else {
- respeatedFieldEncoding = *dfs.RepeatedFieldEncoding
- }
- fd.L1.EditionFeatures.IsPacked = respeatedFieldEncoding == descriptorpb.FeatureSet_PACKED
-
- var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation
- if utf8val := fs.Utf8Validation; utf8val != nil {
- isUTF8Validated = *utf8val
- } else {
- isUTF8Validated = *dfs.Utf8Validation
- }
- fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY
-
- var messageEncoding descriptorpb.FeatureSet_MessageEncoding
- if me := fs.MessageEncoding; me != nil {
- messageEncoding = *me
- } else {
- messageEncoding = *dfs.MessageEncoding
- }
- fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED
-
- var jsonFormat descriptorpb.FeatureSet_JsonFormat
- if jf := fs.JsonFormat; jf != nil {
- jsonFormat = *jf
- } else {
- jsonFormat = *dfs.JsonFormat
- }
- fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
deleted file mode 100644
index 1a8610a..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
+++ /dev/null
@@ -1,4 +0,0 @@
-
- (0æ
- (0ç
- (0è æ(è
\ No newline at end of file
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index 9d6e054..a7c5cef 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -16,7 +16,7 @@
"google.golang.org/protobuf/types/descriptorpb"
)
-// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a
+// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
// google.protobuf.FileDescriptorProto message.
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
p := &descriptorpb.FileDescriptorProto{
@@ -70,13 +70,13 @@
for i, exts := 0, file.Extensions(); i < exts.Len(); i++ {
p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
}
- if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
+ if syntax := file.Syntax(); syntax != protoreflect.Proto2 {
p.Syntax = proto.String(file.Syntax().String())
}
return p
}
-// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a
+// ToDescriptorProto copies a protoreflect.MessageDescriptor into a
// google.protobuf.DescriptorProto message.
func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
p := &descriptorpb.DescriptorProto{
@@ -119,7 +119,7 @@
return p
}
-// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a
+// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a
// google.protobuf.FieldDescriptorProto message.
func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
p := &descriptorpb.FieldDescriptorProto{
@@ -168,7 +168,7 @@
return p
}
-// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a
+// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a
// google.protobuf.OneofDescriptorProto message.
func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
return &descriptorpb.OneofDescriptorProto{
@@ -177,7 +177,7 @@
}
}
-// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a
+// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a
// google.protobuf.EnumDescriptorProto message.
func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
p := &descriptorpb.EnumDescriptorProto{
@@ -200,7 +200,7 @@
return p
}
-// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a
+// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a
// google.protobuf.EnumValueDescriptorProto message.
func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
return &descriptorpb.EnumValueDescriptorProto{
@@ -210,7 +210,7 @@
}
}
-// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a
+// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a
// google.protobuf.ServiceDescriptorProto message.
func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
p := &descriptorpb.ServiceDescriptorProto{
@@ -223,7 +223,7 @@
return p
}
-// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a
+// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a
// google.protobuf.MethodDescriptorProto message.
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
p := &descriptorpb.MethodDescriptorProto{
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index ec6572d..55aa149 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -10,46 +10,46 @@
//
// # Protocol Buffer Descriptors
//
-// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor])
+// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
// are immutable objects that represent protobuf type information.
// They are wrappers around the messages declared in descriptor.proto.
// Protobuf descriptors alone lack any information regarding Go types.
//
-// Enums and messages generated by this module implement [Enum] and [ProtoMessage],
+// Enums and messages generated by this module implement Enum and ProtoMessage,
// where the Descriptor and ProtoReflect.Descriptor accessors respectively
// return the protobuf descriptor for the values.
//
// The protobuf descriptor interfaces are not meant to be implemented by
// user code since they might need to be extended in the future to support
// additions to the protobuf language.
-// The [google.golang.org/protobuf/reflect/protodesc] package converts between
+// The "google.golang.org/protobuf/reflect/protodesc" package converts between
// google.protobuf.DescriptorProto messages and protobuf descriptors.
//
// # Go Type Descriptors
//
-// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for
+// A type descriptor (e.g., EnumType or MessageType) is a constructor for
// a concrete Go type that represents the associated protobuf descriptor.
// There is commonly a one-to-one relationship between protobuf descriptors and
// Go type descriptors, but it can potentially be a one-to-many relationship.
//
-// Enums and messages generated by this module implement [Enum] and [ProtoMessage],
+// Enums and messages generated by this module implement Enum and ProtoMessage,
// where the Type and ProtoReflect.Type accessors respectively
// return the protobuf descriptor for the values.
//
-// The [google.golang.org/protobuf/types/dynamicpb] package can be used to
+// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
// create Go type descriptors from protobuf descriptors.
//
// # Value Interfaces
//
-// The [Enum] and [Message] interfaces provide a reflective view over an
+// The Enum and Message interfaces provide a reflective view over an
// enum or message instance. For enums, it provides the ability to retrieve
// the enum value number for any concrete enum type. For messages, it provides
// the ability to access or manipulate fields of the message.
//
-// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the
+// To convert a proto.Message to a protoreflect.Message, use the
// former's ProtoReflect method. Since the ProtoReflect method is new to the
// v2 message interface, it may not be present on older message implementations.
-// The [github.com/golang/protobuf/proto.MessageReflect] function can be used
+// The "github.com/golang/protobuf/proto".MessageReflect function can be used
// to obtain a reflective view on older messages.
//
// # Relationships
@@ -71,12 +71,12 @@
// │ │
// └────────────────── Type() ───────┘
//
-// • An [EnumType] describes a concrete Go enum type.
+// • An EnumType describes a concrete Go enum type.
// It has an EnumDescriptor and can construct an Enum instance.
//
-// • An [EnumDescriptor] describes an abstract protobuf enum type.
+// • An EnumDescriptor describes an abstract protobuf enum type.
//
-// • An [Enum] is a concrete enum instance. Generated enums implement Enum.
+// • An Enum is a concrete enum instance. Generated enums implement Enum.
//
// ┌──────────────── New() ─────────────────┐
// │ │
@@ -90,26 +90,24 @@
// │ │
// └─────────────────── Type() ─────────┘
//
-// • A [MessageType] describes a concrete Go message type.
-// It has a [MessageDescriptor] and can construct a [Message] instance.
-// Just as how Go's [reflect.Type] is a reflective description of a Go type,
-// a [MessageType] is a reflective description of a Go type for a protobuf message.
+// • A MessageType describes a concrete Go message type.
+// It has a MessageDescriptor and can construct a Message instance.
+// Just as how Go's reflect.Type is a reflective description of a Go type,
+// a MessageType is a reflective description of a Go type for a protobuf message.
//
-// • A [MessageDescriptor] describes an abstract protobuf message type.
-// It has no understanding of Go types. In order to construct a [MessageType]
-// from just a [MessageDescriptor], you can consider looking up the message type
-// in the global registry using the FindMessageByName method on
-// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes]
-// or constructing a dynamic [MessageType] using
-// [google.golang.org/protobuf/types/dynamicpb.NewMessageType].
+// • A MessageDescriptor describes an abstract protobuf message type.
+// It has no understanding of Go types. In order to construct a MessageType
+// from just a MessageDescriptor, you can consider looking up the message type
+// in the global registry using protoregistry.GlobalTypes.FindMessageByName
+// or constructing a dynamic MessageType using dynamicpb.NewMessageType.
//
-// • A [Message] is a reflective view over a concrete message instance.
-// Generated messages implement [ProtoMessage], which can convert to a [Message].
-// Just as how Go's [reflect.Value] is a reflective view over a Go value,
-// a [Message] is a reflective view over a concrete protobuf message instance.
-// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to
-// calling [reflect.ValueOf], and the [Message.Interface] method is similar to
-// calling [reflect.Value.Interface].
+// • A Message is a reflective view over a concrete message instance.
+// Generated messages implement ProtoMessage, which can convert to a Message.
+// Just as how Go's reflect.Value is a reflective view over a Go value,
+// a Message is a reflective view over a concrete protobuf message instance.
+// Using Go reflection as an analogy, the ProtoReflect method is similar to
+// calling reflect.ValueOf, and the Message.Interface method is similar to
+// calling reflect.Value.Interface.
//
// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐
// │ V │ V
@@ -121,15 +119,15 @@
// │ │
// └────── implements ────────┘
//
-// • An [ExtensionType] describes a concrete Go implementation of an extension.
-// It has an [ExtensionTypeDescriptor] and can convert to/from
-// an abstract [Value] and a Go value.
+// • An ExtensionType describes a concrete Go implementation of an extension.
+// It has an ExtensionTypeDescriptor and can convert to/from
+// abstract Values and Go values.
//
-// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor]
-// which also has an [ExtensionType].
+// • An ExtensionTypeDescriptor is an ExtensionDescriptor
+// which also has an ExtensionType.
//
-// • An [ExtensionDescriptor] describes an abstract protobuf extension field and
-// may not always be an [ExtensionTypeDescriptor].
+// • An ExtensionDescriptor describes an abstract protobuf extension field and
+// may not always be an ExtensionTypeDescriptor.
package protoreflect
import (
@@ -144,7 +142,7 @@
// ProtoMessage is the top-level interface that all proto messages implement.
// This is declared in the protoreflect package to avoid a cyclic dependency;
-// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type.
+// use the proto.Message type instead, which aliases this type.
type ProtoMessage interface{ ProtoReflect() Message }
// Syntax is the language version of the proto file.
@@ -153,9 +151,8 @@
type syntax int8 // keep exact type opaque as the int type may change
const (
- Proto2 Syntax = 2
- Proto3 Syntax = 3
- Editions Syntax = 4
+ Proto2 Syntax = 2
+ Proto3 Syntax = 3
)
// IsValid reports whether the syntax is valid.
@@ -439,7 +436,7 @@
// FullName is a qualified name that uniquely identifies a proto declaration.
// A qualified name is the concatenation of the proto package along with the
// fully-declared name (i.e., name of parent preceding the name of the child),
-// with a '.' delimiter placed between each [Name].
+// with a '.' delimiter placed between each Name.
//
// This should not have any leading or trailing dots.
type FullName string // e.g., "google.protobuf.Field.Kind"
@@ -483,7 +480,7 @@
}
// Name returns the short name, which is the last identifier segment.
-// A single segment FullName is the [Name] itself.
+// A single segment FullName is the Name itself.
func (n FullName) Name() Name {
if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
return Name(n[i+1:])
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 0c045db..717b106 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -35,7 +35,7 @@
b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
case 12:
b = p.appendSingularField(b, "syntax", nil)
- case 14:
+ case 13:
b = p.appendSingularField(b, "edition", nil)
}
return b
@@ -180,8 +180,6 @@
b = p.appendSingularField(b, "php_metadata_namespace", nil)
case 45:
b = p.appendSingularField(b, "ruby_package", nil)
- case 50:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -242,8 +240,6 @@
b = p.appendSingularField(b, "map_entry", nil)
case 11:
b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
- case 12:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -289,8 +285,6 @@
b = p.appendSingularField(b, "deprecated", nil)
case 6:
b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
- case 7:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -336,8 +330,6 @@
return b
}
switch (*p)[0] {
- case 34:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 33:
b = p.appendSingularField(b, "deprecated", nil)
case 999:
@@ -369,39 +361,16 @@
b = p.appendSingularField(b, "debug_redact", nil)
case 17:
b = p.appendSingularField(b, "retention", nil)
+ case 18:
+ b = p.appendSingularField(b, "target", nil)
case 19:
b = p.appendRepeatedField(b, "targets", nil)
- case 20:
- b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault)
- case 21:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
return b
}
-func (p *SourcePath) appendFeatureSet(b []byte) []byte {
- if len(*p) == 0 {
- return b
- }
- switch (*p)[0] {
- case 1:
- b = p.appendSingularField(b, "field_presence", nil)
- case 2:
- b = p.appendSingularField(b, "enum_type", nil)
- case 3:
- b = p.appendSingularField(b, "repeated_field_encoding", nil)
- case 4:
- b = p.appendSingularField(b, "utf8_validation", nil)
- case 5:
- b = p.appendSingularField(b, "message_encoding", nil)
- case 6:
- b = p.appendSingularField(b, "json_format", nil)
- }
- return b
-}
-
func (p *SourcePath) appendUninterpretedOption(b []byte) []byte {
if len(*p) == 0 {
return b
@@ -453,8 +422,6 @@
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
case 2:
b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration)
- case 50:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 3:
b = p.appendSingularField(b, "verification", nil)
}
@@ -466,8 +433,6 @@
return b
}
switch (*p)[0] {
- case 1:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -481,10 +446,6 @@
switch (*p)[0] {
case 1:
b = p.appendSingularField(b, "deprecated", nil)
- case 2:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
- case 3:
- b = p.appendSingularField(b, "debug_redact", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -500,27 +461,12 @@
b = p.appendSingularField(b, "deprecated", nil)
case 34:
b = p.appendSingularField(b, "idempotency_level", nil)
- case 35:
- b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
return b
}
-func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte {
- if len(*p) == 0 {
- return b
- }
- switch (*p)[0] {
- case 3:
- b = p.appendSingularField(b, "edition", nil)
- case 2:
- b = p.appendSingularField(b, "value", nil)
- }
- return b
-}
-
func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
if len(*p) == 0 {
return b
@@ -545,6 +491,8 @@
b = p.appendSingularField(b, "full_name", nil)
case 3:
b = p.appendSingularField(b, "type", nil)
+ case 4:
+ b = p.appendSingularField(b, "is_repeated", nil)
case 5:
b = p.appendSingularField(b, "reserved", nil)
case 6:
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 60ff62b..3867470 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -12,7 +12,7 @@
// exactly identical. However, it is possible for the same semantically
// identical proto type to be represented by multiple type descriptors.
//
-// For example, suppose we have t1 and t2 which are both an [MessageDescriptor].
+// For example, suppose we have t1 and t2 which are both MessageDescriptors.
// If t1 == t2, then the types are definitely equal and all accessors return
// the same information. However, if t1 != t2, then it is still possible that
// they still represent the same proto type (e.g., t1.FullName == t2.FullName).
@@ -115,7 +115,7 @@
// corresponds with the google.protobuf.FileDescriptorProto message.
//
// Top-level declarations:
-// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor].
+// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor.
type FileDescriptor interface {
Descriptor // Descriptor.FullName is identical to Package
@@ -180,8 +180,8 @@
// corresponds with the google.protobuf.DescriptorProto message.
//
// Nested declarations:
-// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor],
-// and/or [MessageDescriptor].
+// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor,
+// and/or MessageDescriptor.
type MessageDescriptor interface {
Descriptor
@@ -214,7 +214,7 @@
ExtensionRanges() FieldRanges
// ExtensionRangeOptions returns the ith extension range options.
//
- // To avoid a dependency cycle, this method returns a proto.Message] value,
+ // To avoid a dependency cycle, this method returns a proto.Message value,
// which always contains a google.protobuf.ExtensionRangeOptions message.
// This method returns a typed nil-pointer if no options are present.
// The caller must import the descriptorpb package to use this.
@@ -231,9 +231,9 @@
}
type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
-// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation.
+// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
// It is recommended that implementations of this interface also implement the
-// [MessageFieldTypes] interface.
+// MessageFieldTypes interface.
type MessageType interface {
// New returns a newly allocated empty message.
// It may return nil for synthetic messages representing a map entry.
@@ -249,19 +249,19 @@
Descriptor() MessageDescriptor
}
-// MessageFieldTypes extends a [MessageType] by providing type information
+// MessageFieldTypes extends a MessageType by providing type information
// regarding enums and messages referenced by the message fields.
type MessageFieldTypes interface {
MessageType
- // Enum returns the EnumType for the ith field in MessageDescriptor.Fields.
+ // Enum returns the EnumType for the ith field in Descriptor.Fields.
// It returns nil if the ith field is not an enum kind.
// It panics if out of bounds.
//
// Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum()
Enum(i int) EnumType
- // Message returns the MessageType for the ith field in MessageDescriptor.Fields.
+ // Message returns the MessageType for the ith field in Descriptor.Fields.
// It returns nil if the ith field is not a message or group kind.
// It panics if out of bounds.
//
@@ -286,8 +286,8 @@
// corresponds with the google.protobuf.FieldDescriptorProto message.
//
// It is used for both normal fields defined within the parent message
-// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message
-// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]).
+// (e.g., MessageDescriptor.Fields) and fields that extend some remote message
+// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
type FieldDescriptor interface {
Descriptor
@@ -344,7 +344,7 @@
// IsMap reports whether this field represents a map,
// where the value type for the associated field is a Map.
// It is equivalent to checking whether Cardinality is Repeated,
- // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true.
+ // that the Kind is MessageKind, and that Message.IsMapEntry reports true.
IsMap() bool
// MapKey returns the field descriptor for the key in the map entry.
@@ -419,7 +419,7 @@
// IsSynthetic reports whether this is a synthetic oneof created to support
// proto3 optional semantics. If true, Fields contains exactly one field
- // with FieldDescriptor.HasOptionalKeyword specified.
+ // with HasOptionalKeyword specified.
IsSynthetic() bool
// Fields is a list of fields belonging to this oneof.
@@ -442,10 +442,10 @@
doNotImplement
}
-// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation.
+// ExtensionDescriptor is an alias of FieldDescriptor for documentation.
type ExtensionDescriptor = FieldDescriptor
-// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType].
+// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType.
type ExtensionTypeDescriptor interface {
ExtensionDescriptor
@@ -470,12 +470,12 @@
doNotImplement
}
-// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete
+// ExtensionType encapsulates an ExtensionDescriptor with a concrete
// Go implementation. The nested field descriptor must be for a extension field.
//
// While a normal field is a member of the parent message that it is declared
-// within (see [Descriptor.Parent]), an extension field is a member of some other
-// target message (see [FieldDescriptor.ContainingMessage]) and may have no
+// within (see Descriptor.Parent), an extension field is a member of some other
+// target message (see ExtensionDescriptor.Extendee) and may have no
// relationship with the parent. However, the full name of an extension field is
// relative to the parent that it is declared within.
//
@@ -532,7 +532,7 @@
// corresponds with the google.protobuf.EnumDescriptorProto message.
//
// Nested declarations:
-// [EnumValueDescriptor].
+// EnumValueDescriptor.
type EnumDescriptor interface {
Descriptor
@@ -548,7 +548,7 @@
}
type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
-// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation.
+// EnumType encapsulates an EnumDescriptor with a concrete Go implementation.
type EnumType interface {
// New returns an instance of this enum type with its value set to n.
New(n EnumNumber) Enum
@@ -610,7 +610,7 @@
// ServiceDescriptor describes a service and
// corresponds with the google.protobuf.ServiceDescriptorProto message.
//
-// Nested declarations: [MethodDescriptor].
+// Nested declarations: MethodDescriptor.
type ServiceDescriptor interface {
Descriptor
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a7b0d06..37601b7 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -27,16 +27,16 @@
// Message is a reflective interface for a concrete message value,
// encapsulating both type and value information for the message.
//
-// Accessor/mutators for individual fields are keyed by [FieldDescriptor].
+// Accessor/mutators for individual fields are keyed by FieldDescriptor.
// For non-extension fields, the descriptor must exactly match the
// field known by the parent message.
-// For extension fields, the descriptor must implement [ExtensionTypeDescriptor],
-// extend the parent message (i.e., have the same message [FullName]), and
+// For extension fields, the descriptor must implement ExtensionTypeDescriptor,
+// extend the parent message (i.e., have the same message FullName), and
// be within the parent's extension range.
//
-// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]).
-// See [Value] for the Go types associated with a [FieldDescriptor].
-// Providing a [Value] that is invalid or of an incorrect type panics.
+// Each field Value can be a scalar or a composite type (Message, List, or Map).
+// See Value for the Go types associated with a FieldDescriptor.
+// Providing a Value that is invalid or of an incorrect type panics.
type Message interface {
// Descriptor returns message descriptor, which contains only the protobuf
// type information for the message.
@@ -152,7 +152,7 @@
// This method may return nil.
//
// The returned methods type is identical to
- // google.golang.org/protobuf/runtime/protoiface.Methods.
+ // "google.golang.org/protobuf/runtime/protoiface".Methods.
// Consult the protoiface package documentation for details.
ProtoMethods() *methods
}
@@ -175,8 +175,8 @@
}
// List is a zero-indexed, ordered list.
-// The element [Value] type is determined by [FieldDescriptor.Kind].
-// Providing a [Value] that is invalid or of an incorrect type panics.
+// The element Value type is determined by FieldDescriptor.Kind.
+// Providing a Value that is invalid or of an incorrect type panics.
type List interface {
// Len reports the number of entries in the List.
// Get, Set, and Truncate panic with out of bound indexes.
@@ -226,9 +226,9 @@
}
// Map is an unordered, associative map.
-// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind.
-// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind.
-// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics.
+// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind.
+// The entry Value type is determined by FieldDescriptor.MapValue.Kind.
+// Providing a MapKey or Value that is invalid or of an incorrect type panics.
type Map interface {
// Len reports the number of elements in the map.
Len() int
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
index 654599d..5916525 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
@@ -24,19 +24,19 @@
// Unlike the == operator, a NaN is equal to another NaN.
//
// - Enums are equal if they contain the same number.
-// Since [Value] does not contain an enum descriptor,
+// Since Value does not contain an enum descriptor,
// enum values do not consider the type of the enum.
//
// - Other scalar values are equal if they contain the same value.
//
-// - [Message] values are equal if they belong to the same message descriptor,
+// - Message values are equal if they belong to the same message descriptor,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
-// - [List] values are equal if they are the same length and
+// - Lists are equal if they are the same length and
// each corresponding element is equal.
//
-// - [Map] values are equal if they have the same set of keys and
+// - Maps are equal if they have the same set of keys and
// the corresponding value for each key is equal.
func (v1 Value) Equal(v2 Value) bool {
return equalValue(v1, v2)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 1603097..08e5ef7 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -11,7 +11,7 @@
// Value is a union where only one Go type may be set at a time.
// The Value is used to represent all possible values a field may take.
-// The following shows which Go type is used to represent each proto [Kind]:
+// The following shows which Go type is used to represent each proto Kind:
//
// ╔════════════╤═════════════════════════════════════╗
// ║ Go type │ Protobuf kind ║
@@ -31,22 +31,22 @@
//
// Multiple protobuf Kinds may be represented by a single Go type if the type
// can losslessly represent the information for the proto kind. For example,
-// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64,
+// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64,
// but use different integer encoding methods.
//
-// The [List] or [Map] types are used if the field cardinality is repeated.
-// A field is a [List] if [FieldDescriptor.IsList] reports true.
-// A field is a [Map] if [FieldDescriptor.IsMap] reports true.
+// The List or Map types are used if the field cardinality is repeated.
+// A field is a List if FieldDescriptor.IsList reports true.
+// A field is a Map if FieldDescriptor.IsMap reports true.
//
// Converting to/from a Value and a concrete Go value panics on type mismatch.
-// For example, [ValueOf]("hello").Int() panics because this attempts to
+// For example, ValueOf("hello").Int() panics because this attempts to
// retrieve an int64 from a string.
//
-// [List], [Map], and [Message] Values are called "composite" values.
+// List, Map, and Message Values are called "composite" values.
//
// A composite Value may alias (reference) memory at some location,
// such that changes to the Value updates the that location.
-// A composite value acquired with a Mutable method, such as [Message.Mutable],
+// A composite value acquired with a Mutable method, such as Message.Mutable,
// always references the source object.
//
// For example:
@@ -65,7 +65,7 @@
// // appending to the List here may or may not modify the message.
// list.Append(protoreflect.ValueOfInt32(0))
//
-// Some operations, such as [Message.Get], may return an "empty, read-only"
+// Some operations, such as Message.Get, may return an "empty, read-only"
// composite Value. Modifying an empty, read-only value panics.
type Value value
@@ -306,7 +306,7 @@
}
}
-// String returns v as a string. Since this method implements [fmt.Stringer],
+// String returns v as a string. Since this method implements fmt.Stringer,
// this returns the formatted string value for any non-string type.
func (v Value) String() string {
switch v.typ {
@@ -327,7 +327,7 @@
}
}
-// Enum returns v as a [EnumNumber] and panics if the type is not a [EnumNumber].
+// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber.
func (v Value) Enum() EnumNumber {
switch v.typ {
case enumType:
@@ -337,7 +337,7 @@
}
}
-// Message returns v as a [Message] and panics if the type is not a [Message].
+// Message returns v as a Message and panics if the type is not a Message.
func (v Value) Message() Message {
switch vi := v.getIface().(type) {
case Message:
@@ -347,7 +347,7 @@
}
}
-// List returns v as a [List] and panics if the type is not a [List].
+// List returns v as a List and panics if the type is not a List.
func (v Value) List() List {
switch vi := v.getIface().(type) {
case List:
@@ -357,7 +357,7 @@
}
}
-// Map returns v as a [Map] and panics if the type is not a [Map].
+// Map returns v as a Map and panics if the type is not a Map.
func (v Value) Map() Map {
switch vi := v.getIface().(type) {
case Map:
@@ -367,7 +367,7 @@
}
}
-// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types.
+// MapKey returns v as a MapKey and panics for invalid MapKey types.
func (v Value) MapKey() MapKey {
switch v.typ {
case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
@@ -378,8 +378,8 @@
}
// MapKey is used to index maps, where the Go type of the MapKey must match
-// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]).
-// The following shows what Go type is used to represent each proto [Kind]:
+// the specified key Kind (see MessageDescriptor.IsMapEntry).
+// The following shows what Go type is used to represent each proto Kind:
//
// ╔═════════╤═════════════════════════════════════╗
// ║ Go type │ Protobuf kind ║
@@ -392,13 +392,13 @@
// ║ string │ StringKind ║
// ╚═════════╧═════════════════════════════════════╝
//
-// A MapKey is constructed and accessed through a [Value]:
+// A MapKey is constructed and accessed through a Value:
//
// k := ValueOf("hash").MapKey() // convert string to MapKey
// s := k.String() // convert MapKey to string
//
-// The MapKey is a strict subset of valid types used in [Value];
-// converting a [Value] to a MapKey with an invalid type panics.
+// The MapKey is a strict subset of valid types used in Value;
+// converting a Value to a MapKey with an invalid type panics.
type MapKey value
// IsValid reports whether k is populated with a value.
@@ -426,13 +426,13 @@
return Value(k).Uint()
}
-// String returns k as a string. Since this method implements [fmt.Stringer],
+// String returns k as a string. Since this method implements fmt.Stringer,
// this returns the formatted string value for any non-string type.
func (k MapKey) String() string {
return Value(k).String()
}
-// Value returns k as a [Value].
+// Value returns k as a Value.
func (k MapKey) Value() Value {
return Value(k)
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
similarity index 96%
rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
index b1fdbe3..702ddf2 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !purego && !appengine
+// +build !purego,!appengine
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
deleted file mode 100644
index 4354701..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
-
-package protoreflect
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/internal/pragma"
-)
-
-type (
- ifaceHeader struct {
- _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
- Type unsafe.Pointer
- Data unsafe.Pointer
- }
-)
-
-var (
- nilType = typeOf(nil)
- boolType = typeOf(*new(bool))
- int32Type = typeOf(*new(int32))
- int64Type = typeOf(*new(int64))
- uint32Type = typeOf(*new(uint32))
- uint64Type = typeOf(*new(uint64))
- float32Type = typeOf(*new(float32))
- float64Type = typeOf(*new(float64))
- stringType = typeOf(*new(string))
- bytesType = typeOf(*new([]byte))
- enumType = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
- return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
- pragma.DoNotCompare // 0B
-
- // typ stores the type of the value as a pointer to the Go type.
- typ unsafe.Pointer // 8B
-
- // ptr stores the data pointer for a String, Bytes, or interface value.
- ptr unsafe.Pointer // 8B
-
- // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
- // Enum value as a raw uint64.
- //
- // It is also used to store the length of a String or Bytes value;
- // the capacity is ignored.
- num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
-}
-func valueOfIface(v interface{}) Value {
- p := (*ifaceHeader)(unsafe.Pointer(&v))
- return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() string {
- return unsafe.String((*byte)(v.ptr), v.num)
-}
-func (v Value) getBytes() []byte {
- return unsafe.Slice((*byte)(v.ptr), v.num)
-}
-func (v Value) getIface() (x interface{}) {
- *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
- return x
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 6267dc5..aeb5597 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -5,12 +5,12 @@
// Package protoregistry provides data structures to register and lookup
// protobuf descriptor types.
//
-// The [Files] registry contains file descriptors and provides the ability
+// The Files registry contains file descriptors and provides the ability
// to iterate over the files or lookup a specific descriptor within the files.
-// [Files] only contains protobuf descriptors and has no understanding of Go
+// Files only contains protobuf descriptors and has no understanding of Go
// type information that may be associated with each descriptor.
//
-// The [Types] registry contains descriptor types for which there is a known
+// The Types registry contains descriptor types for which there is a known
// Go type associated with that descriptor. It provides the ability to iterate
// over the registered types or lookup a type by name.
package protoregistry
@@ -218,7 +218,7 @@
// FindDescriptorByName looks up a descriptor by the full name.
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
if r == nil {
return nil, NotFound
@@ -310,7 +310,7 @@
// FindFileByPath looks up a file by the path.
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
// This returns an error if multiple files have the same path.
func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
if r == nil {
@@ -431,7 +431,7 @@
// A compliant implementation must deterministically return the same type
// if no error is encountered.
//
-// The [Types] type implements this interface.
+// The Types type implements this interface.
type MessageTypeResolver interface {
// FindMessageByName looks up a message by its full name.
// E.g., "google.protobuf.Any"
@@ -451,7 +451,7 @@
// A compliant implementation must deterministically return the same type
// if no error is encountered.
//
-// The [Types] type implements this interface.
+// The Types type implements this interface.
type ExtensionTypeResolver interface {
// FindExtensionByName looks up a extension field by the field's full name.
// Note that this is the full name of the field as determined by
@@ -590,7 +590,7 @@
// FindEnumByName looks up an enum by its full name.
// E.g., "google.protobuf.Field.Kind".
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
if r == nil {
return nil, NotFound
@@ -611,7 +611,7 @@
// FindMessageByName looks up a message by its full name,
// e.g. "google.protobuf.Any".
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
if r == nil {
return nil, NotFound
@@ -632,7 +632,7 @@
// FindMessageByURL looks up a message by a URL identifier.
// See documentation on google.protobuf.Any.type_url for the URL format.
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
// This function is similar to FindMessageByName but
// truncates anything before and including '/' in the URL.
@@ -662,7 +662,7 @@
// where the extension is declared and is unrelated to the full name of the
// message being extended.
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if r == nil {
return nil, NotFound
@@ -703,7 +703,7 @@
// FindExtensionByNumber looks up a extension field by the field number
// within some parent message, identified by full name.
//
-// This returns (nil, [NotFound]) if not found.
+// This returns (nil, NotFound) if not found.
func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if r == nil {
return nil, NotFound
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 38daa85..04c00f7 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -48,94 +48,6 @@
sync "sync"
)
-// The full set of known editions.
-type Edition int32
-
-const (
- // A placeholder for an unknown edition value.
- Edition_EDITION_UNKNOWN Edition = 0
- // Legacy syntax "editions". These pre-date editions, but behave much like
- // distinct editions. These can't be used to specify the edition of proto
- // files, but feature definitions must supply proto2/proto3 defaults for
- // backwards compatibility.
- Edition_EDITION_PROTO2 Edition = 998
- Edition_EDITION_PROTO3 Edition = 999
- // Editions that have been released. The specific values are arbitrary and
- // should not be depended on, but they will always be time-ordered for easy
- // comparison.
- Edition_EDITION_2023 Edition = 1000
- // Placeholder editions for testing feature resolution. These should not be
- // used or relyed on outside of tests.
- Edition_EDITION_1_TEST_ONLY Edition = 1
- Edition_EDITION_2_TEST_ONLY Edition = 2
- Edition_EDITION_99997_TEST_ONLY Edition = 99997
- Edition_EDITION_99998_TEST_ONLY Edition = 99998
- Edition_EDITION_99999_TEST_ONLY Edition = 99999
-)
-
-// Enum value maps for Edition.
-var (
- Edition_name = map[int32]string{
- 0: "EDITION_UNKNOWN",
- 998: "EDITION_PROTO2",
- 999: "EDITION_PROTO3",
- 1000: "EDITION_2023",
- 1: "EDITION_1_TEST_ONLY",
- 2: "EDITION_2_TEST_ONLY",
- 99997: "EDITION_99997_TEST_ONLY",
- 99998: "EDITION_99998_TEST_ONLY",
- 99999: "EDITION_99999_TEST_ONLY",
- }
- Edition_value = map[string]int32{
- "EDITION_UNKNOWN": 0,
- "EDITION_PROTO2": 998,
- "EDITION_PROTO3": 999,
- "EDITION_2023": 1000,
- "EDITION_1_TEST_ONLY": 1,
- "EDITION_2_TEST_ONLY": 2,
- "EDITION_99997_TEST_ONLY": 99997,
- "EDITION_99998_TEST_ONLY": 99998,
- "EDITION_99999_TEST_ONLY": 99999,
- }
-)
-
-func (x Edition) Enum() *Edition {
- p := new(Edition)
- *p = x
- return p
-}
-
-func (x Edition) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Edition) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
-}
-
-func (Edition) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[0]
-}
-
-func (x Edition) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *Edition) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = Edition(num)
- return nil
-}
-
-// Deprecated: Use Edition.Descriptor instead.
-func (Edition) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
-}
-
// The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32
@@ -168,11 +80,11 @@
}
func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
}
func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[1]
+ return &file_google_protobuf_descriptor_proto_enumTypes[0]
}
func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -213,10 +125,9 @@
FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
// Tag-delimited aggregate.
- // Group type is deprecated and not supported after google.protobuf. However, Proto3
+ // Group type is deprecated and not supported in proto3. However, Proto3
// implementations should still be able to parse the group wire format and
- // treat group fields as unknown fields. In Editions, the group wire format
- // can be enabled via the `message_encoding` feature.
+ // treat group fields as unknown fields.
FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate.
// New in version 2.
@@ -284,11 +195,11 @@
}
func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
}
func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[2]
+ return &file_google_protobuf_descriptor_proto_enumTypes[1]
}
func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -315,24 +226,21 @@
const (
// 0 is reserved for errors
FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
- FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
- // The required label is only allowed in google.protobuf. In proto3 and Editions
- // it's explicitly prohibited. In Editions, the `field_presence` feature
- // can be used to get this behavior.
FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+ FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
)
// Enum value maps for FieldDescriptorProto_Label.
var (
FieldDescriptorProto_Label_name = map[int32]string{
1: "LABEL_OPTIONAL",
- 3: "LABEL_REPEATED",
2: "LABEL_REQUIRED",
+ 3: "LABEL_REPEATED",
}
FieldDescriptorProto_Label_value = map[string]int32{
"LABEL_OPTIONAL": 1,
- "LABEL_REPEATED": 3,
"LABEL_REQUIRED": 2,
+ "LABEL_REPEATED": 3,
}
)
@@ -347,11 +255,11 @@
}
func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
}
func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[3]
+ return &file_google_protobuf_descriptor_proto_enumTypes[2]
}
func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -408,11 +316,11 @@
}
func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
}
func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[4]
+ return &file_google_protobuf_descriptor_proto_enumTypes[3]
}
func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -474,11 +382,11 @@
}
func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
}
func (FieldOptions_CType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[5]
+ return &file_google_protobuf_descriptor_proto_enumTypes[4]
}
func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -536,11 +444,11 @@
}
func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
}
func (FieldOptions_JSType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[6]
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
}
func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -598,11 +506,11 @@
}
func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
}
func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[7]
+ return &file_google_protobuf_descriptor_proto_enumTypes[6]
}
func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -682,11 +590,11 @@
}
func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
}
func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[8]
+ return &file_google_protobuf_descriptor_proto_enumTypes[7]
}
func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -744,11 +652,11 @@
}
func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
}
func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[9]
+ return &file_google_protobuf_descriptor_proto_enumTypes[8]
}
func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -770,363 +678,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0}
}
-type FeatureSet_FieldPresence int32
-
-const (
- FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0
- FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1
- FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2
- FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3
-)
-
-// Enum value maps for FeatureSet_FieldPresence.
-var (
- FeatureSet_FieldPresence_name = map[int32]string{
- 0: "FIELD_PRESENCE_UNKNOWN",
- 1: "EXPLICIT",
- 2: "IMPLICIT",
- 3: "LEGACY_REQUIRED",
- }
- FeatureSet_FieldPresence_value = map[string]int32{
- "FIELD_PRESENCE_UNKNOWN": 0,
- "EXPLICIT": 1,
- "IMPLICIT": 2,
- "LEGACY_REQUIRED": 3,
- }
-)
-
-func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence {
- p := new(FeatureSet_FieldPresence)
- *p = x
- return p
-}
-
-func (x FeatureSet_FieldPresence) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
-}
-
-func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[10]
-}
-
-func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = FeatureSet_FieldPresence(num)
- return nil
-}
-
-// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead.
-func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
-}
-
-type FeatureSet_EnumType int32
-
-const (
- FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0
- FeatureSet_OPEN FeatureSet_EnumType = 1
- FeatureSet_CLOSED FeatureSet_EnumType = 2
-)
-
-// Enum value maps for FeatureSet_EnumType.
-var (
- FeatureSet_EnumType_name = map[int32]string{
- 0: "ENUM_TYPE_UNKNOWN",
- 1: "OPEN",
- 2: "CLOSED",
- }
- FeatureSet_EnumType_value = map[string]int32{
- "ENUM_TYPE_UNKNOWN": 0,
- "OPEN": 1,
- "CLOSED": 2,
- }
-)
-
-func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType {
- p := new(FeatureSet_EnumType)
- *p = x
- return p
-}
-
-func (x FeatureSet_EnumType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
-}
-
-func (FeatureSet_EnumType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[11]
-}
-
-func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = FeatureSet_EnumType(num)
- return nil
-}
-
-// Deprecated: Use FeatureSet_EnumType.Descriptor instead.
-func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1}
-}
-
-type FeatureSet_RepeatedFieldEncoding int32
-
-const (
- FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0
- FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1
- FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2
-)
-
-// Enum value maps for FeatureSet_RepeatedFieldEncoding.
-var (
- FeatureSet_RepeatedFieldEncoding_name = map[int32]string{
- 0: "REPEATED_FIELD_ENCODING_UNKNOWN",
- 1: "PACKED",
- 2: "EXPANDED",
- }
- FeatureSet_RepeatedFieldEncoding_value = map[string]int32{
- "REPEATED_FIELD_ENCODING_UNKNOWN": 0,
- "PACKED": 1,
- "EXPANDED": 2,
- }
-)
-
-func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding {
- p := new(FeatureSet_RepeatedFieldEncoding)
- *p = x
- return p
-}
-
-func (x FeatureSet_RepeatedFieldEncoding) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
-}
-
-func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[12]
-}
-
-func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = FeatureSet_RepeatedFieldEncoding(num)
- return nil
-}
-
-// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead.
-func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2}
-}
-
-type FeatureSet_Utf8Validation int32
-
-const (
- FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0
- FeatureSet_NONE FeatureSet_Utf8Validation = 1
- FeatureSet_VERIFY FeatureSet_Utf8Validation = 2
-)
-
-// Enum value maps for FeatureSet_Utf8Validation.
-var (
- FeatureSet_Utf8Validation_name = map[int32]string{
- 0: "UTF8_VALIDATION_UNKNOWN",
- 1: "NONE",
- 2: "VERIFY",
- }
- FeatureSet_Utf8Validation_value = map[string]int32{
- "UTF8_VALIDATION_UNKNOWN": 0,
- "NONE": 1,
- "VERIFY": 2,
- }
-)
-
-func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation {
- p := new(FeatureSet_Utf8Validation)
- *p = x
- return p
-}
-
-func (x FeatureSet_Utf8Validation) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
-}
-
-func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[13]
-}
-
-func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = FeatureSet_Utf8Validation(num)
- return nil
-}
-
-// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead.
-func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3}
-}
-
-type FeatureSet_MessageEncoding int32
-
-const (
- FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0
- FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1
- FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2
-)
-
-// Enum value maps for FeatureSet_MessageEncoding.
-var (
- FeatureSet_MessageEncoding_name = map[int32]string{
- 0: "MESSAGE_ENCODING_UNKNOWN",
- 1: "LENGTH_PREFIXED",
- 2: "DELIMITED",
- }
- FeatureSet_MessageEncoding_value = map[string]int32{
- "MESSAGE_ENCODING_UNKNOWN": 0,
- "LENGTH_PREFIXED": 1,
- "DELIMITED": 2,
- }
-)
-
-func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding {
- p := new(FeatureSet_MessageEncoding)
- *p = x
- return p
-}
-
-func (x FeatureSet_MessageEncoding) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
-}
-
-func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[14]
-}
-
-func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = FeatureSet_MessageEncoding(num)
- return nil
-}
-
-// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead.
-func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4}
-}
-
-type FeatureSet_JsonFormat int32
-
-const (
- FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0
- FeatureSet_ALLOW FeatureSet_JsonFormat = 1
- FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2
-)
-
-// Enum value maps for FeatureSet_JsonFormat.
-var (
- FeatureSet_JsonFormat_name = map[int32]string{
- 0: "JSON_FORMAT_UNKNOWN",
- 1: "ALLOW",
- 2: "LEGACY_BEST_EFFORT",
- }
- FeatureSet_JsonFormat_value = map[string]int32{
- "JSON_FORMAT_UNKNOWN": 0,
- "ALLOW": 1,
- "LEGACY_BEST_EFFORT": 2,
- }
-)
-
-func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat {
- p := new(FeatureSet_JsonFormat)
- *p = x
- return p
-}
-
-func (x FeatureSet_JsonFormat) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
-}
-
-func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[15]
-}
-
-func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Do not use.
-func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error {
- num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
- if err != nil {
- return err
- }
- *x = FeatureSet_JsonFormat(num)
- return nil
-}
-
-// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead.
-func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
-}
-
// Represents the identified object's effect on the element in the original
// .proto file.
type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1165,11 +716,11 @@
}
func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
}
func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[16]
+ return &file_google_protobuf_descriptor_proto_enumTypes[9]
}
func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1188,7 +739,7 @@
// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead.
func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0}
}
// The protocol compiler can output a FileDescriptorSet containing the .proto
@@ -1271,8 +822,8 @@
//
// If `edition` is present, this value must be "editions".
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
- // The edition of the proto file.
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ // The edition of the proto file, which is an opaque string.
+ Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"`
}
func (x *FileDescriptorProto) Reset() {
@@ -1391,11 +942,11 @@
return ""
}
-func (x *FileDescriptorProto) GetEdition() Edition {
+func (x *FileDescriptorProto) GetEdition() string {
if x != nil && x.Edition != nil {
return *x.Edition
}
- return Edition_EDITION_UNKNOWN
+ return ""
}
// Describes a message type.
@@ -1528,14 +1079,13 @@
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- // For external users: DO NOT USE. We are in the process of open sourcing
- // extension declaration and executing internal cleanups before it can be
- // used externally.
+ // go/protobuf-stripping-extension-declarations
+ // Like Metadata, but we use a repeated field to hold all extension
+ // declarations. This should avoid the size increases of transforming a large
+ // extension range into small ranges in generated binaries.
Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The verification state of the range.
- // TODO: flip the default to DECLARATION once all empty ranges
+ // TODO(b/278783756): flip the default to DECLARATION once all empty ranges
// are marked as UNVERIFIED.
Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
}
@@ -1591,13 +1141,6 @@
return nil
}
-func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState {
if x != nil && x.Verification != nil {
return *x.Verification
@@ -2229,8 +1772,6 @@
// is empty. When this option is not set, the package name will be used for
// determining the ruby package.
RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2422,13 +1963,6 @@
return ""
}
-func (x *FileOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2505,13 +2039,11 @@
// This should only be used as a temporary measure against broken builds due
// to the change in behavior for JSON field name conflicts.
//
- // TODO This is legacy behavior we plan to remove once downstream
+ // TODO(b/261750190) This is legacy behavior we plan to remove once downstream
// teams have had time to migrate.
//
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2591,13 +2123,6 @@
return false
}
-func (x *MessageOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2622,9 +2147,7 @@
// a more efficient representation on the wire. Rather than repeatedly
// writing the tag and type for each element, the entire array is encoded as
// a single length-delimited blob. In proto3, only explicit setting it to
- // false will avoid using packed encoding. This option is prohibited in
- // Editions, but the `repeated_field_encoding` feature can be used to control
- // the behavior.
+ // false will avoid using packed encoding.
Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
@@ -2682,12 +2205,11 @@
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// Indicate that the field value should not be printed out when using debug
// formats, e.g. when the field contains sensitive credentials.
- DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
- Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
- Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
- EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
+ DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+ Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+ Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"`
+ Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2798,6 +2320,14 @@
return FieldOptions_RETENTION_UNKNOWN
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType {
+ if x != nil && x.Target != nil {
+ return *x.Target
+ }
+ return FieldOptions_TARGET_TYPE_UNKNOWN
+}
+
func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType {
if x != nil {
return x.Targets
@@ -2805,20 +2335,6 @@
return nil
}
-func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault {
- if x != nil {
- return x.EditionDefaults
- }
- return nil
-}
-
-func (x *FieldOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2832,8 +2348,6 @@
unknownFields protoimpl.UnknownFields
extensionFields protoimpl.ExtensionFields
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2870,13 +2384,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13}
}
-func (x *OneofOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2902,13 +2409,11 @@
// and strips underscored from the fields before comparison in proto3 only.
// The new behavior takes `json_name` into account and applies to proto2 as
// well.
- // TODO Remove this legacy behavior once downstream teams have
+ // TODO(b/261750190) Remove this legacy behavior once downstream teams have
// had time to migrate.
//
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2972,13 +2477,6 @@
return false
}
-func (x *EnumOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2997,20 +2495,13 @@
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
- // Indicate that fields annotated with this enum value should not be printed
- // out when using debug formats, e.g. when the field contains sensitive
- // credentials.
- DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
// Default values for EnumValueOptions fields.
const (
- Default_EnumValueOptions_Deprecated = bool(false)
- Default_EnumValueOptions_DebugRedact = bool(false)
+ Default_EnumValueOptions_Deprecated = bool(false)
)
func (x *EnumValueOptions) Reset() {
@@ -3052,20 +2543,6 @@
return Default_EnumValueOptions_Deprecated
}
-func (x *EnumValueOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
-func (x *EnumValueOptions) GetDebugRedact() bool {
- if x != nil && x.DebugRedact != nil {
- return *x.DebugRedact
- }
- return Default_EnumValueOptions_DebugRedact
-}
-
func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -3079,8 +2556,6 @@
unknownFields protoimpl.UnknownFields
extensionFields protoimpl.ExtensionFields
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; in the very least,
@@ -3127,13 +2602,6 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16}
}
-func (x *ServiceOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *ServiceOptions) GetDeprecated() bool {
if x != nil && x.Deprecated != nil {
return *x.Deprecated
@@ -3160,8 +2628,6 @@
// this is a formalization for deprecating methods.
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -3218,13 +2684,6 @@
return Default_MethodOptions_IdempotencyLevel
}
-func (x *MethodOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -3335,171 +2794,6 @@
return ""
}
-// TODO Enums in C++ gencode (and potentially other languages) are
-// not well scoped. This means that each of the feature enums below can clash
-// with each other. The short names we've chosen maximize call-site
-// readability, but leave us very open to this scenario. A future feature will
-// be designed and implemented to handle this, hopefully before we ever hit a
-// conflict here.
-type FeatureSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
- FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
- EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
- RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
- Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
- MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
- JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-}
-
-func (x *FeatureSet) Reset() {
- *x = FeatureSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FeatureSet) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FeatureSet) ProtoMessage() {}
-
-func (x *FeatureSet) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead.
-func (*FeatureSet) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence {
- if x != nil && x.FieldPresence != nil {
- return *x.FieldPresence
- }
- return FeatureSet_FIELD_PRESENCE_UNKNOWN
-}
-
-func (x *FeatureSet) GetEnumType() FeatureSet_EnumType {
- if x != nil && x.EnumType != nil {
- return *x.EnumType
- }
- return FeatureSet_ENUM_TYPE_UNKNOWN
-}
-
-func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding {
- if x != nil && x.RepeatedFieldEncoding != nil {
- return *x.RepeatedFieldEncoding
- }
- return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
-}
-
-func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation {
- if x != nil && x.Utf8Validation != nil {
- return *x.Utf8Validation
- }
- return FeatureSet_UTF8_VALIDATION_UNKNOWN
-}
-
-func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding {
- if x != nil && x.MessageEncoding != nil {
- return *x.MessageEncoding
- }
- return FeatureSet_MESSAGE_ENCODING_UNKNOWN
-}
-
-func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
- if x != nil && x.JsonFormat != nil {
- return *x.JsonFormat
- }
- return FeatureSet_JSON_FORMAT_UNKNOWN
-}
-
-// A compiled specification for the defaults of a set of features. These
-// messages are generated from FeatureSet extensions and can be used to seed
-// feature resolution. The resolution with this object becomes a simple search
-// for the closest matching edition, followed by proto merges.
-type FeatureSetDefaults struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
- // The minimum supported edition (inclusive) when this was constructed.
- // Editions before this will not have defaults.
- MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"`
- // The maximum known edition (inclusive) when this was constructed. Editions
- // after this will not have reliable defaults.
- MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
-}
-
-func (x *FeatureSetDefaults) Reset() {
- *x = FeatureSetDefaults{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FeatureSetDefaults) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FeatureSetDefaults) ProtoMessage() {}
-
-func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead.
-func (*FeatureSetDefaults) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault {
- if x != nil {
- return x.Defaults
- }
- return nil
-}
-
-func (x *FeatureSetDefaults) GetMinimumEdition() Edition {
- if x != nil && x.MinimumEdition != nil {
- return *x.MinimumEdition
- }
- return Edition_EDITION_UNKNOWN
-}
-
-func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
- if x != nil && x.MaximumEdition != nil {
- return *x.MaximumEdition
- }
- return Edition_EDITION_UNKNOWN
-}
-
// Encapsulates information about the original source file from which a
// FileDescriptorProto was generated.
type SourceCodeInfo struct {
@@ -3561,7 +2855,7 @@
func (x *SourceCodeInfo) Reset() {
*x = SourceCodeInfo{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3574,7 +2868,7 @@
func (*SourceCodeInfo) ProtoMessage() {}
func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3587,7 +2881,7 @@
// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead.
func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
}
func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
@@ -3613,7 +2907,7 @@
func (x *GeneratedCodeInfo) Reset() {
*x = GeneratedCodeInfo{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3626,7 +2920,7 @@
func (*GeneratedCodeInfo) ProtoMessage() {}
func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3639,7 +2933,7 @@
// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead.
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
}
func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
@@ -3662,7 +2956,7 @@
func (x *DescriptorProto_ExtensionRange) Reset() {
*x = DescriptorProto_ExtensionRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3675,7 +2969,7 @@
func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3727,7 +3021,7 @@
func (x *DescriptorProto_ReservedRange) Reset() {
*x = DescriptorProto_ReservedRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3740,7 +3034,7 @@
func (*DescriptorProto_ReservedRange) ProtoMessage() {}
func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3784,6 +3078,10 @@
// Metadata.type, Declaration.type must have a leading dot for messages
// and enums.
Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"`
+ // Deprecated. Please use "repeated".
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+ IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"`
// If true, indicates that the number is reserved in the extension range,
// and any extension field with the number will fail to compile. Set this
// when a declared extension field is deleted.
@@ -3796,7 +3094,7 @@
func (x *ExtensionRangeOptions_Declaration) Reset() {
*x = ExtensionRangeOptions_Declaration{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3809,7 +3107,7 @@
func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3846,6 +3144,14 @@
return ""
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool {
+ if x != nil && x.IsRepeated != nil {
+ return *x.IsRepeated
+ }
+ return false
+}
+
func (x *ExtensionRangeOptions_Declaration) GetReserved() bool {
if x != nil && x.Reserved != nil {
return *x.Reserved
@@ -3878,7 +3184,7 @@
func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
*x = EnumDescriptorProto_EnumReservedRange{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3891,7 +3197,7 @@
func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3921,61 +3227,6 @@
return 0
}
-type FieldOptions_EditionDefault struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
-}
-
-func (x *FieldOptions_EditionDefault) Reset() {
- *x = FieldOptions_EditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FieldOptions_EditionDefault) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FieldOptions_EditionDefault) ProtoMessage() {}
-
-func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead.
-func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0}
-}
-
-func (x *FieldOptions_EditionDefault) GetEdition() Edition {
- if x != nil && x.Edition != nil {
- return *x.Edition
- }
- return Edition_EDITION_UNKNOWN
-}
-
-func (x *FieldOptions_EditionDefault) GetValue() string {
- if x != nil && x.Value != nil {
- return *x.Value
- }
- return ""
-}
-
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
@@ -3993,7 +3244,7 @@
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4006,7 +3257,7 @@
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4036,65 +3287,6 @@
return false
}
-// A map from every known edition with a unique set of defaults to its
-// defaults. Not all editions may be contained here. For a given edition,
-// the defaults at the closest matching edition ordered at or before it should
-// be used. This field must be in strict ascending order by edition.
-type FeatureSetDefaults_FeatureSetEditionDefault struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
- Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
-}
-
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
- *x = FeatureSetDefaults_FeatureSetEditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
-
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead.
-func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
-}
-
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition {
- if x != nil && x.Edition != nil {
- return *x.Edition
- }
- return Edition_EDITION_UNKNOWN
-}
-
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
type SourceCodeInfo_Location struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -4196,7 +3388,7 @@
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4209,7 +3401,7 @@
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4222,7 +3414,7 @@
// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead.
func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
}
func (x *SourceCodeInfo_Location) GetPath() []int32 {
@@ -4283,7 +3475,7 @@
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4296,7 +3488,7 @@
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4309,7 +3501,7 @@
// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead.
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0}
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
}
func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 {
@@ -4358,7 +3550,7 @@
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
- 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
@@ -4396,687 +3588,527 @@
0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
- 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
- 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
+ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
+ 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
- 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
- 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
- 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
- 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a,
+ 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55,
+ 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
- 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
- 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
- 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
- 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
- 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76,
- 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b,
- 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e,
- 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d,
- 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65,
- 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04,
- 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41,
- 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45,
- 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65,
- 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74,
- 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65,
- 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65,
- 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65,
- 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a,
- 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c,
- 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41,
- 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36,
- 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54,
- 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54,
- 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58,
- 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
- 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a,
- 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10,
- 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10,
- 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34,
- 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
- 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
- 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45,
- 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51,
- 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a,
+ 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22,
+ 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01,
+ 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68,
+ 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a,
+ 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63,
+ 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65,
+ 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34,
+ 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
+ 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
+ 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06,
+ 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70,
+ 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65,
+ 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69,
+ 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f,
+ 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e,
+ 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a,
- 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
- 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61,
- 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
- 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
- 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f,
+ 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12,
+ 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04,
+ 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05,
+ 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34,
+ 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44,
+ 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f,
+ 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49,
+ 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f,
+ 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53,
+ 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42,
+ 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
+ 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
+ 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43,
+ 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c,
+ 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
+ 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12,
+ 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45,
+ 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75,
+ 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a,
+ 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72,
+ 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d,
+ 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d,
+ 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
+ 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83,
+ 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
- 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca,
- 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
- 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
- 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f,
- 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74,
- 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c,
- 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61,
- 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61,
- 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28,
- 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68,
- 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72,
- 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c,
- 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53,
- 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f,
- 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
- 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a,
- 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
- 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
- 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12,
- 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f,
- 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61,
- 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a,
- 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
- 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f,
- 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
- 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
- 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70,
- 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a,
- 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79,
- 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89,
+ 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f,
+ 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46,
+ 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61,
+ 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a,
+ 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76,
+ 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
+ 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c,
+ 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61,
+ 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18,
+ 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45,
+ 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16,
+ 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69,
+ 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74,
+ 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44,
+ 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13,
+ 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01,
+ 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47,
+ 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35,
+ 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+ 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20,
+ 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47,
+ 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
+ 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
+ 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
+ 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
+ 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
+ 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
+ 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
+ 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
+ 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
+ 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
+ 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
+ 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
+ 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
+ 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64,
+ 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
+ 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c,
+ 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08,
+ 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb,
+ 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74,
+ 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
+ 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72,
+ 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+ 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07,
+ 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a,
+ 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a,
+ 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70,
+ 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09,
+ 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f,
+ 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18,
+ 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e,
+ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a,
+ 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28,
+ 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10,
+ 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62,
+ 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
+ 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
+ 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
+ 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
+ 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
+ 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
+ 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
+ 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
+ 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+ 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
+ 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
+ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
+ 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
+ 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
+ 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+ 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a,
- 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09,
- 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44,
- 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45,
- 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
- 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e,
- 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c,
- 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69,
- 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53,
- 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f,
- 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f,
- 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
+ 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
+ 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56,
- 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67,
- 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63,
- 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02,
- 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65,
- 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e,
- 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+ 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+ 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
+ 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+ 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+ 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
- 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
- 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09,
- 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52,
- 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47,
- 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
- 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52,
- 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61,
- 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
- 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61,
- 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61,
- 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
- 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65,
- 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
- 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b,
- 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37,
- 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07,
- 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a,
- 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
- 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c,
- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35,
- 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
- 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54,
- 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d,
- 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45,
- 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e,
- 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a,
- 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
- 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01,
- 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10,
- 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41,
- 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10,
- 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47,
- 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a,
- 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
- 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41,
- 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43,
- 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07,
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
- 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69,
- 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70,
- 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a,
- 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
- 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a,
- 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74,
- 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
- 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
- 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
- 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
- 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
- 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08,
- 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64,
- 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
- 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
- 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
- 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65,
- 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a,
- 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
- 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c,
- 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
- 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a,
- 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53,
- 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54,
- 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03,
- 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61,
- 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
- 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e,
- 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e,
- 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
- 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
- 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
- 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a,
- 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61,
- 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e,
- 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69,
- 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88,
- 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
- 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50,
- 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
- 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50,
- 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65,
- 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06,
- 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50,
- 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52,
- 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2,
- 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2,
- 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72,
- 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e,
+ 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a,
+ 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74,
+ 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50,
+ 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10,
+ 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c,
+ 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+ 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64,
+ 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17,
+ 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49,
+ 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a,
+ 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8,
+ 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01,
- 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07,
- 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e,
- 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78,
- 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69,
- 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01,
- 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46,
- 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e,
- 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
- 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06,
- 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42,
- 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a,
- 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e,
- 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50,
- 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44,
- 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
- 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12,
- 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52,
- 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10,
- 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a,
- 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
- 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54,
- 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
- 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50,
- 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e,
- 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f,
- 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
- 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a,
- 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a,
- 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
- 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c,
- 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01,
- 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22,
- 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a,
- 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b,
- 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10,
- 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54,
- 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
- 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
- 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
- 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44,
- 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
- 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69,
- 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d,
- 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
- 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a,
- 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a,
- 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
- 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02,
- 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e,
- 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d,
- 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64,
- 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74,
- 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
- 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64,
- 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d,
- 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61,
- 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d,
- 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e,
- 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61,
- 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
- 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d,
- 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e,
- 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e,
- 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a,
- 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e,
- 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05,
- 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a,
- 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10,
- 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30,
- 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17,
- 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54,
- 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e,
- 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
- 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
- 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
- 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a,
- 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
- 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01,
- 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63,
- 0x74, 0x69, 0x6f, 0x6e,
+ 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a,
+ 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76,
+ 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65,
+ 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43,
+ 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01,
+ 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05,
+ 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65,
+ 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e,
+ 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65,
+ 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65,
+ 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0,
+ 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
+ 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62,
+ 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03,
+ 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
+ 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73,
+ 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
+ 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10,
+ 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+ 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e,
}
var (
@@ -5091,136 +4123,103 @@
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
- (Edition)(0), // 0: google.protobuf.Edition
- (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState
- (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
- (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label
- (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode
- (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType
- (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType
- (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention
- (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType
- (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel
- (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence
- (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType
- (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
- (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
- (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
- (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
- (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 27: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption
- (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet
- (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults
- (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange
- (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
- (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart
- (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation
+ (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState
+ (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType
+ (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 20: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption
+ (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart
+ (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
- 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
- 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
- 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
- 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
- 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
- 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
- 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
- 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
- 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
- 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
- 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
- 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
- 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
- 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
- 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
- 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
- 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
- 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
- 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
- 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
- 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet
- 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 71, // [71:71] is the sub-list for method output_type
- 71, // [71:71] is the sub-list for method input_type
- 71, // [71:71] is the sub-list for extension type_name
- 71, // [71:71] is the sub-list for extension extendee
- 0, // [0:71] is the sub-list for field type_name
+ 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+ 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 49, // [49:49] is the sub-list for method output_type
+ 49, // [49:49] is the sub-list for method input_type
+ 49, // [49:49] is the sub-list for extension type_name
+ 49, // [49:49] is the sub-list for extension extendee
+ 0, // [0:49] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5476,32 +4475,6 @@
}
}
file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FeatureSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FeatureSetDefaults); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceCodeInfo); i {
case 0:
return &v.state
@@ -5513,7 +4486,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GeneratedCodeInfo); i {
case 0:
return &v.state
@@ -5525,7 +4498,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DescriptorProto_ExtensionRange); i {
case 0:
return &v.state
@@ -5537,7 +4510,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DescriptorProto_ReservedRange); i {
case 0:
return &v.state
@@ -5549,7 +4522,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExtensionRangeOptions_Declaration); i {
case 0:
return &v.state
@@ -5561,7 +4534,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
case 0:
return &v.state
@@ -5573,19 +4546,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FieldOptions_EditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*UninterpretedOption_NamePart); i {
case 0:
return &v.state
@@ -5597,19 +4558,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SourceCodeInfo_Location); i {
case 0:
return &v.state
@@ -5621,7 +4570,7 @@
return nil
}
}
- file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GeneratedCodeInfo_Annotation); i {
case 0:
return &v.state
@@ -5639,8 +4588,8 @@
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
- NumEnums: 17,
- NumMessages: 32,
+ NumEnums: 10,
+ NumMessages: 28,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 9de51be..580b232 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -237,8 +237,7 @@
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
- // type.googleapis.com. As of May 2023, there are no widely used type server
- // implementations and no plans to implement one.
+ // type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7a32c27..11af739 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,7 +4,7 @@
# 4d63.com/gochecknoglobals v0.2.1
## explicit; go 1.15
4d63.com/gochecknoglobals/checknoglobals
-# cloud.google.com/go v0.111.0
+# cloud.google.com/go v0.110.10
## explicit; go 1.19
cloud.google.com/go
cloud.google.com/go/internal
@@ -203,7 +203,7 @@
# github.com/fatih/structtag v1.2.0
## explicit; go 1.12
github.com/fatih/structtag
-# github.com/felixge/httpsnoop v1.0.4
+# github.com/felixge/httpsnoop v1.0.3
## explicit; go 1.13
github.com/felixge/httpsnoop
# github.com/firefart/nonamedreturns v1.0.4
@@ -225,13 +225,6 @@
github.com/go-critic/go-critic/checkers/internal/lintutil
github.com/go-critic/go-critic/checkers/rulesdata
github.com/go-critic/go-critic/linter
-# github.com/go-logr/logr v1.3.0
-## explicit; go 1.18
-github.com/go-logr/logr
-github.com/go-logr/logr/funcr
-# github.com/go-logr/stdr v1.2.2
-## explicit; go 1.16
-github.com/go-logr/stdr
# github.com/go-toolsmith/astcast v1.1.0
## explicit; go 1.16
github.com/go-toolsmith/astcast
@@ -384,7 +377,7 @@
github.com/google/safehtml/internal/template/raw
github.com/google/safehtml/template
github.com/google/safehtml/uncheckedconversions
-# github.com/google/uuid v1.5.0
+# github.com/google/uuid v1.4.0
## explicit
github.com/google/uuid
# github.com/googleapis/enterprise-certificate-proxy v0.3.2
@@ -803,34 +796,6 @@
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
-## explicit; go 1.20
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
-## explicit; go 1.20
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.21.0
-## explicit; go 1.20
-go.opentelemetry.io/otel
-go.opentelemetry.io/otel/attribute
-go.opentelemetry.io/otel/baggage
-go.opentelemetry.io/otel/codes
-go.opentelemetry.io/otel/internal
-go.opentelemetry.io/otel/internal/attribute
-go.opentelemetry.io/otel/internal/baggage
-go.opentelemetry.io/otel/internal/global
-go.opentelemetry.io/otel/propagation
-go.opentelemetry.io/otel/semconv/v1.17.0
-# go.opentelemetry.io/otel/metric v1.21.0
-## explicit; go 1.20
-go.opentelemetry.io/otel/metric
-go.opentelemetry.io/otel/metric/embedded
-# go.opentelemetry.io/otel/trace v1.21.0
-## explicit; go 1.20
-go.opentelemetry.io/otel/trace
-go.opentelemetry.io/otel/trace/embedded
# go.tmz.dev/musttag v0.7.2
## explicit; go 1.19
go.tmz.dev/musttag
@@ -849,7 +814,7 @@
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.18.0
+# golang.org/x/crypto v0.16.0
## explicit; go 1.18
golang.org/x/crypto/chacha20
golang.org/x/crypto/chacha20poly1305
@@ -871,7 +836,7 @@
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.20.0
+# golang.org/x/net v0.19.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/http/httpguts
@@ -880,7 +845,7 @@
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.16.0
+# golang.org/x/oauth2 v0.15.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@@ -896,11 +861,11 @@
golang.org/x/perf/benchstat
golang.org/x/perf/internal/stats
golang.org/x/perf/storage/benchfmt
-# golang.org/x/sync v0.6.0
+# golang.org/x/sync v0.5.0
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.16.0
+# golang.org/x/sys v0.15.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -1005,7 +970,11 @@
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/txtar
-# google.golang.org/api v0.156.0
+# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2
+## explicit; go 1.17
+golang.org/x/xerrors
+golang.org/x/xerrors/internal
+# google.golang.org/api v0.153.0
## explicit; go 1.19
google.golang.org/api/compute/v1
google.golang.org/api/googleapi
@@ -1025,7 +994,7 @@
google.golang.org/api/transport/grpc
google.golang.org/api/transport/http
google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/appengine v1.6.8
+# google.golang.org/appengine v1.6.7
## explicit; go 1.11
google.golang.org/appengine
google.golang.org/appengine/internal
@@ -1057,7 +1026,7 @@
google.golang.org/appengine/v2/mail
google.golang.org/appengine/v2/memcache
google.golang.org/appengine/v2/user
-# google.golang.org/genproto v0.0.0-20231212172506-995d672761c0
+# google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17
## explicit; go 1.19
google.golang.org/genproto/googleapis/appengine/logging/v1
google.golang.org/genproto/googleapis/cloud/audit
@@ -1067,7 +1036,7 @@
google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0
+# google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
@@ -1075,13 +1044,13 @@
google.golang.org/genproto/googleapis/api/label
google.golang.org/genproto/googleapis/api/metric
google.golang.org/genproto/googleapis/api/monitoredres
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240102182953-50ed04b92917
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/code
google.golang.org/genproto/googleapis/rpc/context/attribute_context
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.60.1
+# google.golang.org/grpc v1.59.0
## explicit; go 1.19
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -1129,7 +1098,6 @@
google.golang.org/grpc/internal/pretty
google.golang.org/grpc/internal/resolver
google.golang.org/grpc/internal/resolver/dns
-google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
google.golang.org/grpc/internal/resolver/unix
google.golang.org/grpc/internal/serviceconfig
@@ -1141,14 +1109,13 @@
google.golang.org/grpc/metadata
google.golang.org/grpc/peer
google.golang.org/grpc/resolver
-google.golang.org/grpc/resolver/dns
google.golang.org/grpc/resolver/manual
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.32.0
-## explicit; go 1.17
+# google.golang.org/protobuf v1.31.0
+## explicit; go 1.11
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire