diff --git a/go.mod b/go.mod index 3bebcaf8..a400b438 100644 --- a/go.mod +++ b/go.mod @@ -18,14 +18,14 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/sethvargo/go-envconfig v1.0.3 - go.opentelemetry.io/contrib/detectors/aws/lambda v0.51.0 - go.opentelemetry.io/contrib/exporters/autoexport v0.51.0 - go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 - go.opentelemetry.io/contrib/propagators/b3 v1.26.0 - go.opentelemetry.io/otel v1.26.0 - go.opentelemetry.io/otel/sdk v1.26.0 - go.opentelemetry.io/otel/trace v1.26.0 + go.opentelemetry.io/contrib/detectors/aws/lambda v0.52.0 + go.opentelemetry.io/contrib/exporters/autoexport v0.52.0 + go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 + go.opentelemetry.io/contrib/propagators/b3 v1.27.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 golang.org/x/sync v0.7.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -38,10 +38,10 @@ require ( github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.7 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect @@ -53,29 +53,30 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.52.3 // indirect - github.com/prometheus/procfs v0.13.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.48.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.26.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.26.0 // indirect + github.com/prometheus/common v0.53.0 // 
indirect + github.com/prometheus/procfs v0.15.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus v0.52.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.2.0 // indirect - golang.org/x/net v0.24.0 // indirect + golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240415151819-79826c84ba32 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240415151819-79826c84ba32 // indirect - google.golang.org/grpc v1.63.2 // indirect - google.golang.org/protobuf v1.33.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/go.sum b/go.sum index bf3a8dfa..8d444d45 100644 --- a/go.sum +++ b/go.sum @@ -22,14 +22,14 @@ github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7 h1:/FUtT3xsoHO3cfh+I/kCbcMCN98Q github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.7/go.mod h1:MaCAgWpGooQoCWZnMur97rGn5dp350w2+CeiV5406wE= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.5 h1:UsJC9BCSLG9tamqukeFs2IJUGvCnLRxhIwb8Ru9dEME= github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.5/go.mod h1:OfO65DNsDX+wgWmjljN55I+Dzo4nbhWNlNFuco5AAgw= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 h1:dZXY07Dm59TxAjJcUfNMJHLDI/gLMxTRZefn2jFAVsw= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1/go.mod h1:lVLqEtX+ezgtfalyJs7Peb0uv9dEpAQP5yuq2O26R44= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.3 h1:idREjl1I4PVmHSeRgwtvA7/xfQj/aN4rRHgHBq6pr5I= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.3/go.mod h1:uNhUf9Z3MT6Ex+u0ADa8r3MKK5zjuActEfXQPo4YqEI= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 h1:UXqEWQI0n+q0QixzU0yUUQBZXRd5037qdInTIHFTl98= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9/go.mod h1:xP6Gq6fzGZT8w/ZN+XvGMZ2RU1LeEs7b2yUP5DN8NY4= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 h1:6tayEze2Y+hiL3kdnEUxSPsP+pJsUfwLSFspFl1ru9Q= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6/go.mod h1:qVNb/9IOVsLCZh0x2lnagrBwQ9fxajUpXS7OZfIsKn0= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.8 h1:yEeIld7Fh/2iM4pYeQw8a3kH6OYcyIn6lwKlUFiVk7Y= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.8/go.mod 
h1:lZJMX2Z5/rQ6OlSbBnW1WWScK6ngLt43xtqM8voMm2w= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.7 h1:uO5XR6QGBcmPyo2gxofYJLFkcVQ4izOoGDNenlZhTEk= @@ -64,10 +64,12 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= @@ -78,6 +80,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -90,79 +94,89 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.52.3 h1:5f8uj6ZwHSscOGNdIQg6OiZv/ybiK2CO2q2drVZAQSA= -github.com/prometheus/common v0.52.3/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= -github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= -github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI5Ek= +github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sethvargo/go-envconfig v1.0.3 h1:ZDxFGT1M7RPX0wgDOCdZMidrEB+NrayYr6fL0/+pk4I= github.com/sethvargo/go-envconfig v1.0.3/go.mod h1:JLd0KFWQYzyENqnEPWWZ49i4vzZo/6nRidxI8YvGiHw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/contrib/detectors/aws/lambda v0.51.0 h1:3FFQT6VuxR33+NsTV02jPqv464FtXFC+Tv2r0HgZId4= -go.opentelemetry.io/contrib/detectors/aws/lambda v0.51.0/go.mod h1:FG9+5oJ2rudGXVZp3o0mH2lYpQ2X9GTQ6Y0I4w+U1ZI= -go.opentelemetry.io/contrib/exporters/autoexport v0.51.0 h1:imlL5MBzKu+NWhnJM62bHws6m+6LN8HMT3V9PcSTbaY= -go.opentelemetry.io/contrib/exporters/autoexport v0.51.0/go.mod h1:gn1wFA1uVEKIXrM3DC7SN9ee83oJ0yALY/HbUfqMszo= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 h1:FGMfzzxfkNkw+gvKJOeT8dSmBjgrSFh+ClLl+OMKPno= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0/go.mod h1:hmHUXiKhyxbIhuNfG5ZTySq9HqqxJFNxaFOfXXvoMmQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= -go.opentelemetry.io/contrib/propagators/b3 v1.26.0 h1:wgFbVA+bK2k+fGVfDOCOG4cfDAoppyr5sI2dVlh8MWM= -go.opentelemetry.io/contrib/propagators/b3 v1.26.0/go.mod h1:DDktFXxA+fyItAAM0Sbl5OBH7KOsCTjvbBdPKtoIf/k= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 
h1:+hm+I+KigBy3M24/h1p/NHkUx/evbLH0PNcjpMyCHc4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0/go.mod h1:NjC8142mLvvNT6biDpaMjyz78kyEHIwAJlSX0N9P5KI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 h1:HGZWGmCVRCVyAs2GQaiHQPbDHo+ObFWeUEOd+zDnp64= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0/go.mod h1:SaH+v38LSCHddyk7RGlU9uZyQoRrKao6IBnJw6Kbn+c= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0/go.mod h1:wnJIG4fOqyynOnnQF/eQb4/16VlX2EJAHhHgqIqWfAo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= -go.opentelemetry.io/otel/exporters/prometheus v0.48.0 h1:sBQe3VNGUjY9IKWQC6z2lNqa5iGbDSxhs60ABwK4y0s= -go.opentelemetry.io/otel/exporters/prometheus v0.48.0/go.mod h1:DtrbMzoZWwQHyrQmCfLam5DZbnmorsGbOtTbYHycU5o= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.26.0 h1:5fnmgteaar1VcAA69huatudPduNFz7guRtCmfZCooZI= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.26.0/go.mod h1:lsPccfZiz1cb1AhBPmicWM2E4F1VynFXEvD8SEBS4TM= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 h1:0W5o9SzoR15ocYHEQfvfipzcNog1lBxOLfnex91Hk6s= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0/go.mod h1:zVZ8nz+VSggWmnh6tTsJqXQ7rU4xLwRtna1M4x5jq58= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= -go.opentelemetry.io/otel/sdk/metric v1.26.0 h1:cWSks5tfriHPdWFnl+qpX3P681aAYqlZHcAyHw5aU9Y= -go.opentelemetry.io/otel/sdk/metric v1.26.0/go.mod h1:ClMFFknnThJCksebJwz7KIyEDHO+nTB6gK8obLy8RyE= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/collector/pdata v1.7.0 h1:/WNsBbE6KM3TTPUb9v/5B7IDqnDkgf8GyFhVJJqu7II= +go.opentelemetry.io/collector/pdata v1.7.0/go.mod h1:ehCBBA5GoFrMZkwyZAKGY/lAVSgZf6rzUt3p9mddmPU= +go.opentelemetry.io/contrib/bridges/prometheus v0.52.0 h1:NNkEjNcUXeNcxDTNLyyAmFHefByhj8YU1AojgcPqbfs= +go.opentelemetry.io/contrib/bridges/prometheus v0.52.0/go.mod h1:Dv7d2yUvusfblvi9qMQby+youF09GiUVWRWkdogrDtE= +go.opentelemetry.io/contrib/detectors/aws/lambda v0.52.0 h1:aZAZUtzXd7dklbWDqf0s8slF2KmGR2scZAtshZ3IGTI= +go.opentelemetry.io/contrib/detectors/aws/lambda v0.52.0/go.mod h1:0oIsMo4DjOJNB/mn/bssl3ba7TEZhSgqe26vRMoxHv4= +go.opentelemetry.io/contrib/exporters/autoexport v0.52.0 h1:G/AGl5O78ZKHs63Rl65P1HyZfDnTyxjv8r7dbdZ9fB0= +go.opentelemetry.io/contrib/exporters/autoexport v0.52.0/go.mod h1:WoVWPZjJ7EB5Z9aROW1DZuRIoFEemxmhCdZJlcjY2AE= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 h1:kAytSRJYoIy4eJtDOfSGf9LOCD4QdXFN37YJs0+bYrw= 
+go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0/go.mod h1:l6VnFEqDdeMSMfwULTDDY9ewlnlVLhmvBainVT+h/Zs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= +go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0= +go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 h1:bFgvUr3/O4PHj3VQcFEuYKvRZJX1SJDQ+11JXuSB3/w= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0/go.mod h1:xJntEd2KL6Qdg5lwp97HMLQDVeAhrYxmzFseAMDPQ8I= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 h1:CIHWikMsN3wO+wq1Tp5VGdVRTcON+DmOJSfDjXypKOc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0/go.mod h1:TNupZ6cxqyFEpLXAZW7On+mLFL0/g0TE3unIYL91xWc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0 h1:Er5I1g/YhfYv9Affk9nJLfH/+qCCVVg1f2R9AbJfqDQ= +go.opentelemetry.io/otel/exporters/prometheus v0.49.0/go.mod h1:KfQ1wpjf3zsHjzP149P4LyAwWRupc6c7t1ZJ9eXpKQM= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 h1:/jlt1Y8gXWiHG9FBx6cJaIC5hYx5Fe64nC8w5Cylt/0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0/go.mod h1:bmToOGOBZ4hA9ghphIc1PAf66VA8KOtsuy3+ScStG20= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/metric v1.27.0 h1:5uGNOlpXi+Hbo/DRoI31BSb1v+OGcpv2NemcCrOL8gI= +go.opentelemetry.io/otel/sdk/metric v1.27.0/go.mod h1:we7jJVrYN2kh3mVBlswtPU22K0SA+769l93J6bsyvqw= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.2.0/go.mod 
h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240415151819-79826c84ba32 h1:wSV4wyEpYVUdxkO7uzZ9Fr1g7a+wllr4fuQqTaR53pw= -google.golang.org/genproto/googleapis/api v0.0.0-20240415151819-79826c84ba32/go.mod h1:wTHjrkbcS8AoQbb/0v9bFIPItZQPAsyVfgG9YPUhjAM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240415151819-79826c84ba32 h1:GRBZAfftU6zhUwoY2cfnbtig9cTfQKXH0rAdBFVxra4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240415151819-79826c84ba32/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 h1:Q2RxlXqh1cgzzUgV261vBO2jI5R/3DD1J2pM0nI4NhU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md index 0c2a6379..af594916 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.32.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.1 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.32.0 (2024-05-02) + +* **Feature**: This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs. + # v1.31.1 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go index d33688b2..f7dbfd95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go @@ -14,12 +14,16 @@ import ( // This operation allows you to perform batch reads or writes on data stored in // DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must // specify an equality condition on all key attributes. This enforces that each -// SELECT statement in a batch returns at most a single item. The entire batch must -// consist of either read statements or write statements, you cannot mix both in -// one batch. A HTTP 200 response does not mean that all statements in the +// SELECT statement in a batch returns at most a single item. +// +// The entire batch must consist of either read statements or write statements, +// you cannot mix both in one batch. +// +// A HTTP 200 response does not mean that all statements in the // BatchExecuteStatement succeeded. Error details for individual statements can be -// found under the Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error) -// field of the BatchStatementResponse for each statement. +// found under the [Error]field of the BatchStatementResponse for each statement. +// +// [Error]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error func (c *Client) BatchExecuteStatement(ctx context.Context, params *BatchExecuteStatementInput, optFns ...func(*Options)) (*BatchExecuteStatementOutput, error) { if params == nil { params = &BatchExecuteStatementInput{} @@ -44,13 +48,18 @@ type BatchExecuteStatementInput struct { // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. 
In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go index bd39f8d3..106d9915 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go @@ -13,43 +13,57 @@ import ( ) // The BatchGetItem operation returns the attributes of one or more items from one -// or more tables. You identify requested items by primary key. A single operation -// can retrieve up to 16 MB of data, which can contain as many as 100 items. -// BatchGetItem returns a partial result if the response size limit is exceeded, -// the table's provisioned throughput is exceeded, more than 1MB per partition is -// requested, or an internal processing failure occurs. If a partial result is -// returned, the operation returns a value for UnprocessedKeys . You can use this -// value to retry the operation starting with the next item to get. If you request -// more than 100 items, BatchGetItem returns a ValidationException with the -// message "Too many items requested for the BatchGetItem call." For example, if -// you ask to retrieve 100 items, but each individual item is 300 KB in size, the -// system returns 52 items (so as not to exceed the 16 MB limit). It also returns -// an appropriate UnprocessedKeys value so you can get the next page of results. -// If desired, your application can include its own logic to assemble the pages of -// results into one dataset. If none of the items can be processed due to -// insufficient provisioned throughput on all of the tables in the request, then -// BatchGetItem returns a ProvisionedThroughputExceededException . If at least one -// of the items is successfully processed, then BatchGetItem completes -// successfully, while returning the keys of the unread items in UnprocessedKeys . +// or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as many +// as 100 items. BatchGetItem returns a partial result if the response size limit +// is exceeded, the table's provisioned throughput is exceeded, more than 1MB per +// partition is requested, or an internal processing failure occurs. If a partial +// result is returned, the operation returns a value for UnprocessedKeys . You can +// use this value to retry the operation starting with the next item to get. +// +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." +// +// For example, if you ask to retrieve 100 items, but each individual item is 300 +// KB in size, the system returns 52 items (so as not to exceed the 16 MB limit). +// It also returns an appropriate UnprocessedKeys value so you can get the next +// page of results. If desired, your application can include its own logic to +// assemble the pages of results into one dataset. 
+// +// If none of the items can be processed due to insufficient provisioned +// throughput on all of the tables in the request, then BatchGetItem returns a +// ProvisionedThroughputExceededException . If at least one of the items is +// successfully processed, then BatchGetItem completes successfully, while +// returning the keys of the unread items in UnprocessedKeys . +// // If DynamoDB returns any unprocessed items, you should retry the batch operation // on those items. However, we strongly recommend that you use an exponential // backoff algorithm. If you retry the batch operation immediately, the underlying // read or write requests can still fail due to throttling on the individual // tables. If you delay the batch operation using exponential backoff, the -// individual requests in the batch are much more likely to succeed. For more -// information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) -// in the Amazon DynamoDB Developer Guide. By default, BatchGetItem performs -// eventually consistent reads on every table in the request. If you want strongly -// consistent reads instead, you can set ConsistentRead to true for any or all -// tables. In order to minimize response latency, BatchGetItem may retrieve items -// in parallel. When designing your application, keep in mind that DynamoDB does -// not return items in any particular order. To help parse the response by item, -// include the primary key values for the items in your request in the -// ProjectionExpression parameter. If a requested item does not exist, it is not -// returned in the result. Requests for nonexistent items consume the minimum read -// capacity units according to the type of read. For more information, see Working -// with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) -// in the Amazon DynamoDB Developer Guide. +// individual requests in the batch are much more likely to succeed. +// +// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every table in +// the request. If you want strongly consistent reads instead, you can set +// ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem may retrieve items in +// parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include the +// primary key values for the items in your request in the ProjectionExpression +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to the +// type of read. For more information, see [Working with Tables]in the Amazon DynamoDB Developer Guide. 
+// +// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations +// [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations func (c *Client) BatchGetItem(ctx context.Context, params *BatchGetItemInput, optFns ...func(*Options)) (*BatchGetItemOutput, error) { if params == nil { params = &BatchGetItemInput{} @@ -70,59 +84,87 @@ type BatchGetItemInput struct { // A map of one or more table names or table ARNs and, for each table, a map that // describes one or more items to retrieve from that table. Each table name or ARN - // can be used only once per BatchGetItem request. Each element in the map of - // items to retrieve consists of the following: + // can be used only once per BatchGetItem request. + // + // Each element in the map of items to retrieve consists of the following: + // // - ConsistentRead - If true , a strongly consistent read is used; if false (the // default), an eventually consistent read is used. + // // - ExpressionAttributeNames - One or more substitution tokens for attribute // names in the ProjectionExpression parameter. The following are some use cases // for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being - // misinterpreted in an expression. Use the # character in an expression to - // dereference an attribute name. For example, consider the following attribute - // name: - // - Percentile The name of this attribute conflicts with a reserved word, so it - // cannot be used directly in an expression. (For the complete list of reserved - // words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : - // - {"#P":"Percentile"} You could then use this substitution in an expression, - // as in this example: - // - #P = :val Tokens that begin with the : character are expression attribute - // values, which are placeholders for the actual value at runtime. For more - // information about expression attribute names, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // misinterpreted in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // - Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be used + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in + // the Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // + // - {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // - #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. 
+ // + // For more information about expression attribute names, see [Accessing Item Attributes]in the Amazon + // DynamoDB Developer Guide. + // // - Keys - An array of primary key attribute values that define specific items // in the table. For each primary key, you must provide all of the key attributes. // For example, with a simple primary key, you only need to provide the partition // key value. For a composite key, you must provide both the partition key value // and the sort key value. + // // - ProjectionExpression - A string that identifies one or more attributes to // retrieve from the table. These attributes can include scalars, sets, or elements // of a JSON document. The attributes in the expression must be separated by - // commas. If no attribute names are specified, then all attributes are returned. - // If any of the requested attributes are not found, they do not appear in the - // result. For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // commas. + // + // If no attribute names are specified, then all attributes are returned. If any + // of the requested attributes are not found, they do not appear in the result. + // + // For more information, see [Accessing Item Attributes]in the Amazon DynamoDB Developer Guide. + // // - AttributesToGet - This is a legacy parameter. Use ProjectionExpression - // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // instead. For more information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html // // This member is required. RequestItems map[string]types.KeysAndAttributes // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -132,9 +174,12 @@ type BatchGetItemInput struct { // Represents the output of a BatchGetItem operation. type BatchGetItemOutput struct { - // The read capacity units consumed by the entire BatchGetItem operation. 
Each - // element consists of: + // The read capacity units consumed by the entire BatchGetItem operation. + // + // Each element consists of: + // // - TableName - The table that consumed the provisioned throughput. + // // - CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []types.ConsumedCapacity @@ -146,16 +191,21 @@ type BatchGetItemOutput struct { // A map of tables and their respective keys that were not processed with the // current response. The UnprocessedKeys value is in the same form as RequestItems // , so the value can be provided directly to a subsequent BatchGetItem operation. - // For more information, see RequestItems in the Request Parameters section. Each - // element consists of: + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // // - Keys - An array of primary key attribute values that define specific items // in the table. + // // - ProjectionExpression - One or more attributes to be retrieved from the table // or index. By default, all attributes are returned. If a requested attribute is // not found, it does not appear in the result. + // // - ConsistentRead - The consistency of a read operation. If set to true , then // a strongly consistent read is used; otherwise, an eventually consistent read is // used. + // // If there are no unprocessed keys remaining, the response contains an empty // UnprocessedKeys map. UnprocessedKeys map[string]types.KeysAndAttributes diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go index cae206a0..08614148 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go @@ -17,57 +17,78 @@ import ( // the network, consisting of up to 25 item put or delete operations. While // individual items can be up to 400 KB once stored, it's important to note that an // item's representation might be greater than 400KB while being sent in DynamoDB's -// JSON format for the API call. For more details on this distinction, see Naming -// Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html) -// . BatchWriteItem cannot update items. If you perform a BatchWriteItem operation +// JSON format for the API call. For more details on this distinction, see [Naming Rules and Data Types]. +// +// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation // on an existing item, that item's values will be overwritten by the operation and // it will appear like it was updated. To update items, we recommend you use the -// UpdateItem action. The individual PutItem and DeleteItem operations specified -// in BatchWriteItem are atomic; however BatchWriteItem as a whole is not. If any -// requested operations fail because the table's provisioned throughput is exceeded -// or an internal processing failure occurs, the failed operations are returned in -// the UnprocessedItems response parameter. You can investigate and optionally -// resend the requests. Typically, you would call BatchWriteItem in a loop. Each +// UpdateItem action. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. 
If any requested +// operations fail because the table's provisioned throughput is exceeded or an +// internal processing failure occurs, the failed operations are returned in the +// UnprocessedItems response parameter. You can investigate and optionally resend +// the requests. Typically, you would call BatchWriteItem in a loop. Each // iteration would check for unprocessed items and submit a new BatchWriteItem -// request with those unprocessed items until all items have been processed. If -// none of the items can be processed due to insufficient provisioned throughput on -// all of the tables in the request, then BatchWriteItem returns a -// ProvisionedThroughputExceededException . If DynamoDB returns any unprocessed -// items, you should retry the batch operation on those items. However, we strongly -// recommend that you use an exponential backoff algorithm. If you retry the batch -// operation immediately, the underlying read or write requests can still fail due -// to throttling on the individual tables. If you delay the batch operation using -// exponential backoff, the individual requests in the batch are much more likely -// to succeed. For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) -// in the Amazon DynamoDB Developer Guide. With BatchWriteItem , you can -// efficiently write or delete large amounts of data, such as from Amazon EMR, or -// copy data from another database into DynamoDB. In order to improve performance -// with these large-scale operations, BatchWriteItem does not behave in the same -// way as individual PutItem and DeleteItem calls would. For example, you cannot -// specify conditions on individual put and delete requests, and BatchWriteItem -// does not return deleted items in the response. If you use a programming language -// that supports concurrency, you can use threads to write items in parallel. Your -// application must include the necessary logic to manage the threads. With -// languages that don't support threading, you must update or delete the specified -// items one at a time. In both situations, BatchWriteItem performs the specified -// put and delete operations in parallel, giving you the power of the thread pool -// approach without having to introduce complexity into your application. Parallel -// processing reduces latency, but each specified put and delete request consumes -// the same number of write capacity units whether it is processed in parallel or -// not. Delete operations on nonexistent items consume one write capacity unit. If -// one or more of the following is true, DynamoDB rejects the entire batch write -// operation: +// request with those unprocessed items until all items have been processed. +// +// If none of the items can be processed due to insufficient provisioned +// throughput on all of the tables in the request, then BatchWriteItem returns a +// ProvisionedThroughputExceededException . +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the +// individual requests in the batch are much more likely to succeed. 
+// +// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem , you can efficiently write or delete large amounts of data, +// such as from Amazon EMR, or copy data from another database into DynamoDB. In +// order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. +// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, you +// must update or delete the specified items one at a time. In both situations, +// BatchWriteItem performs the specified put and delete operations in parallel, +// giving you the power of the thread pool approach without having to introduce +// complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed in +// parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// // - One or more tables specified in the BatchWriteItem request does not exist. +// // - Primary key attributes specified on an item in the request do not match // those in the corresponding table's primary key schema. +// // - You try to perform multiple operations on the same item in the same // BatchWriteItem request. For example, you cannot put and delete the same item // in the same BatchWriteItem request. +// // - Your request contains at least two items with identical hash and range keys // (which essentially is two put operations). +// // - There are more than 25 requests in the batch. +// // - Any individual item in a batch exceeds 400 KB. +// // - The total request size exceeds 16 MB. +// +// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations +// [Naming Rules and Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html func (c *Client) BatchWriteItem(ctx context.Context, params *BatchWriteItemInput, optFns ...func(*Options)) (*BatchWriteItemOutput, error) { if params == nil { params = &BatchWriteItemInput{} @@ -89,36 +110,47 @@ type BatchWriteItemInput struct { // A map of one or more table names or table ARNs and, for each table, a list of // operations to be performed ( DeleteRequest or PutRequest ). Each element in the // map consists of the following: + // // - DeleteRequest - Perform a DeleteItem operation on the specified item. The // item to be deleted is identified by a Key subelement: + // // - Key - A map of primary key attribute values that uniquely identify the item. // Each entry in this map consists of an attribute name and an attribute value. For // each primary key, you must provide all of the key attributes. For example, with // a simple primary key, you only need to provide a value for the partition key. // For a composite primary key, you must provide values for both the partition key // and the sort key. 
+ // // - PutRequest - Perform a PutItem operation on the specified item. The item to // be put is identified by an Item subelement: + // // - Item - A map of attributes and their values. Each entry in this map consists // of an attribute name and an attribute value. Attribute values must not be null; // string and binary type attributes must have lengths greater than zero; and set // type attributes must not be empty. Requests that contain empty values are - // rejected with a ValidationException exception. If you specify any attributes - // that are part of an index key, then the data types for those attributes must - // match those of the schema in the table's attribute definition. + // rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. // // This member is required. RequestItems map[string][]types.WriteRequest // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -134,46 +166,62 @@ type BatchWriteItemInput struct { // Represents the output of a BatchWriteItem operation. type BatchWriteItemOutput struct { - // The capacity units consumed by the entire BatchWriteItem operation. Each - // element consists of: + // The capacity units consumed by the entire BatchWriteItem operation. + // + // Each element consists of: + // // - TableName - The table that consumed the provisioned throughput. + // // - CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []types.ConsumedCapacity // A list of tables that were processed by BatchWriteItem and, for each table, // information about any item collections that were affected by individual - // DeleteItem or PutItem operations. Each entry consists of the following - // subelements: + // DeleteItem or PutItem operations. + // + // Each entry consists of the following subelements: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item. + // // - SizeEstimateRangeGB - An estimate of item collection size, expressed in GB. // This is a two-element array containing a lower bound and an upper bound for the // estimate. The estimate includes the size of all the items in the table, plus the // size of all attributes projected into all of the local secondary indexes on the // table. Use this estimate to measure whether a local secondary index is - // approaching its size limit. 
The estimate is subject to change over time; - // therefore, do not rely on the precision or accuracy of the estimate. + // approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics map[string][]types.ItemCollectionMetrics // A map of tables and requests against those tables that were not processed. The // UnprocessedItems value is in the same form as RequestItems , so you can provide // this value directly to a subsequent BatchWriteItem operation. For more - // information, see RequestItems in the Request Parameters section. Each - // UnprocessedItems entry consists of a table name or table ARN and, for that + // information, see RequestItems in the Request Parameters section. + // + // Each UnprocessedItems entry consists of a table name or table ARN and, for that // table, a list of operations to perform ( DeleteRequest or PutRequest ). + // // - DeleteRequest - Perform a DeleteItem operation on the specified item. The // item to be deleted is identified by a Key subelement: + // // - Key - A map of primary key attribute values that uniquely identify the item. // Each entry in this map consists of an attribute name and an attribute value. + // // - PutRequest - Perform a PutItem operation on the specified item. The item to // be put is identified by an Item subelement: + // // - Item - A map of attributes and their values. Each entry in this map consists // of an attribute name and an attribute value. Attribute values must not be null; // string and binary type attributes must have lengths greater than zero; and set // type attributes must not be empty. Requests that contain empty values will be - // rejected with a ValidationException exception. If you specify any attributes - // that are part of an index key, then the data types for those attributes must - // match those of the schema in the table's attribute definition. + // rejected with a ValidationException exception. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // // If there are no unprocessed items remaining, the response contains an empty // UnprocessedItems map. UnprocessedItems map[string][]types.WriteRequest diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go index 19161558..1a92e723 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go @@ -12,23 +12,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a backup for an existing table. Each time you create an on-demand -// backup, the entire table data is backed up. There is no limit to the number of -// on-demand backups that can be taken. When you create an on-demand backup, a time -// marker of the request is cataloged, and the backup is created asynchronously, by -// applying all changes until the time of the request to the last full table -// snapshot. Backup requests are processed instantaneously and become available for -// restore within minutes. You can call CreateBackup at a maximum rate of 50 times -// per second. All backups in DynamoDB work without consuming any provisioned -// throughput on the table. 
If you submit a backup request on 2018-12-14 at -// 14:25:00, the backup is guaranteed to contain all data committed to the table up -// to 14:24:00, and data committed after 14:26:00 will not be. The backup might -// contain data modifications made between 14:24:00 and 14:26:00. On-demand backup -// does not support causal consistency. Along with data, the following are also -// included on the backups: +// Creates a backup for an existing table. +// +// Each time you create an on-demand backup, the entire table data is backed up. +// There is no limit to the number of on-demand backups that can be taken. +// +// When you create an on-demand backup, a time marker of the request is cataloged, +// and the backup is created asynchronously, by applying all changes until the time +// of the request to the last full table snapshot. Backup requests are processed +// instantaneously and become available for restore within minutes. +// +// You can call CreateBackup at a maximum rate of 50 times per second. +// +// All backups in DynamoDB work without consuming any provisioned throughput on +// the table. +// +// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is +// guaranteed to contain all data committed to the table up to 14:24:00, and data +// committed after 14:26:00 will not be. The backup might contain data +// modifications made between 14:24:00 and 14:26:00. On-demand backup does not +// support causal consistency. +// +// Along with data, the following are also included on the backups: +// // - Global secondary indexes (GSIs) +// // - Local secondary indexes (LSIs) +// // - Streams +// // - Provisioned read and write capacity func (c *Client) CreateBackup(ctx context.Context, params *CreateBackupInput, optFns ...func(*Options)) (*CreateBackupOutput, error) { if params == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go index 693a39d1..2e46ef1f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go @@ -14,41 +14,56 @@ import ( // Creates a global table from an existing table. A global table creates a // replication relationship between two or more DynamoDB tables with the same table -// name in the provided Regions. This operation only applies to Version 2017.11.29 -// (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// name in the provided Regions. +// +// This operation only applies to [Version 2017.11.29 (Legacy)] of global tables. We recommend using [Version 2019.11.21 (Current)] when +// creating new global tables, as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . 
To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . If you want to add a new replica table to a global table, each of the -// following conditions must be true: +// determine which version you are using, see [Determining the version]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Updating global tables]. +// +// If you want to add a new replica table to a global table, each of the following +// conditions must be true: +// // - The table must have the same primary key as all of the other replicas. +// // - The table must have the same name as all of the other replicas. +// // - The table must have DynamoDB Streams enabled, with the stream containing // both the new and the old images of the item. +// // - None of the replica tables in the global table can contain any data. // // If global secondary indexes are specified, then the following conditions must // also be met: +// // - The global secondary indexes must have the same name. +// // - The global secondary indexes must have the same hash key and sort key (if // present). // // If local secondary indexes are specified, then the following conditions must // also be met: +// // - The local secondary indexes must have the same name. +// // - The local secondary indexes must have the same hash key and sort key (if // present). // // Write capacity settings should be set consistently across your replica tables // and secondary indexes. DynamoDB strongly recommends enabling auto scaling to // manage the write capacity settings for all of your global tables replicas and -// indexes. If you prefer to manage write capacity settings manually, you should -// provision equal replicated write capacity units to your replica tables. You -// should also provision equal replicated write capacity units to matching -// secondary indexes across your global table. +// indexes. +// +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should also +// provision equal replicated write capacity units to matching secondary indexes +// across your global table. +// +// [Updating global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Version 2017.11.29 (Legacy)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Determining the version]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) CreateGlobalTable(ctx context.Context, params *CreateGlobalTableInput, optFns ...func(*Options)) (*CreateGlobalTableOutput, error) { if params == nil { params = &CreateGlobalTableInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go index 92975ec6..62167604 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go @@ -15,15 +15,19 @@ import ( // The CreateTable operation adds a new table to your account. 
In an Amazon Web // Services account, table names must be unique within each Region. That is, you // can have two tables with same name if you create the tables in different -// Regions. CreateTable is an asynchronous operation. Upon receiving a CreateTable -// request, DynamoDB immediately returns a response with a TableStatus of CREATING -// . After the table is created, DynamoDB sets the TableStatus to ACTIVE . You can -// perform read and write operations only on an ACTIVE table. You can optionally -// define secondary indexes on the new table, as part of the CreateTable -// operation. If you want to create multiple tables with secondary indexes on them, -// you must create the tables sequentially. Only one table with secondary indexes -// can be in the CREATING state at any given time. You can use the DescribeTable -// action to check the table status. +// Regions. +// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING . After +// the table is created, DynamoDB sets the TableStatus to ACTIVE . You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of the +// CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table with +// secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable action to check the table status. func (c *Client) CreateTable(ctx context.Context, params *CreateTableInput, optFns ...func(*Options)) (*CreateTableOutput, error) { if params == nil { params = &CreateTableInput{} @@ -49,25 +53,38 @@ type CreateTableInput struct { // Specifies the attributes that make up the primary key for a table or an index. // The attributes in KeySchema must also be defined in the AttributeDefinitions - // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) - // in the Amazon DynamoDB Developer Guide. Each KeySchemaElement in the array is - // composed of: + // array. For more information, see [Data Model]in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // // - AttributeName - The name of this key attribute. + // // - KeyType - The role that the key attribute will assume: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from the DynamoDB usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // // For a simple primary key (partition key), you must provide exactly one element - // with a KeyType of HASH . 
For a composite primary key (partition key and sort - // key), you must provide exactly two elements, in this order: The first element - // must have a KeyType of HASH , and the second element must have a KeyType of - // RANGE . For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) - // in the Amazon DynamoDB Developer Guide. + // with a KeyType of HASH . + // + // For a composite primary key (partition key and sort key), you must provide + // exactly two elements, in this order: The first element must have a KeyType of + // HASH , and the second element must have a KeyType of RANGE . + // + // For more information, see [Working with Tables] in the Amazon DynamoDB Developer Guide. + // + // [Data Model]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html + // [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key // // This member is required. KeySchema []types.KeySchemaElement @@ -80,12 +97,15 @@ type CreateTableInput struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. + // // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) - // . + // PROVISIONED sets the billing mode to [Provisioned Mode]. + // // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand) - // . + // workloads. PAY_PER_REQUEST sets the billing mode to [On-Demand Mode]. + // + // [On-Demand Mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand + // [Provisioned Mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual BillingMode types.BillingMode // Indicates whether deletion protection is to be enabled (true) or disabled @@ -94,23 +114,32 @@ type CreateTableInput struct { // One or more global secondary indexes (the maximum is 20) to be created on the // table. Each global secondary index in the array includes the following: + // // - IndexName - The name of the global secondary index. Must be unique only for // this table. + // // - KeySchema - Specifies the key schema for the global secondary index. + // // - Projection - Specifies attributes that are copied (projected) from the table // into the index. These are in addition to the primary key attributes and index // key attributes, which are automatically projected. Each attribute specification // is composed of: + // // - ProjectionType - One of the following: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - Only the specified table attributes are projected into the index. // The list of projected attributes is in NonKeyAttributes . + // // - ALL - All of the table attributes are projected into the index. 
+ // // - NonKeyAttributes - A list of one or more non-key attribute names that are // projected into the secondary index. The total count of attributes provided in // NonKeyAttributes , summed across all of the secondary indexes, must not exceed // 100. If you project the same attribute into two different indexes, this counts // as two distinct attributes when determining the total. + // // - ProvisionedThroughput - The provisioned throughput settings for the global // secondary index, consisting of read and write capacity units. GlobalSecondaryIndexes []types.GlobalSecondaryIndex @@ -118,21 +147,30 @@ type CreateTableInput struct { // One or more local secondary indexes (the maximum is 5) to be created on the // table. Each index is scoped to a given partition key value. There is a 10 GB // size limit per partition key value; otherwise, the size of a local secondary - // index is unconstrained. Each local secondary index in the array includes the - // following: + // index is unconstrained. + // + // Each local secondary index in the array includes the following: + // // - IndexName - The name of the local secondary index. Must be unique only for // this table. + // // - KeySchema - Specifies the key schema for the local secondary index. The key // schema must begin with the same partition key as the table. + // // - Projection - Specifies attributes that are copied (projected) from the table // into the index. These are in addition to the primary key attributes and index // key attributes, which are automatically projected. Each attribute specification // is composed of: + // // - ProjectionType - One of the following: + // // - KEYS_ONLY - Only the index and primary keys are projected into the index. + // // - INCLUDE - Only the specified table attributes are projected into the index. // The list of projected attributes is in NonKeyAttributes . + // // - ALL - All of the table attributes are projected into the index. + // // - NonKeyAttributes - A list of one or more non-key attribute names that are // projected into the secondary index. The total count of attributes provided in // NonKeyAttributes , summed across all of the secondary indexes, must not exceed @@ -140,41 +178,58 @@ type CreateTableInput struct { // as two distinct attributes when determining the total. LocalSecondaryIndexes []types.LocalSecondaryIndex + // Sets the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify + // MaxReadRequestUnits , MaxWriteRequestUnits , or both. + OnDemandThroughput *types.OnDemandThroughput + // Represents the provisioned throughput settings for a specified table or index. - // The settings can be modified using the UpdateTable operation. If you set - // BillingMode as PROVISIONED , you must specify this property. If you set - // BillingMode as PAY_PER_REQUEST , you cannot specify this property. For current - // minimum and maximum provisioned throughput values, see Service, Account, and - // Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // The settings can be modified using the UpdateTable operation. + // + // If you set BillingMode as PROVISIONED , you must specify this property. If you + // set BillingMode as PAY_PER_REQUEST , you cannot specify this property. 
+ // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *types.ProvisionedThroughput // An Amazon Web Services resource-based policy document in JSON format that will - // be attached to the table. When you attach a resource-based policy while creating - // a table, the policy creation is strongly consistent. The maximum size supported - // for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when - // calculating the size of a policy against this limit. You can’t request an - // increase for this limit. For a full list of all considerations that you should - // keep in mind while attaching a resource-based policy, see Resource-based policy - // considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) - // . + // be attached to the table. + // + // When you attach a resource-based policy while creating a table, the policy + // application is strongly consistent. + // + // The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against this + // limit. For a full list of all considerations that apply for resource-based + // policies, see [Resource-based policy considerations]. + // + // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html ResourcePolicy *string // Represents the settings used to enable server-side encryption. SSESpecification *types.SSESpecification // The settings for DynamoDB Streams on the table. These settings consist of: + // // - StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true) // or disabled (false). + // // - StreamViewType - When an item in the table is modified, StreamViewType // determines what information is written to the table's stream. Valid values for // StreamViewType are: + // // - KEYS_ONLY - Only the key attributes of the modified item are written to the // stream. + // // - NEW_IMAGE - The entire item, as it appears after it was modified, is written // to the stream. + // // - OLD_IMAGE - The entire item, as it appeared before it was modified, is // written to the stream. + // // - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are // written to the stream. StreamSpecification *types.StreamSpecification @@ -183,9 +238,9 @@ type CreateTableInput struct { // STANDARD_INFREQUENT_ACCESS . TableClass types.TableClass - // A list of key-value pairs to label the table. For more information, see Tagging - // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) - // . + // A list of key-value pairs to label the table. For more information, see [Tagging for DynamoDB]. 
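Pulling the CreateTableInput pieces above together (key schema, attribute definitions, billing mode, tags), a sketch of a typical call; the table and attribute names are placeholders, and waiting for ACTIVE status is shown later with the DescribeTable waiter:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// createOrdersTable creates an on-demand table with a composite primary key.
// Every attribute named in KeySchema must also appear in AttributeDefinitions,
// and the HASH element must come before the RANGE element.
func createOrdersTable(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.CreateTable(ctx, &dynamodb.CreateTableInput{
		TableName: aws.String("Orders"),
		AttributeDefinitions: []types.AttributeDefinition{
			{AttributeName: aws.String("CustomerId"), AttributeType: types.ScalarAttributeTypeS},
			{AttributeName: aws.String("OrderId"), AttributeType: types.ScalarAttributeTypeS},
		},
		KeySchema: []types.KeySchemaElement{
			{AttributeName: aws.String("CustomerId"), KeyType: types.KeyTypeHash},
			{AttributeName: aws.String("OrderId"), KeyType: types.KeyTypeRange},
		},
		// PAY_PER_REQUEST means ProvisionedThroughput must be omitted.
		BillingMode: types.BillingModePayPerRequest,
		Tags: []types.Tag{
			{Key: aws.String("env"), Value: aws.String("dev")},
		},
	})
	// CreateTable is asynchronous: the table comes back CREATING, and reads
	// and writes only work once it is ACTIVE (see the DescribeTable waiter
	// sketch further down).
	return err
}
```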
+ // + // [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html Tags []types.Tag noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go index 0d847ed0..26f5dbed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go @@ -12,8 +12,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an existing backup of a table. You can call DeleteBackup at a maximum -// rate of 10 times per second. +// Deletes an existing backup of a table. +// +// You can call DeleteBackup at a maximum rate of 10 times per second. func (c *Client) DeleteBackup(ctx context.Context, params *DeleteBackupInput, optFns ...func(*Options)) (*DeleteBackupOutput, error) { if params == nil { params = &DeleteBackupInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go index eab33851..b111cb62 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go @@ -14,13 +14,18 @@ import ( // Deletes a single item in a table by primary key. You can perform a conditional // delete operation that deletes the item if it exists, or if it has an expected -// attribute value. In addition to deleting an item, you can also return the item's -// attribute values in the same operation, using the ReturnValues parameter. +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// // Unless you specify conditions, the DeleteItem is an idempotent operation; // running it multiple times on the same item or attribute does not result in an -// error response. Conditional deletes are useful for deleting items only if -// specific conditions are met. If those conditions are met, DynamoDB performs the -// delete. Otherwise, the item is not deleted. +// error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. func (c *Client) DeleteItem(ctx context.Context, params *DeleteItemInput, optFns ...func(*Options)) (*DeleteItemOutput, error) { if params == nil { params = &DeleteItemInput{} @@ -40,10 +45,12 @@ func (c *Client) DeleteItem(ctx context.Context, params *DeleteItemInput, optFns type DeleteItemInput struct { // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to delete. For the primary key, you must provide all of the key - // attributes. For example, with a simple primary key, you only need to provide a - // value for the partition key. For a composite primary key, you must provide - // values for both the partition key and the sort key. + // key of the item to delete. + // + // For the primary key, you must provide all of the key attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. // // This member is required. 
Key map[string]types.AttributeValue @@ -55,70 +62,111 @@ type DeleteItemInput struct { TableName *string // A condition that must be satisfied in order for a conditional DeleteItem to - // succeed. An expression can contain any of the following: + // succeed. + // + // An expression can contain any of the following: + // // - Functions: attribute_exists | attribute_not_exists | attribute_type | - // contains | begins_with | size These function names are case-sensitive. + // contains | begins_with | size + // + // These function names are case-sensitive. + // // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // // - Logical operators: AND | OR | NOT - // For more information about condition expressions, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // + // For more information about condition expressions, see [Condition Expressions] in the Amazon DynamoDB + // Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ConditionExpression *string // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html Expected map[string]types.ExpectedAttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). 
To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. 
+ // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -130,20 +178,26 @@ type DeleteItemInput struct { // Use ReturnValues if you want to get the item attributes as they appeared before // they were deleted. For DeleteItem , the valid values are: + // // - NONE - If ReturnValues is not specified, or if its value is NONE , then // nothing is returned. (This setting is the default for ReturnValues .) + // // - ALL_OLD - The content of the old item is returned. + // // There is no additional cost associated with requesting a return value aside // from the small network and processing overhead of receiving a larger response. - // No read capacity units are consumed. The ReturnValues parameter is used by - // several DynamoDB operations; however, DeleteItem does not recognize any values - // other than NONE or ALL_OLD . + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // DeleteItem does not recognize any values other than NONE or ALL_OLD . ReturnValues types.ReturnValue // An optional parameter that returns the item attributes for a DeleteItem - // operation that failed a condition check. There is no additional cost associated - // with requesting a return value aside from the small network and processing - // overhead of receiving a larger response. No read capacity units are consumed. + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -161,24 +215,30 @@ type DeleteItemOutput struct { // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Provisioned Throughput]in the Amazon DynamoDB Developer Guide. + // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html ConsumedCapacity *types.ConsumedCapacity // Information about item collections, if any, that were affected by the DeleteItem // operation. ItemCollectionMetrics is only returned if the // ReturnItemCollectionMetrics parameter was specified. If the table does not have // any local secondary indexes, this information is not returned in the response. + // // Each ItemCollectionMetrics element consists of: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item itself. + // // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. 
// This value is a two-element array containing a lower bound and an upper bound // for the estimate. The estimate includes the size of all the items in the table, // plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary - // index is approaching its size limit. The estimate is subject to change over - // time; therefore, do not rely on the precision or accuracy of the estimate. + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics *types.ItemCollectionMetrics // Metadata pertaining to the operation's result. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go index cd6720b0..64436f59 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go @@ -12,13 +12,17 @@ import ( ) // Deletes the resource-based policy attached to the resource, which can be a -// table or stream. DeleteResourcePolicy is an idempotent operation; running it -// multiple times on the same resource doesn't result in an error response, unless -// you specify an ExpectedRevisionId , which will then return a -// PolicyNotFoundException . To make sure that you don't inadvertently lock -// yourself out of your own resources, the root principal in your Amazon Web -// Services account can perform DeleteResourcePolicy requests, even if your -// resource-based policy explicitly denies the root principal's access. +// table or stream. +// +// DeleteResourcePolicy is an idempotent operation; running it multiple times on +// the same resource doesn't result in an error response, unless you specify an +// ExpectedRevisionId , which will then return a PolicyNotFoundException . +// +// To make sure that you don't inadvertently lock yourself out of your own +// resources, the root principal in your Amazon Web Services account can perform +// DeleteResourcePolicy requests, even if your resource-based policy explicitly +// denies the root principal's access. +// // DeleteResourcePolicy is an asynchronous operation. If you issue a // GetResourcePolicy request immediately after running the DeleteResourcePolicy // request, DynamoDB might still return the deleted policy. This is because the @@ -61,9 +65,11 @@ type DeleteResourcePolicyInput struct { type DeleteResourcePolicyOutput struct { - // A unique string that represents the revision ID of the policy. If you are - // comparing revision IDs, make sure to always use string comparison logic. This - // value will be empty if you make a request against a resource without a policy. + // A unique string that represents the revision ID of the policy. If you're + // comparing revision IDs, make sure to always use string comparison logic. + // + // This value will be empty if you make a request against a resource without a + // policy. RevisionId *string // Metadata pertaining to the operation's result. 
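Taken together, the DeleteItemInput fields above support the conditional-delete pattern directly. A minimal sketch, with the table, key, and attribute names as placeholders:

```go
package example

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// deleteIfDiscontinued deletes an item only while its ProductStatus attribute
// is "Discontinued", and returns the old attributes via ReturnValues ALL_OLD.
func deleteIfDiscontinued(ctx context.Context, client *dynamodb.Client, id string) (map[string]types.AttributeValue, error) {
	out, err := client.DeleteItem(ctx, &dynamodb.DeleteItemInput{
		TableName: aws.String("ProductCatalog"),
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: id},
		},
		ConditionExpression: aws.String("ProductStatus = :disc"),
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":disc": &types.AttributeValueMemberS{Value: "Discontinued"},
		},
		ReturnValues: types.ReturnValueAllOld,
	})
	if err != nil {
		var ccf *types.ConditionalCheckFailedException
		if errors.As(err, &ccf) {
			// Condition not met; the item was left in place.
			return nil, nil
		}
		return nil, err
	}
	return out.Attributes, nil
}
```

Requesting ALL_OLD consumes no read capacity, so it is a cheap way to record exactly what was removed.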
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go index 00d36f83..4fdcac82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go @@ -18,15 +18,23 @@ import ( // delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns // a ResourceInUseException . If the specified table does not exist, DynamoDB // returns a ResourceNotFoundException . If table is already in the DELETING -// state, no error is returned. This operation only applies to Version 2019.11.21 -// (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. DynamoDB might continue to accept data read and write -// operations, such as GetItem and PutItem , on a table in the DELETING state -// until the table deletion is complete. When you delete a table, any indexes on -// that table are also deleted. If you have DynamoDB Streams enabled on the table, -// then the corresponding stream on that table goes into the DISABLED state, and -// the stream is automatically deleted after 24 hours. Use the DescribeTable -// action to check the status of the table. +// state, no error is returned. +// +// This operation only applies to [Version 2019.11.21 (Current)] of global tables. +// +// DynamoDB might continue to accept data read and write operations, such as +// GetItem and PutItem , on a table in the DELETING state until the table deletion +// is complete. +// +// When you delete a table, any indexes on that table are also deleted. +// +// If you have DynamoDB Streams enabled on the table, then the corresponding +// stream on that table goes into the DISABLED state, and the stream is +// automatically deleted after 24 hours. +// +// Use the DescribeTable action to check the status of the table. +// +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html func (c *Client) DeleteTable(ctx context.Context, params *DeleteTableInput, optFns ...func(*Options)) (*DeleteTableOutput, error) { if params == nil { params = &DeleteTableInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go index 845ccd7b..82b098a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go @@ -12,8 +12,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes an existing backup of a table. You can call DescribeBackup at a -// maximum rate of 10 times per second. +// Describes an existing backup of a table. +// +// You can call DescribeBackup at a maximum rate of 10 times per second. 
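Stepping back to DeleteTable for a moment: because a table in the DELETING state can keep serving reads and writes until deletion completes, callers that need a clean slate usually pair the call with the package's TableNotExists waiter. A sketch, assuming an initialized *dynamodb.Client:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// dropTable deletes a table and blocks until the deletion has finished, since
// a table in DELETING can keep answering reads and writes for a while.
func dropTable(ctx context.Context, client *dynamodb.Client, name string) error {
	if _, err := client.DeleteTable(ctx, &dynamodb.DeleteTableInput{
		TableName: aws.String(name),
	}); err != nil {
		return err
	}
	waiter := dynamodb.NewTableNotExistsWaiter(client)
	return waiter.Wait(ctx, &dynamodb.DescribeTableInput{
		TableName: aws.String(name),
	}, 5*time.Minute)
}
```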
func (c *Client) DescribeBackup(ctx context.Context, params *DescribeBackupInput, optFns ...func(*Options)) (*DescribeBackupOutput, error) { if params == nil { params = &DescribeBackupInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go index 2f60a65d..fc3980f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go @@ -15,12 +15,16 @@ import ( // Checks the status of continuous backups and point in time recovery on the // specified table. Continuous backups are ENABLED on all tables at table // creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will -// be set to ENABLED. After continuous backups and point in time recovery are -// enabled, you can restore to any point in time within EarliestRestorableDateTime -// and LatestRestorableDateTime . LatestRestorableDateTime is typically 5 minutes -// before the current time. You can restore your table to any point in time during -// the last 35 days. You can call DescribeContinuousBackups at a maximum rate of -// 10 times per second. +// be set to ENABLED. +// +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and +// LatestRestorableDateTime . +// +// LatestRestorableDateTime is typically 5 minutes before the current time. You +// can restore your table to any point in time during the last 35 days. +// +// You can call DescribeContinuousBackups at a maximum rate of 10 times per second. func (c *Client) DescribeContinuousBackups(ctx context.Context, params *DescribeContinuousBackupsInput, optFns ...func(*Options)) (*DescribeContinuousBackupsOutput, error) { if params == nil { params = &DescribeContinuousBackupsInput{} @@ -39,8 +43,10 @@ func (c *Client) DescribeContinuousBackups(ctx context.Context, params *Describe type DescribeContinuousBackupsInput struct { // Name of the table for which the customer wants to check the continuous backups - // and point in time recovery settings. You can also provide the Amazon Resource - // Name (ARN) of the table in this parameter. + // and point in time recovery settings. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go index c983b639..dd6babda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go @@ -51,15 +51,20 @@ type DescribeContributorInsightsOutput struct { // Current status of contributor insights. ContributorInsightsStatus types.ContributorInsightsStatus - // Returns information about the last failure that was encountered. The most - // common exceptions for a FAILED status are: + // Returns information about the last failure that was encountered. + // + // The most common exceptions for a FAILED status are: + // // - LimitExceededException - Per-account Amazon CloudWatch Contributor Insights // rule limit reached. 
Please disable Contributor Insights for other tables/indexes // OR disable Contributor Insights rules before retrying. + // // - AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot // be modified due to insufficient permissions. + // // - AccessDeniedException - Failed to create service-linked role for // Contributor Insights due to insufficient permissions. + // // - InternalServerError - Failed to create Amazon CloudWatch Contributor // Insights rules. Please retry request. FailureException *types.FailureException diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go index b542bfcc..fec3a663 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go @@ -12,8 +12,9 @@ import ( ) // Returns the regional endpoint information. For more information on policy -// permissions, please see Internetwork traffic privacy (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints) -// . +// permissions, please see [Internetwork traffic privacy]. +// +// [Internetwork traffic privacy]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints func (c *Client) DescribeEndpoints(ctx context.Context, params *DescribeEndpointsInput, optFns ...func(*Options)) (*DescribeEndpointsOutput, error) { if params == nil { params = &DescribeEndpointsInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go index 541772f6..b1edcd0b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go @@ -12,15 +12,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns information about the specified global table. This operation only -// applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// Returns information about the specified global table. +// +// This operation only applies to [Version 2017.11.29 (Legacy)] of global tables. We recommend using [Version 2019.11.21 (Current)] when +// creating new global tables, as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// determine which version you are using, see [Determining the version]. 
To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Updating global tables]. +// +// [Updating global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Version 2017.11.29 (Legacy)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Determining the version]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) DescribeGlobalTable(ctx context.Context, params *DescribeGlobalTableInput, optFns ...func(*Options)) (*DescribeGlobalTableOutput, error) { if params == nil { params = &DescribeGlobalTableInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go index 031d4381..51923dd1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go @@ -12,15 +12,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Describes Region-specific settings for a global table. This operation only -// applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// Describes Region-specific settings for a global table. +// +// This operation only applies to [Version 2017.11.29 (Legacy)] of global tables. We recommend using [Version 2019.11.21 (Current)] when +// creating new global tables, as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// determine which version you are using, see [Determining the version]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Updating global tables]. 
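For a concrete picture of what this operation returns, a small sketch that lists each replica Region and its status; the global table name is a placeholder:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// printReplicaSettings prints the status of each replica Region for a legacy
// (2017.11.29) global table.
func printReplicaSettings(ctx context.Context, client *dynamodb.Client, name string) error {
	out, err := client.DescribeGlobalTableSettings(ctx, &dynamodb.DescribeGlobalTableSettingsInput{
		GlobalTableName: aws.String(name),
	})
	if err != nil {
		return err
	}
	for _, rs := range out.ReplicaSettings {
		fmt.Println(aws.ToString(rs.RegionName), rs.ReplicaStatus)
	}
	return nil
}
```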
+// +// [Updating global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Version 2017.11.29 (Legacy)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Determining the version]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) DescribeGlobalTableSettings(ctx context.Context, params *DescribeGlobalTableSettingsInput, optFns ...func(*Options)) (*DescribeGlobalTableSettingsOutput, error) { if params == nil { params = &DescribeGlobalTableSettingsInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go index 2b396158..d61fd09d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go @@ -29,7 +29,7 @@ func (c *Client) DescribeImport(ctx context.Context, params *DescribeImportInput type DescribeImportInput struct { - // The Amazon Resource Name (ARN) associated with the table you're importing to. + // The Amazon Resource Name (ARN) associated with the table you're importing to. // // This member is required. ImportArn *string @@ -39,7 +39,7 @@ type DescribeImportInput struct { type DescribeImportOutput struct { - // Represents the properties of the table created for the import, and parameters + // Represents the properties of the table created for the import, and parameters // of the import. The import parameters include import status, how many items were // processed, and how many errors were encountered. // diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go index cd60d4f8..cfd9fbf7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go @@ -13,42 +13,64 @@ import ( // Returns the current provisioned-capacity quotas for your Amazon Web Services // account in a Region, both for the Region as a whole and for any one DynamoDB -// table that you create there. When you establish an Amazon Web Services account, -// the account has initial quotas on the maximum read capacity units and write -// capacity units that you can provision across all of your DynamoDB tables in a -// given Region. Also, there are per-table quotas that apply when you create a -// table there. For more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) -// page in the Amazon DynamoDB Developer Guide. Although you can increase these -// quotas by filing a case at Amazon Web Services Support Center (https://console.aws.amazon.com/support/home#/) -// , obtaining the increase is not instantaneous. The DescribeLimits action lets -// you write code to compare the capacity you are currently using to those quotas -// imposed by your account so that you have enough time to apply for an increase -// before you hit a quota. For example, you could use one of the Amazon Web -// Services SDKs to do the following: +// table that you create there. 
+// +// When you establish an Amazon Web Services account, the account has initial +// quotas on the maximum read capacity units and write capacity units that you can +// provision across all of your DynamoDB tables in a given Region. Also, there are +// per-table quotas that apply when you create a table there. For more information, +// see [Service, Account, and Table Quotas]page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these quotas by filing a case at [Amazon Web Services Support Center], obtaining the +// increase is not instantaneous. The DescribeLimits action lets you write code to +// compare the capacity you are currently using to those quotas imposed by your +// account so that you have enough time to apply for an increase before you hit a +// quota. +// +// For example, you could use one of the Amazon Web Services SDKs to do the +// following: +// // - Call DescribeLimits for a particular Region to obtain your current account // quotas on provisioned capacity there. +// // - Create a variable to hold the aggregate read capacity units provisioned for // all your tables in that Region, and one to hold the aggregate write capacity // units. Zero them both. +// // - Call ListTables to obtain a list of all your DynamoDB tables. +// // - For each table name listed by ListTables , do the following: +// // - Call DescribeTable with the table name. +// // - Use the data returned by DescribeTable to add the read capacity units and // write capacity units provisioned for the table itself to your variables. +// // - If the table has one or more global secondary indexes (GSIs), loop over // these GSIs and add their provisioned capacity values to your variables as well. +// // - Report the account quotas for that Region returned by DescribeLimits , along // with the total current provisioned capacity levels you have calculated. // // This will let you see whether you are getting close to your account-level -// quotas. The per-table quotas apply only when you are creating a new table. They +// quotas. +// +// The per-table quotas apply only when you are creating a new table. They // restrict the sum of the provisioned capacity of the new table itself and all its -// global secondary indexes. For existing tables and their GSIs, DynamoDB doesn't -// let you increase provisioned capacity extremely rapidly, but the only quota that -// applies is that the aggregate provisioned capacity over all your tables and GSIs -// cannot exceed either of the per-account quotas. DescribeLimits should only be -// called periodically. You can expect throttling errors if you call it more than -// once in a minute. The DescribeLimits Request element has no content. +// global secondary indexes. +// +// For existing tables and their GSIs, DynamoDB doesn't let you increase +// provisioned capacity extremely rapidly, but the only quota that applies is that +// the aggregate provisioned capacity over all your tables and GSIs cannot exceed +// either of the per-account quotas. +// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. 
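The walkthrough above maps nearly one-to-one onto SDK calls. A condensed sketch, assuming an initialized *dynamodb.Client and using the ListTables paginator for the listing step:

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// reportProvisionedUsage follows the steps above: fetch the account quotas,
// then sum provisioned read/write capacity across every table and its GSIs.
func reportProvisionedUsage(ctx context.Context, client *dynamodb.Client) error {
	limits, err := client.DescribeLimits(ctx, &dynamodb.DescribeLimitsInput{})
	if err != nil {
		return err
	}
	var readUsed, writeUsed int64
	p := dynamodb.NewListTablesPaginator(client, &dynamodb.ListTablesInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, name := range page.TableNames {
			t, err := client.DescribeTable(ctx, &dynamodb.DescribeTableInput{TableName: aws.String(name)})
			if err != nil {
				return err
			}
			if pt := t.Table.ProvisionedThroughput; pt != nil {
				readUsed += aws.ToInt64(pt.ReadCapacityUnits)
				writeUsed += aws.ToInt64(pt.WriteCapacityUnits)
			}
			for _, gsi := range t.Table.GlobalSecondaryIndexes {
				if pt := gsi.ProvisionedThroughput; pt != nil {
					readUsed += aws.ToInt64(pt.ReadCapacityUnits)
					writeUsed += aws.ToInt64(pt.WriteCapacityUnits)
				}
			}
		}
	}
	fmt.Printf("reads: %d of %d, writes: %d of %d\n",
		readUsed, aws.ToInt64(limits.AccountMaxReadCapacityUnits),
		writeUsed, aws.ToInt64(limits.AccountMaxWriteCapacityUnits))
	return nil
}
```

Per the doc comment above, this should run only periodically; calling DescribeLimits more than about once a minute invites throttling errors.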
+//
+// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+// [Amazon Web Services Support Center]: https://console.aws.amazon.com/support/home#/
 func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) {
 	if params == nil {
 		params = &DescribeLimitsInput{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go
index 29861d9b..089fa9af 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go
@@ -18,13 +18,17 @@ import (
 )
 
 // Returns information about the table, including the current status of the table,
-// when it was created, the primary key schema, and any indexes on the table. This
-// operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html)
-// of global tables. If you issue a DescribeTable request immediately after a
-// CreateTable request, DynamoDB might return a ResourceNotFoundException . This is
-// because DescribeTable uses an eventually consistent query, and the metadata for
-// your table might not be available at that moment. Wait for a few seconds, and
-// then try the DescribeTable request again.
+// when it was created, the primary key schema, and any indexes on the table.
+//
+// This operation only applies to [Version 2019.11.21 (Current)] of global tables.
+//
+// If you issue a DescribeTable request immediately after a CreateTable request,
+// DynamoDB might return a ResourceNotFoundException . This is because
+// DescribeTable uses an eventually consistent query, and the metadata for your
+// table might not be available at that moment. Wait for a few seconds, and then
+// try the DescribeTable request again.
+//
+// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html
 func (c *Client) DescribeTable(ctx context.Context, params *DescribeTableInput, optFns ...func(*Options)) (*DescribeTableOutput, error) {
 	if params == nil {
 		params = &DescribeTableInput{}
@@ -234,12 +238,13 @@ type TableExistsWaiterOptions struct {
 	// Retryable is a function that can be used to override the service defined
 	// waiter-behavior based on operation output, or returned error. This function is
-	// used by the waiter to decide if a state is retryable or a terminal state. By
-	// default service-modeled logic will populate this option. This option can thus be
-	// used to define a custom waiter state with fall-back to service-modeled waiter
-	// state mutators.The function returns an error in case of a failure state. In case
-	// of retry state, this function returns a bool value of true and nil error, while
-	// in case of success it returns a bool value of false and nil error.
+	// used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators. The function returns an error in case of a failure state.
+	// In case of retry state, this function returns a bool value of true and nil
+	// error, while in case of success it returns a bool value of false and nil error.
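In practice, the TableExists waiter these options configure is how callers cover the eventual-consistency window noted for DescribeTable above. A sketch; the two-second MinDelay is an arbitrary tuning choice, and a custom Retryable function would be assigned through the same options hook:

```go
package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// waitForTable polls DescribeTable until the table exists, covering the
// window where a just-created table is not yet visible.
func waitForTable(ctx context.Context, client *dynamodb.Client, name string) error {
	waiter := dynamodb.NewTableExistsWaiter(client, func(o *dynamodb.TableExistsWaiterOptions) {
		o.MinDelay = 2 * time.Second // arbitrary; the default is service-modeled
	})
	return waiter.Wait(ctx, &dynamodb.DescribeTableInput{
		TableName: aws.String(name),
	}, 5*time.Minute)
}
```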
Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error) } @@ -412,12 +417,13 @@ type TableNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go index 0008f9d1..77aa2993 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go @@ -12,8 +12,10 @@ import ( ) // Describes auto scaling settings across replicas of the global table at once. -// This operation only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// +// This operation only applies to [Version 2019.11.21 (Current)] of global tables. +// +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html func (c *Client) DescribeTableReplicaAutoScaling(ctx context.Context, params *DescribeTableReplicaAutoScalingInput, optFns ...func(*Options)) (*DescribeTableReplicaAutoScalingOutput, error) { if params == nil { params = &DescribeTableReplicaAutoScalingInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go index df979dfe..30445956 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go @@ -12,12 +12,15 @@ import ( ) // This operation allows you to perform reads and singleton writes on data stored -// in DynamoDB, using PartiQL. For PartiQL reads ( SELECT statement), if the total -// number of processed items exceeds the maximum dataset size limit of 1 MB, the -// read stops and results are returned to the user as a LastEvaluatedKey value to -// continue the read in a subsequent operation. If the filter criteria in WHERE -// clause does not match any data, the read will return an empty result set. 
A -// single SELECT statement response can return up to the maximum number of items +// in DynamoDB, using PartiQL. +// +// For PartiQL reads ( SELECT statement), if the total number of processed items +// exceeds the maximum dataset size limit of 1 MB, the read stops and results are +// returned to the user as a LastEvaluatedKey value to continue the read in a +// subsequent operation. If the filter criteria in WHERE clause does not match any +// data, the read will return an empty result set. +// +// A single SELECT statement response can return up to the maximum number of items // (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any // filtering to the results using WHERE clause). If LastEvaluatedKey is present in // the response, you need to paginate the result set. If NextToken is present, you @@ -67,20 +70,27 @@ type ExecuteStatementInput struct { // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity // An optional parameter that returns the item attributes for an ExecuteStatement - // operation that failed a condition check. There is no additional cost associated - // with requesting a return value aside from the small network and processing - // overhead of receiving a larger response. No read capacity units are consumed. + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -91,8 +101,10 @@ type ExecuteStatementOutput struct { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table and // any indexes involved in the operation. ConsumedCapacity is only returned if the - // request asked for it. For more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. + // request asked for it. For more information, see [Provisioned Throughput]in the Amazon DynamoDB + // Developer Guide. 
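The NextToken contract described above makes draining a PartiQL SELECT a simple loop. A sketch, with a hypothetical Music table and parameter value:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// selectAll drains a PartiQL SELECT, following NextToken until the
// service stops returning one.
func selectAll(ctx context.Context, client *dynamodb.Client) ([]map[string]types.AttributeValue, error) {
	var items []map[string]types.AttributeValue
	var next *string
	for {
		out, err := client.ExecuteStatement(ctx, &dynamodb.ExecuteStatementInput{
			Statement: aws.String(`SELECT * FROM "Music" WHERE Artist = ?`),
			Parameters: []types.AttributeValue{
				&types.AttributeValueMemberS{Value: "No One You Know"},
			},
			NextToken: next,
		})
		if err != nil {
			return nil, err
		}
		items = append(items, out.Items...)
		if out.NextToken == nil {
			break // no more pages
		}
		next = out.NextToken
	}
	return items, nil
}
```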
+ // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html ConsumedCapacity *types.ConsumedCapacity // If a read operation was used, this property will contain the result of the read diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go index 44b71fbc..7c44e2b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go @@ -12,12 +12,14 @@ import ( ) // This operation allows you to perform transactional reads or writes on data -// stored in DynamoDB, using PartiQL. The entire transaction must consist of either -// read statements or write statements, you cannot mix both in one transaction. The -// EXISTS function is an exception and can be used to check the condition of -// specific attributes of the item in a similar manner to ConditionCheck in the -// TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems) -// API. +// stored in DynamoDB, using PartiQL. +// +// The entire transaction must consist of either read statements or write +// statements, you cannot mix both in one transaction. The EXISTS function is an +// exception and can be used to check the condition of specific attributes of the +// item in a similar manner to ConditionCheck in the [TransactWriteItems] API. +// +// [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems func (c *Client) ExecuteTransaction(ctx context.Context, params *ExecuteTransactionInput, optFns ...func(*Options)) (*ExecuteTransactionOutput, error) { if params == nil { params = &ExecuteTransactionInput{} @@ -45,10 +47,10 @@ type ExecuteTransactionInput struct { ClientRequestToken *string // Determines the level of detail about either provisioned or on-demand throughput - // consumption that is returned in the response. For more information, see - // TransactGetItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html) - // and TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html) - // . + // consumption that is returned in the response. For more information, see [TransactGetItems]and [TransactWriteItems]. + // + // [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html + // [TransactGetItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html ReturnConsumedCapacity types.ReturnConsumedCapacity noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go index 297f6de3..97b57a51 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go @@ -44,12 +44,15 @@ type ExportTableToPointInTimeInput struct { // Providing a ClientToken makes the call to ExportTableToPointInTimeInput // idempotent, meaning that multiple identical calls have the same effect as one - // single call. 
A client token is valid for 8 hours after the first request that - // uses it is completed. After 8 hours, any request with the same client token is - // treated as a new request. Do not resubmit the same request with the same client - // token for more than 8 hours, or the result might not be idempotent. If you - // submit a request with the same client token but a change in other parameters - // within the 8-hour idempotency window, DynamoDB returns an + // single call. + // + // A client token is valid for 8 hours after the first request that uses it is + // completed. After 8 hours, any request with the same client token is treated as a + // new request. Do not resubmit the same request with the same client token for + // more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an // ImportConflictException . ClientToken *string @@ -72,8 +75,10 @@ type ExportTableToPointInTimeInput struct { IncrementalExportSpecification *types.IncrementalExportSpecification // The ID of the Amazon Web Services account that owns the bucket the export will - // be stored in. S3BucketOwner is a required parameter when exporting to a S3 - // bucket in another account. + // be stored in. + // + // S3BucketOwner is a required parameter when exporting to a S3 bucket in another + // account. S3BucketOwner *string // The Amazon S3 bucket prefix to use as the file name and path of the exported @@ -82,7 +87,9 @@ type ExportTableToPointInTimeInput struct { // Type of encryption used on the bucket where export data will be stored. Valid // values for S3SseAlgorithm are: + // // - AES256 - server-side encryption with Amazon S3 managed keys + // // - KMS - server-side encryption with KMS managed keys S3SseAlgorithm types.S3SseAlgorithm diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go index 6d5ef8ec..bc59518f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go @@ -14,11 +14,12 @@ import ( // The GetItem operation returns a set of attributes for the item with the given // primary key. If there is no matching item, GetItem does not return any data and -// there will be no Item element in the response. GetItem provides an eventually -// consistent read by default. If your application requires a strongly consistent -// read, set ConsistentRead to true . Although a strongly consistent read might -// take more time than an eventually consistent read, it always returns the last -// updated value. +// there will be no Item element in the response. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true . Although a +// strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. func (c *Client) GetItem(ctx context.Context, params *GetItemInput, optFns ...func(*Options)) (*GetItemOutput, error) { if params == nil { params = &GetItemInput{} @@ -38,10 +39,12 @@ func (c *Client) GetItem(ctx context.Context, params *GetItemInput, optFns ...fu type GetItemInput struct { // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to retrieve. 
For the primary key, you must provide all of the - // attributes. For example, with a simple primary key, you only need to provide a - // value for the partition key. For a composite primary key, you must provide - // values for both the partition key and the sort key. + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. // // This member is required. Key map[string]types.AttributeValue @@ -53,8 +56,9 @@ type GetItemInput struct { TableName *string // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html AttributesToGet []string // Determines the read consistency model: If set to true , then the operation uses @@ -64,46 +68,67 @@ type GetItemInput struct { // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. 
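The #P / Percentile substitution above looks like this in a GetItem call; the Stats table and Id key are hypothetical:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// getPercentile reads one attribute whose name collides with a reserved
// word, so it is dereferenced through ExpressionAttributeNames.
func getPercentile(ctx context.Context, client *dynamodb.Client, id string) (map[string]types.AttributeValue, error) {
	out, err := client.GetItem(ctx, &dynamodb.GetItemInput{
		TableName: aws.String("Stats"),
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: id},
		},
		// #P stands in for the reserved word "Percentile".
		ProjectionExpression:     aws.String("#P"),
		ExpressionAttributeNames: map[string]string{"#P": "Percentile"},
		ConsistentRead:           aws.Bool(true), // strongly consistent read
	})
	if err != nil {
		return nil, err
	}
	return out.Item, nil
}
```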
+ // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string // A string that identifies one or more attributes to retrieve from the table. // These attributes can include scalars, sets, or elements of a JSON document. The - // attributes in the expression must be separated by commas. If no attribute names - // are specified, then all attributes are returned. If any of the requested - // attributes are not found, they do not appear in the result. For more - // information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes are returned. If any + // of the requested attributes are not found, they do not appear in the result. + // + // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -117,8 +142,9 @@ type GetItemOutput struct { // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) - // in the Amazon DynamoDB Developer Guide. + // information, see [Provisioned Throughput]in the Amazon DynamoDB Developer Guide. 
+ // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads ConsumedCapacity *types.ConsumedCapacity // A map of attribute names to AttributeValue objects, as specified by diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go index baadeb90..f578473d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go @@ -12,28 +12,35 @@ import ( ) // Returns the resource-based policy document attached to the resource, which can -// be a table or stream, in JSON format. GetResourcePolicy follows an eventually -// consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) -// model. The following list describes the outcomes when you issue the -// GetResourcePolicy request immediately after issuing another request: +// be a table or stream, in JSON format. +// +// GetResourcePolicy follows an [eventually consistent] model. The following list describes the outcomes +// when you issue the GetResourcePolicy request immediately after issuing another +// request: +// // - If you issue a GetResourcePolicy request immediately after a // PutResourcePolicy request, DynamoDB might return a PolicyNotFoundException . +// // - If you issue a GetResourcePolicy request immediately after a // DeleteResourcePolicy request, DynamoDB might return the policy that was // present before the deletion request. +// // - If you issue a GetResourcePolicy request immediately after a CreateTable // request, which includes a resource-based policy, DynamoDB might return a // ResourceNotFoundException or a PolicyNotFoundException . // // Because GetResourcePolicy uses an eventually consistent query, the metadata for // your policy or table might not be available at that moment. Wait for a few -// seconds, and then retry the GetResourcePolicy request. After a GetResourcePolicy -// request returns a policy created using the PutResourcePolicy request, you can -// assume the policy will start getting applied in the authorization of requests to -// the resource. Because this process is eventually consistent, it will take some -// time to apply the policy to all requests to a resource. Policies that you attach -// while creating a table using the CreateTable request will always be applied to -// all requests for that table. +// seconds, and then retry the GetResourcePolicy request. +// +// After a GetResourcePolicy request returns a policy created using the +// PutResourcePolicy request, the policy will be applied in the authorization of +// requests to the resource. Because this process is eventually consistent, it will +// take some time to apply the policy to all requests to a resource. Policies that +// you attach while creating a table using the CreateTable request will always be +// applied to all requests for that table. +// +// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { if params == nil { params = &GetResourcePolicyInput{} @@ -66,7 +73,7 @@ type GetResourcePolicyOutput struct { // table or stream, in JSON format. 
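Given the eventual-consistency outcomes listed above, callers typically retry for a short while before treating PolicyNotFoundException as authoritative. One possible shape, assuming the SDK surfaces that error as the generated types.PolicyNotFoundException; the attempt count and backoff are arbitrary:

```go
package main

import (
	"context"
	"errors"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// fetchPolicy retries GetResourcePolicy a few times, since a policy
// written moments ago may not be visible yet.
func fetchPolicy(ctx context.Context, client *dynamodb.Client, arn string) (string, error) {
	var notFound *types.PolicyNotFoundException
	for attempt := 0; attempt < 5; attempt++ {
		out, err := client.GetResourcePolicy(ctx, &dynamodb.GetResourcePolicyInput{
			ResourceArn: aws.String(arn),
		})
		if err == nil {
			return aws.ToString(out.Policy), nil
		}
		if !errors.As(err, &notFound) {
			return "", err // not an eventual-consistency miss; give up
		}
		time.Sleep(2 * time.Second)
	}
	return "", errors.New("policy still not visible after retries")
}
```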
Policy *string - // A unique string that represents the revision ID of the policy. If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. RevisionId *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go index fd134a68..fb838667 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go @@ -29,13 +29,13 @@ func (c *Client) ImportTable(ctx context.Context, params *ImportTableInput, optF type ImportTableInput struct { - // The format of the source data. Valid values for ImportFormat are CSV , + // The format of the source data. Valid values for ImportFormat are CSV , // DYNAMODB_JSON or ION . // // This member is required. InputFormat types.InputFormat - // The S3 bucket that provides the source for the import. + // The S3 bucket that provides the source for the import. // // This member is required. S3BucketSource *types.S3BucketSource @@ -46,19 +46,22 @@ type ImportTableInput struct { TableCreationParameters *types.TableCreationParameters // Providing a ClientToken makes the call to ImportTableInput idempotent, meaning - // that multiple identical calls have the same effect as one single call. A client - // token is valid for 8 hours after the first request that uses it is completed. - // After 8 hours, any request with the same client token is treated as a new - // request. Do not resubmit the same request with the same client token for more - // than 8 hours, or the result might not be idempotent. If you submit a request - // with the same client token but a change in other parameters within the 8-hour - // idempotency window, DynamoDB returns an IdempotentParameterMismatch exception. + // that multiple identical calls have the same effect as one single call. + // + // A client token is valid for 8 hours after the first request that uses it is + // completed. After 8 hours, any request with the same client token is treated as a + // new request. Do not resubmit the same request with the same client token for + // more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an + // IdempotentParameterMismatch exception. ClientToken *string - // Type of compression to be used on the input coming from the imported table. + // Type of compression to be used on the input coming from the imported table. InputCompressionType types.InputCompressionType - // Additional properties that specify how the input is formatted, + // Additional properties that specify how the input is formatted, InputFormatOptions *types.InputFormatOptions noSmithyDocumentSerde @@ -66,7 +69,7 @@ type ImportTableInput struct { type ImportTableOutput struct { - // Represents the properties of the table created for the import, and parameters + // Represents the properties of the table created for the import, and parameters // of the import. The import parameters include import status, how many items were // processed, and how many errors were encountered. 
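The 8-hour client-token window above is what makes retrying ImportTable safe. A sketch of an idempotent CSV import; the bucket, prefix, and table parameters are placeholders:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// startImport kicks off a CSV import. Reusing the same token within the
// 8-hour window makes retries of this call idempotent.
func startImport(ctx context.Context, client *dynamodb.Client, token string) (*dynamodb.ImportTableOutput, error) {
	return client.ImportTable(ctx, &dynamodb.ImportTableInput{
		ClientToken: aws.String(token),
		InputFormat: types.InputFormatCsv,
		S3BucketSource: &types.S3BucketSource{
			S3Bucket:    aws.String("example-import-bucket"),
			S3KeyPrefix: aws.String("exports/2024/"),
		},
		TableCreationParameters: &types.TableCreationParameters{
			TableName:   aws.String("ImportedTable"),
			BillingMode: types.BillingModePayPerRequest,
			AttributeDefinitions: []types.AttributeDefinition{
				{AttributeName: aws.String("PK"), AttributeType: types.ScalarAttributeTypeS},
			},
			KeySchema: []types.KeySchemaElement{
				{AttributeName: aws.String("PK"), KeyType: types.KeyTypeHash},
			},
		},
	})
}
```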
// diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go index 50a5ca3f..531fa624 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go @@ -17,12 +17,17 @@ import ( // and weren't made with Amazon Web Services Backup. To list these backups for a // given table, specify TableName . ListBackups returns a paginated list of // results with at most 1 MB worth of items in a page. You can also specify a -// maximum number of entries to be returned in a page. In the request, start time -// is inclusive, but end time is exclusive. Note that these boundaries are for the -// time at which the original backup was requested. You can call ListBackups a -// maximum of five times per second. If you want to retrieve the complete list of -// backups made with Amazon Web Services Backup, use the Amazon Web Services -// Backup list API. (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html) +// maximum number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. Note that +// these boundaries are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. +// +// If you want to retrieve the complete list of backups made with Amazon Web +// Services Backup, use the [Amazon Web Services Backup list API.] +// +// [Amazon Web Services Backup list API.]: https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html func (c *Client) ListBackups(ctx context.Context, params *ListBackupsInput, optFns ...func(*Options)) (*ListBackupsOutput, error) { if params == nil { params = &ListBackupsInput{} @@ -40,11 +45,15 @@ func (c *Client) ListBackups(ctx context.Context, params *ListBackupsInput, optF type ListBackupsInput struct { - // The backups from the table specified by BackupType are listed. Where BackupType - // can be: + // The backups from the table specified by BackupType are listed. + // + // Where BackupType can be: + // // - USER - On-demand backup created by you. (The default setting if no other // backup types are specified.) + // // - SYSTEM - On-demand backup automatically created by DynamoDB. + // // - ALL - All types of on-demand backups (USER and SYSTEM). BackupType types.BackupTypeFilter @@ -78,14 +87,17 @@ type ListBackupsOutput struct { // List of BackupSummary objects. BackupSummaries []types.BackupSummary - // The ARN of the backup last evaluated when the current page of results was + // The ARN of the backup last evaluated when the current page of results was // returned, inclusive of the current page of results. This value may be specified // as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch - // the next page of results. If LastEvaluatedBackupArn is empty, then the last - // page of results has been processed and there are no more results to be - // retrieved. If LastEvaluatedBackupArn is not empty, this may or may not indicate - // that there is more data to be returned. All results are guaranteed to have been - // returned if and only if no value for LastEvaluatedBackupArn is returned. + // the next page of results. + // + // If LastEvaluatedBackupArn is empty, then the last page of results has been + // processed and there are no more results to be retrieved. 
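Pagination here follows the usual DynamoDB pattern: feed LastEvaluatedBackupArn back as ExclusiveStartBackupArn until it comes back empty. A sketch; note the doc above caps ListBackups at five calls per second, so avoid tight loops across many tables or accounts:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// allBackups pages through ListBackups until LastEvaluatedBackupArn
// comes back empty, the only reliable end-of-results signal.
func allBackups(ctx context.Context, client *dynamodb.Client) ([]types.BackupSummary, error) {
	var summaries []types.BackupSummary
	var start *string
	for {
		out, err := client.ListBackups(ctx, &dynamodb.ListBackupsInput{
			BackupType:              types.BackupTypeFilterAll,
			ExclusiveStartBackupArn: start,
		})
		if err != nil {
			return nil, err
		}
		summaries = append(summaries, out.BackupSummaries...)
		if out.LastEvaluatedBackupArn == nil {
			break // all results returned
		}
		start = out.LastEvaluatedBackupArn
	}
	return summaries, nil
}
```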
+ // + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that there + // is more data to be returned. All results are guaranteed to have been returned if + // and only if no value for LastEvaluatedBackupArn is returned. LastEvaluatedBackupArn *string // Metadata pertaining to the operation's result. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go index 75fb71ef..083679e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go @@ -12,15 +12,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists all global tables that have a replica in the specified Region. This -// operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// Lists all global tables that have a replica in the specified Region. +// +// This operation only applies to [Version 2017.11.29 (Legacy)] of global tables. We recommend using [Version 2019.11.21 (Current)] when +// creating new global tables, as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// determine which version you are using, see [Determining the version]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Updating global tables]. +// +// [Updating global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Version 2017.11.29 (Legacy)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Determining the version]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) ListGlobalTables(ctx context.Context, params *ListGlobalTablesInput, optFns ...func(*Options)) (*ListGlobalTablesOutput, error) { if params == nil { params = &ListGlobalTablesInput{} @@ -42,10 +45,12 @@ type ListGlobalTablesInput struct { ExclusiveStartGlobalTableName *string // The maximum number of table names to return, if the parameter is not specified - // DynamoDB defaults to 100. If the number of global tables DynamoDB finds reaches - // this limit, it stops the operation and returns the table names collected up to - // that point, with a table name in the LastEvaluatedGlobalTableName to apply in a - // subsequent operation to the ExclusiveStartGlobalTableName parameter. + // DynamoDB defaults to 100. 
+ // + // If the number of global tables DynamoDB finds reaches this limit, it stops the + // operation and returns the table names collected up to that point, with a table + // name in the LastEvaluatedGlobalTableName to apply in a subsequent operation to + // the ExclusiveStartGlobalTableName parameter. Limit *int32 // Lists the global tables in a specific Region. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go index 7d4e34fd..fe30fd4a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go @@ -29,15 +29,15 @@ func (c *Client) ListImports(ctx context.Context, params *ListImportsInput, optF type ListImportsInput struct { - // An optional string that, if supplied, must be copied from the output of a + // An optional string that, if supplied, must be copied from the output of a // previous call to ListImports . When provided in this manner, the API fetches the // next page of results. NextToken *string - // The number of ImportSummary objects returned in a single page. + // The number of ImportSummary objects returned in a single page. PageSize *int32 - // The Amazon Resource Name (ARN) associated with the table that was imported to. + // The Amazon Resource Name (ARN) associated with the table that was imported to. TableArn *string noSmithyDocumentSerde @@ -45,10 +45,10 @@ type ListImportsInput struct { type ListImportsOutput struct { - // A list of ImportSummary objects. + // A list of ImportSummary objects. ImportSummaryList []types.ImportSummary - // If this value is returned, there are additional results to be displayed. To + // If this value is returned, there are additional results to be displayed. To // retrieve them, call ListImports again, with NextToken set to this value. NextToken *string @@ -149,7 +149,7 @@ var _ ListImportsAPIClient = (*Client)(nil) // ListImportsPaginatorOptions is the paginator options for ListImports type ListImportsPaginatorOptions struct { - // The number of ImportSummary objects returned in a single page. + // The number of ImportSummary objects returned in a single page. Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go index f59311d6..ffed1105 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go @@ -49,16 +49,18 @@ type ListTablesOutput struct { // The name of the last table in the current page of results. Use this value as // the ExclusiveStartTableName in a new request to obtain the next page of - // results, until all the table names are returned. If you do not receive a - // LastEvaluatedTableName value in the response, this means that there are no more - // table names to be retrieved. + // results, until all the table names are returned. + // + // If you do not receive a LastEvaluatedTableName value in the response, this + // means that there are no more table names to be retrieved. LastEvaluatedTableName *string // The names of the tables associated with the current account at the current - // endpoint. The maximum size of this array is 100. 
If LastEvaluatedTableName also - // appears in the output, you can use this value as the ExclusiveStartTableName - // parameter in a subsequent ListTables request and obtain the next page of - // results. + // endpoint. The maximum size of this array is 100. + // + // If LastEvaluatedTableName also appears in the output, you can use this value as + // the ExclusiveStartTableName parameter in a subsequent ListTables request and + // obtain the next page of results. TableNames []string // Metadata pertaining to the operation's result. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go index 8f604bb0..700093d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go @@ -13,9 +13,12 @@ import ( ) // List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource -// up to 10 times per second, per account. For an overview on tagging DynamoDB -// resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// up to 10 times per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html func (c *Client) ListTagsOfResource(ctx context.Context, params *ListTagsOfResourceInput, optFns ...func(*Options)) (*ListTagsOfResourceOutput, error) { if params == nil { params = &ListTagsOfResourceInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go index 39336006..a186e341 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go @@ -18,18 +18,28 @@ import ( // conditional put operation (add a new item if one with the specified primary key // doesn't exist), or replace an existing item if it has certain attribute values. // You can return the item's attribute values in the same operation, using the -// ReturnValues parameter. When you add an item, the primary key attributes are the -// only required attributes. Empty String and Binary attribute values are allowed. -// Attribute values of type String and Binary must have a length greater than zero -// if the attribute is used as a key attribute for a table or index. Set type -// attributes cannot be empty. Invalid Requests with empty values will be rejected -// with a ValidationException exception. To prevent a new item from replacing an -// existing item, use a conditional expression that contains the -// attribute_not_exists function with the name of the attribute being used as the -// partition key for the table. Since every record must contain that attribute, the -// attribute_not_exists function will only succeed if no matching item exists. For -// more information about PutItem , see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) -// in the Amazon DynamoDB Developer Guide. +// ReturnValues parameter. +// +// When you add an item, the primary key attributes are the only required +// attributes. 
+// +// Empty String and Binary attribute values are allowed. Attribute values of type +// String and Binary must have a length greater than zero if the attribute is used +// as a key attribute for a table or index. Set type attributes cannot be empty. +// +// Invalid Requests with empty values will be rejected with a ValidationException +// exception. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name of the +// attribute being used as the partition key for the table. Since every record must +// contain that attribute, the attribute_not_exists function will only succeed if +// no matching item exists. +// +// For more information about PutItem , see [Working with Items] in the Amazon DynamoDB Developer +// Guide. +// +// [Working with Items]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html func (c *Client) PutItem(ctx context.Context, params *PutItemInput, optFns ...func(*Options)) (*PutItemOutput, error) { if params == nil { params = &PutItemInput{} @@ -50,18 +60,27 @@ type PutItemInput struct { // A map of attribute name/value pairs, one for each attribute. Only the primary // key attributes are required; you can optionally provide other attribute - // name-value pairs for the item. You must provide all of the attributes for the - // primary key. For example, with a simple primary key, you only need to provide a - // value for the partition key. For a composite primary key, you must provide both - // values for both the partition key and the sort key. If you specify any - // attributes that are part of an index key, then the data types for those - // attributes must match those of the schema in the table's attribute definition. + // name-value pairs for the item. + // + // You must provide all of the attributes for the primary key. For example, with a + // simple primary key, you only need to provide a value for the partition key. For + // a composite primary key, you must provide both values for both the partition key + // and the sort key. + // + // If you specify any attributes that are part of an index key, then the data + // types for those attributes must match those of the schema in the table's + // attribute definition. + // // Empty String and Binary attribute values are allowed. Attribute values of type // String and Binary must have a length greater than zero if the attribute is used - // as a key attribute for a table or index. For more information about primary - // keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey) - // in the Amazon DynamoDB Developer Guide. Each element in the Item map is an - // AttributeValue object. + // as a key attribute for a table or index. + // + // For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer + // Guide. + // + // Each element in the Item map is an AttributeValue object. + // + // [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey // // This member is required. Item map[string]types.AttributeValue @@ -73,70 +92,111 @@ type PutItemInput struct { TableName *string // A condition that must be satisfied in order for a conditional PutItem operation - // to succeed. An expression can contain any of the following: + // to succeed. 
+ // + // An expression can contain any of the following: + // // - Functions: attribute_exists | attribute_not_exists | attribute_type | - // contains | begins_with | size These function names are case-sensitive. + // contains | begins_with | size + // + // These function names are case-sensitive. + // // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // // - Logical operators: AND | OR | NOT - // For more information on condition expressions, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // + // For more information on condition expressions, see [Condition Expressions] in the Amazon DynamoDB + // Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ConditionExpression *string // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html Expected map[string]types.ExpectedAttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. 
For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. 
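The attribute_not_exists pattern recommended above, as a complete call; the Products table and its attributes are made up, and the ProductStatus value echoes the Available | Backordered | Discontinued example from the doc:

```go
package main

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// createProduct inserts an item only if nothing already exists under
// the same partition key.
func createProduct(ctx context.Context, client *dynamodb.Client, id string) error {
	_, err := client.PutItem(ctx, &dynamodb.PutItemInput{
		TableName: aws.String("Products"),
		Item: map[string]types.AttributeValue{
			"ProductId":     &types.AttributeValueMemberS{Value: id},
			"ProductStatus": &types.AttributeValueMemberS{Value: "Available"},
		},
		// Fails with ConditionalCheckFailedException when an item with
		// this ProductId is already present.
		ConditionExpression: aws.String("attribute_not_exists(ProductId)"),
	})
	var ccf *types.ConditionalCheckFailedException
	if errors.As(err, &ccf) {
		return errors.New("product already exists")
	}
	return err
}
```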
ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -148,21 +208,29 @@ type PutItemInput struct { // Use ReturnValues if you want to get the item attributes as they appeared before // they were updated with the PutItem request. For PutItem , the valid values are: + // // - NONE - If ReturnValues is not specified, or if its value is NONE , then // nothing is returned. (This setting is the default for ReturnValues .) + // // - ALL_OLD - If PutItem overwrote an attribute name-value pair, then the // content of the old item is returned. - // The values returned are strongly consistent. There is no additional cost - // associated with requesting a return value aside from the small network and - // processing overhead of receiving a larger response. No read capacity units are - // consumed. The ReturnValues parameter is used by several DynamoDB operations; - // however, PutItem does not recognize any values other than NONE or ALL_OLD . + // + // The values returned are strongly consistent. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // PutItem does not recognize any values other than NONE or ALL_OLD . ReturnValues types.ReturnValue // An optional parameter that returns the item attributes for a PutItem operation - // that failed a condition check. There is no additional cost associated with - // requesting a return value aside from the small network and processing overhead - // of receiving a larger response. No read capacity units are consumed. + // that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -180,24 +248,30 @@ type PutItemOutput struct { // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Provisioned Throughput]in the Amazon DynamoDB Developer Guide. + // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html ConsumedCapacity *types.ConsumedCapacity // Information about item collections, if any, that were affected by the PutItem // operation. ItemCollectionMetrics is only returned if the // ReturnItemCollectionMetrics parameter was specified. If the table does not have // any local secondary indexes, this information is not returned in the response. + // // Each ItemCollectionMetrics element consists of: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item itself. + // // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. // This value is a two-element array containing a lower bound and an upper bound // for the estimate. 
The estimate includes the size of all the items in the table, // plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary - // index is approaching its size limit. The estimate is subject to change over - // time; therefore, do not rely on the precision or accuracy of the estimate. + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics *types.ItemCollectionMetrics // Metadata pertaining to the operation's result. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go index 064a2682..8a847f51 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go @@ -13,17 +13,21 @@ import ( // Attaches a resource-based policy document to the resource, which can be a table // or stream. When you attach a resource-based policy using this API, the policy -// application is eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) -// . PutResourcePolicy is an idempotent operation; running it multiple times on -// the same resource using the same policy document will return the same revision -// ID. If you specify an ExpectedRevisionId which doesn't match the current -// policy's RevisionId , the PolicyNotFoundException will be returned. +// application is [eventually consistent]. +// +// PutResourcePolicy is an idempotent operation; running it multiple times on the +// same resource using the same policy document will return the same revision ID. +// If you specify an ExpectedRevisionId that doesn't match the current policy's +// RevisionId , the PolicyNotFoundException will be returned. +// // PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy // request immediately after a PutResourcePolicy request, DynamoDB might return // your previous policy, if there was one, or return the PolicyNotFoundException . // This is because GetResourcePolicy uses an eventually consistent query, and the // metadata for your policy or table might not be available at that moment. Wait // for a few seconds, and then try the GetResourcePolicy request again. +// +// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { if params == nil { params = &PutResourcePolicyInput{} @@ -41,24 +45,33 @@ func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolic type PutResourcePolicyInput struct { - // An Amazon Web Services resource-based policy document in JSON format. The - // maximum size supported for a resource-based policy document is 20 KB. DynamoDB - // counts whitespaces when calculating the size of a policy against this limit. For - // a full list of all considerations that you should keep in mind while attaching a - // resource-based policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) - // . 
+ // An Amazon Web Services resource-based policy document in JSON format. + // + // - The maximum size supported for a resource-based policy document is 20 KB. + // DynamoDB counts whitespaces when calculating the size of a policy against this + // limit. + // + // - Within a resource-based policy, if the action for a DynamoDB service-linked + // role (SLR) to replicate data for a global table is denied, adding or deleting a + // replica will fail with an error. + // + // For a full list of all considerations that apply while attaching a + // resource-based policy, see [Resource-based policy considerations]. + // + // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html // // This member is required. Policy *string // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy - // will be attached. The resources you can specify include tables and streams. You - // can control index permissions using the base table's policy. To specify the same - // permission level for your table and its indexes, you can provide both the table - // and index Amazon Resource Name (ARN)s in the Resource field of a given Statement - // in your policy document. Alternatively, to specify different permissions for - // your table, indexes, or both, you can define multiple Statement fields in your - // policy document. + // will be attached. The resources you can specify include tables and streams. + // + // You can control index permissions using the base table's policy. To specify the + // same permission level for your table and its indexes, you can provide both the + // table and index Amazon Resource Name (ARN)s in the Resource field of a given + // Statement in your policy document. Alternatively, to specify different + // permissions for your table, indexes, or both, you can define multiple Statement + // fields in your policy document. // // This member is required. ResourceArn *string @@ -69,11 +82,14 @@ type PutResourcePolicyInput struct { // A string value that you can use to conditionally update your policy. You can // provide the revision ID of your existing policy to make mutating requests - // against that policy. When you provide an expected revision ID, if the revision - // ID of the existing policy on the resource doesn't match or if there's no policy - // attached to the resource, your request will be rejected with a - // PolicyNotFoundException . To conditionally put a policy when no policy exists - // for the resource, specify NO_POLICY for the revision ID. + // against that policy. + // + // When you provide an expected revision ID, if the revision ID of the existing + // policy on the resource doesn't match or if there's no policy attached to the + // resource, your request will be rejected with a PolicyNotFoundException . + // + // To conditionally attach a policy when no policy exists for the resource, + // specify NO_POLICY for the revision ID. ExpectedRevisionId *string noSmithyDocumentSerde @@ -81,7 +97,7 @@ type PutResourcePolicyInput struct { type PutResourcePolicyOutput struct { - // A unique string that represents the revision ID of the policy. If you are + // A unique string that represents the revision ID of the policy. If you're // comparing revision IDs, make sure to always use string comparison logic. 
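To make the revision-ID handshake documented above concrete, here is a minimal sketch (illustrative, not code from this diff) of conditionally attaching a first policy and reading back its revision ID; the region, table ARN, and policy document are hypothetical placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    // Hypothetical resource and policy; substitute real values.
    arn := "arn:aws:dynamodb:us-east-1:123456789012:table/Example"
    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:role/Reader"},"Action":"dynamodb:GetItem","Resource":"` + arn + `"}]}`

    // NO_POLICY makes the attach conditional on no policy existing yet; on a
    // later update you would pass the RevisionId from the previous response.
    out, err := client.PutResourcePolicy(ctx, &dynamodb.PutResourcePolicyInput{
        ResourceArn:        aws.String(arn),
        Policy:             aws.String(policy),
        ExpectedRevisionId: aws.String("NO_POLICY"),
    })
    if err != nil {
        log.Fatal(err)
    }
    // Compare revision IDs with string comparison, as noted above.
    fmt.Println("attached policy revision:", aws.ToString(out.RevisionId))
}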
RevisionId *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go index 83805f8d..a2b02897 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go @@ -15,40 +15,53 @@ import ( // You must provide the name of the partition key attribute and a single value for // that attribute. Query returns all items with that partition key value. // Optionally, you can provide a sort key attribute and use a comparison operator -// to refine the search results. Use the KeyConditionExpression parameter to -// provide a specific value for the partition key. The Query operation will return -// all of the items from the table or index with that partition key value. You can -// optionally narrow the scope of the Query operation by specifying a sort key -// value and a comparison operator in KeyConditionExpression . To further refine -// the Query results, you can optionally provide a FilterExpression . A -// FilterExpression determines which items within the results should be returned to -// you. All of the other results are discarded. A Query operation always returns a -// result set. If no matching items are found, the result set will be empty. -// Queries that do not return results consume the minimum number of read capacity -// units for that type of read operation. DynamoDB calculates the number of read -// capacity units consumed based on item size, not on the amount of data that is -// returned to an application. The number of capacity units consumed will be the -// same whether you request all of the attributes (the default behavior) or just -// some of them (using a projection expression). The number will also be the same -// whether or not you use a FilterExpression . Query results are always sorted by -// the sort key value. If the data type of the sort key is Number, the results are -// returned in numeric order; otherwise, the results are returned in order of UTF-8 -// bytes. By default, the sort order is ascending. To reverse the order, set the -// ScanIndexForward parameter to false. A single Query operation will read up to -// the maximum number of items set (if using the Limit parameter) or a maximum of -// 1 MB of data and then apply any filtering to the results using FilterExpression -// . If LastEvaluatedKey is present in the response, you will need to paginate the -// result set. For more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) -// in the Amazon DynamoDB Developer Guide. FilterExpression is applied after a -// Query finishes, but before the results are returned. A FilterExpression cannot -// contain partition key or sort key attributes. You need to specify those -// attributes in the KeyConditionExpression . A Query operation can return an -// empty result set and a LastEvaluatedKey if all the items read for the page of -// results are filtered out. You can query a table, a local secondary index, or a -// global secondary index. For a query on a table or on a local secondary index, -// you can set the ConsistentRead parameter to true and obtain a strongly -// consistent result. Global secondary indexes support eventually consistent reads -// only, so do not specify ConsistentRead when querying a global secondary index. +// to refine the search results. 
+// +// Use the KeyConditionExpression parameter to provide a specific value for the +// partition key. The Query operation will return all of the items from the table +// or index with that partition key value. You can optionally narrow the scope of +// the Query operation by specifying a sort key value and a comparison operator in +// KeyConditionExpression . To further refine the Query results, you can +// optionally provide a FilterExpression . A FilterExpression determines which +// items within the results should be returned to you. All of the other results are +// discarded. +// +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume the +// minimum number of read capacity units for that type of read operation. +// +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a +// FilterExpression . +// +// Query results are always sorted by the sort key value. If the data type of the +// sort key is Number, the results are returned in numeric order; otherwise, the +// results are returned in order of UTF-8 bytes. By default, the sort order is +// ascending. To reverse the order, set the ScanIndexForward parameter to false. +// +// A single Query operation will read up to the maximum number of items set (if +// using the Limit parameter) or a maximum of 1 MB of data and then apply any +// filtering to the results using FilterExpression . If LastEvaluatedKey is +// present in the response, you will need to paginate the result set. For more +// information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide. +// +// FilterExpression is applied after a Query finishes, but before the results are +// returned. A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression . +// +// A Query operation can return an empty result set and a LastEvaluatedKey if all +// the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the +// ConsistentRead parameter to true and obtain a strongly consistent result. +// Global secondary indexes support eventually consistent reads only, so do not +// specify ConsistentRead when querying a global secondary index. +// +// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination func (c *Client) Query(ctx context.Context, params *QueryInput, optFns ...func(*Options)) (*QueryOutput, error) { if params == nil { params = &QueryInput{} @@ -74,72 +87,106 @@ type QueryInput struct { TableName *string // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. 
+ // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html AttributesToGet []string // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // Determines the read consistency model: If set to true , then the operation uses // strongly consistent reads; otherwise, the operation uses eventually consistent - // reads. Strongly consistent reads are not supported on global secondary indexes. - // If you query a global secondary index with ConsistentRead set to true , you will + // reads. + // + // Strongly consistent reads are not supported on global secondary indexes. If you + // query a global secondary index with ConsistentRead set to true , you will // receive a ValidationException . ConsistentRead *bool // The primary key of the first item that this operation will evaluate. Use the - // value that was returned for LastEvaluatedKey in the previous operation. The - // data type for ExclusiveStartKey must be String, Number, or Binary. No set data - // types are allowed. + // value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number, or Binary. No set + // data types are allowed. ExclusiveStartKey map[string]types.AttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. 
+ // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Specifying Conditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Specifying Conditions] in the Amazon + // DynamoDB Developer Guide. + // + // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // A string that contains conditions that DynamoDB applies after the Query // operation, but before the data is returned to you. Items that do not satisfy the - // FilterExpression criteria are not returned. A FilterExpression does not allow - // key attributes. You cannot define a filter expression based on a partition key - // or a sort key. A FilterExpression is applied after the items have already been - // read; the process of filtering does not consume any additional read capacity - // units. For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html) - // in the Amazon DynamoDB Developer Guide. + // FilterExpression criteria are not returned. + // + // A FilterExpression does not allow key attributes. You cannot define a filter + // expression based on a partition key or a sort key. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide. 
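The ExpressionAttributeValues and FilterExpression parameters documented above combine roughly as in the following sketch; the Orders table, CustomerId key, and status values are hypothetical, and note that the filter narrows the returned items without reducing consumed read capacity:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    out, err := client.Query(ctx, &dynamodb.QueryInput{
        TableName: aws.String("Orders"), // hypothetical table
        // Key condition: an equality test on the partition key, as required.
        KeyConditionExpression: aws.String("CustomerId = :cid"),
        // Applied after the read, before results are returned.
        FilterExpression: aws.String("ProductStatus IN (:avail, :back, :disc)"),
        ExpressionAttributeValues: map[string]types.AttributeValue{
            ":cid":   &types.AttributeValueMemberS{Value: "C-42"},
            ":avail": &types.AttributeValueMemberS{Value: "Available"},
            ":back":  &types.AttributeValueMemberS{Value: "Backordered"},
            ":disc":  &types.AttributeValueMemberS{Value: "Discontinued"},
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("matched %d of %d items read\n", out.Count, out.ScannedCount)
}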
+ // + // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html FilterExpression *string // The name of an index to query. This index can be any local secondary index or @@ -148,16 +195,26 @@ type QueryInput struct { IndexName *string // The condition that specifies the key values for items to be retrieved by the - // Query action. The condition must perform an equality test on a single partition - // key value. The condition can optionally perform one of several comparison tests - // on a single sort key value. This allows Query to retrieve one item with a given + // Query action. + // + // The condition must perform an equality test on a single partition key value. + // + // The condition can optionally perform one of several comparison tests on a + // single sort key value. This allows Query to retrieve one item with a given // partition key value and sort key value, or several items that have the same - // partition key value but different sort key values. The partition key equality - // test is required, and must be specified in the following format: - // partitionKeyName = :partitionkeyval If you also want to provide a condition for - // the sort key, it must be combined using AND with the condition for the sort - // key. Following is an example, using the = comparison operator for the sort key: - // partitionKeyName + // partition key value but different sort key values. + // + // The partition key equality test is required, and must be specified in the + // following format: + // + // partitionKeyName = :partitionkeyval + // + // If you also want to provide a condition for the sort key, it must be combined + // using AND with the condition for the sort key. Following is an example, using + // the = comparison operator for the sort key: + // + // partitionKeyName + // // = // // :partitionkeyval @@ -167,45 +224,64 @@ type QueryInput struct { // sortKeyName // // = - // :sortkeyval Valid comparisons for the sort key condition are as follows: + // + // :sortkeyval + // + // Valid comparisons for the sort key condition are as follows: + // // - sortKeyName = :sortkeyval - true if the sort key value is equal to // :sortkeyval . + // // - sortKeyName < :sortkeyval - true if the sort key value is less than // :sortkeyval . + // // - sortKeyName <= :sortkeyval - true if the sort key value is less than or // equal to :sortkeyval . + // // - sortKeyName > :sortkeyval - true if the sort key value is greater than // :sortkeyval . + // // - sortKeyName >= :sortkeyval - true if the sort key value is greater than or // equal to :sortkeyval . + // // - sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key // value is greater than or equal to :sortkeyval1 , and less than or equal to // :sortkeyval2 . + // // - begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value // begins with a particular operand. (You cannot use this function with a sort key // that is of type Number.) Note that the function name begins_with is // case-sensitive. + // // Use the ExpressionAttributeValues parameter to replace tokens such as - // :partitionval and :sortval with actual values at runtime. You can optionally - // use the ExpressionAttributeNames parameter to replace the names of the - // partition key and sort key with placeholder tokens. This option might be - // necessary if an attribute name conflicts with a DynamoDB reserved word. 
For - // example, the following KeyConditionExpression parameter causes an error because - // Size is a reserved word: + // :partitionval and :sortval with actual values at runtime. + // + // You can optionally use the ExpressionAttributeNames parameter to replace the + // names of the partition key and sort key with placeholder tokens. This option + // might be necessary if an attribute name conflicts with a DynamoDB reserved word. + // For example, the following KeyConditionExpression parameter causes an error + // because Size is a reserved word: + // // - Size = :myval + // // To work around this, define a placeholder (such a #S ) to represent the // attribute name Size. KeyConditionExpression then is as follows: + // // - #S = :myval - // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide. For more information on - // ExpressionAttributeNames and ExpressionAttributeValues , see Using Placeholders - // for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html) - // in the Amazon DynamoDB Developer Guide. + // + // For a list of reserved words, see [Reserved Words] in the Amazon DynamoDB Developer Guide. + // + // For more information on ExpressionAttributeNames and ExpressionAttributeValues , + // see [Using Placeholders for Attribute Names and Values]in the Amazon DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Using Placeholders for Attribute Names and Values]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html KeyConditionExpression *string // This is a legacy parameter. Use KeyConditionExpression instead. For more - // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [KeyConditions]in the Amazon DynamoDB Developer Guide. + // + // [KeyConditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html KeyConditions map[string]types.Condition // The maximum number of items to evaluate (not necessarily the number of matching @@ -216,82 +292,104 @@ type QueryInput struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide. + // + // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit *int32 // A string that identifies one or more attributes to retrieve from the table. // These attributes can include scalars, sets, or elements of a JSON document. The - // attributes in the expression must be separated by commas. If no attribute names - // are specified, then all attributes will be returned. If any of the requested - // attributes are not found, they will not appear in the result. 
For more - // information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string // This is a legacy parameter. Use FilterExpression instead. For more information, - // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html) - // in the Amazon DynamoDB Developer Guide. + // see [QueryFilter]in the Amazon DynamoDB Developer Guide. + // + // [QueryFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html QueryFilter map[string]types.Condition // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity // Specifies the order for index traversal: If true (default), the traversal is // performed in ascending order; if false , the traversal is performed in - // descending order. Items with the same partition key value are stored in sorted - // order by sort key. If the sort key data type is Number, the results are stored - // in numeric order. For type String, the results are stored in order of UTF-8 - // bytes. For type Binary, DynamoDB treats each byte of the binary data as - // unsigned. If ScanIndexForward is true , DynamoDB returns the results in the - // order in which they are stored (by sort key value). This is the default - // behavior. If ScanIndexForward is false , DynamoDB reads the results in reverse - // order by sort key value, and then returns the results to the client. + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort key. + // If the sort key data type is Number, the results are stored in numeric order. + // For type String, the results are stored in order of UTF-8 bytes. For type + // Binary, DynamoDB treats each byte of the binary data as unsigned. + // + // If ScanIndexForward is true , DynamoDB returns the results in the order in which + // they are stored (by sort key value). This is the default behavior. 
If + // ScanIndexForward is false , DynamoDB reads the results in reverse order by sort + // key value, and then returns the results to the client. ScanIndexForward *bool // The attributes to be returned in the result. You can retrieve all item // attributes, specific item attributes, the count of matching items, or in the // case of an index, some or all of the attributes projected into the index. + // // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table // or index. If you query a local secondary index, then for each matching item in // the index, DynamoDB fetches the entire item from the parent table. If the index // is configured to project all item attributes, then all of the data can be // obtained from the local secondary index, and no fetching is required. + // // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is // configured to project all attributes, this return value is equivalent to // specifying ALL_ATTRIBUTES . + // // - COUNT - Returns the number of matching items, rather than the matching items // themselves. Note that this uses the same quantity of read capacity units as // getting the items, and is subject to the same item size calculations. + // // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in // ProjectionExpression . This return value is equivalent to specifying - // ProjectionExpression without specifying any value for Select . If you query or - // scan a local secondary index and request only attributes that are projected into - // that index, the operation will read only the index and not the table. If any of - // the requested attributes are not projected into the local secondary index, - // DynamoDB fetches each of these attributes from the parent table. This extra - // fetching incurs additional throughput cost and latency. If you query or scan a - // global secondary index, you can only request attributes that are projected into - // the index. Global secondary index queries cannot fetch attributes from the - // parent table. + // ProjectionExpression without specifying any value for Select . + // + // If you query or scan a local secondary index and request only attributes that + // are projected into that index, the operation will read only the index and not + // the table. If any of the requested attributes are not projected into the local + // secondary index, DynamoDB fetches each of these attributes from the parent + // table. This extra fetching incurs additional throughput cost and latency. + // + // If you query or scan a global secondary index, you can only request attributes + // that are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when // accessing an index. You cannot use both Select and ProjectionExpression // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES // . (This usage is equivalent to specifying ProjectionExpression without any - // value for Select .) If you use the ProjectionExpression parameter, then the - // value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select - // will return an error. + // value for Select .) 
+ // + // If you use the ProjectionExpression parameter, then the value for Select can + // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. Select types.Select noSmithyDocumentSerde @@ -303,15 +401,19 @@ type QueryOutput struct { // The capacity units consumed by the Query operation. The data returned includes // the total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned if - // the ReturnConsumedCapacity parameter was specified. For more information, see - // Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // the ReturnConsumedCapacity parameter was specified. For more information, see [Provisioned Throughput] // in the Amazon DynamoDB Developer Guide. + // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html ConsumedCapacity *types.ConsumedCapacity - // The number of items in the response. If you used a QueryFilter in the request, - // then Count is the number of items returned after the filter was applied, and - // ScannedCount is the number of matching items before the filter was applied. If - // you did not use a filter in the request, then Count and ScannedCount are the + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount are the // same. Count int32 @@ -321,18 +423,24 @@ type QueryOutput struct { // The primary key of the item where the operation stopped, inclusive of the // previous result set. Use this value to start a new operation, excluding this - // value in the new request. If LastEvaluatedKey is empty, then the "last page" of - // results has been processed and there is no more data to be retrieved. If - // LastEvaluatedKey is not empty, it does not necessarily mean that there is more - // data in the result set. The only way to know when you have reached the end of - // the result set is when LastEvaluatedKey is empty. + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there is + // more data in the result set. The only way to know when you have reached the end + // of the result set is when LastEvaluatedKey is empty. LastEvaluatedKey map[string]types.AttributeValue // The number of items evaluated, before any QueryFilter is applied. A high // ScannedCount value with few, or no, Count results indicates an inefficient Query - // operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) - // in the Amazon DynamoDB Developer Guide. If you did not use a filter in the - // request, then ScannedCount is the same as Count . + // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same as + // Count . 
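A rough sketch of the pagination contract described above, using the SDK's query paginator, which feeds each page's LastEvaluatedKey back in as ExclusiveStartKey until it is empty; the table and key names are hypothetical:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    p := dynamodb.NewQueryPaginator(client, &dynamodb.QueryInput{
        TableName:              aws.String("Orders"), // hypothetical table
        KeyConditionExpression: aws.String("CustomerId = :cid"),
        ExpressionAttributeValues: map[string]types.AttributeValue{
            ":cid": &types.AttributeValueMemberS{Value: "C-42"},
        },
        Limit: aws.Int32(100), // items evaluated per page, before any filter
    })
    var total int32
    for p.HasMorePages() {
        page, err := p.NextPage(ctx)
        if err != nil {
            log.Fatal(err)
        }
        total += page.Count
    }
    fmt.Println("total matching items:", total)
}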
+ // + // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count ScannedCount int32 // Metadata pertaining to the operation's result. @@ -489,8 +597,9 @@ type QueryPaginatorOptions struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide. + // + // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit int32 } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go index 0bba0c74..7799828c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go @@ -13,14 +13,22 @@ import ( ) // Creates a new table from an existing backup. Any number of users can execute up -// to 50 concurrent restores (any type of restore) in a given account. You can call -// RestoreTableFromBackup at a maximum rate of 10 times per second. You must -// manually set up the following on the restored table: +// to 50 concurrent restores (any type of restore) in a given account. +// +// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. +// +// You must manually set up the following on the restored table: +// // - Auto scaling policies +// // - IAM policies +// // - Amazon CloudWatch metrics and alarms +// // - Tags +// // - Stream settings +// // - Time to Live (TTL) settings func (c *Client) RestoreTableFromBackup(ctx context.Context, params *RestoreTableFromBackupInput, optFns ...func(*Options)) (*RestoreTableFromBackupOutput, error) { if params == nil { @@ -62,6 +70,11 @@ type RestoreTableFromBackupInput struct { // of the indexes at the time of restore. LocalSecondaryIndexOverride []types.LocalSecondaryIndex + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughputOverride *types.OnDemandThroughput + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *types.ProvisionedThroughput diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go index 9049d3d3..33d4b831 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go @@ -17,23 +17,40 @@ import ( // EarliestRestorableDateTime and LatestRestorableDateTime . You can restore your // table to any point in time during the last 35 days. Any number of users can // execute up to 50 concurrent restores (any type of restore) in a given account. 
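Assuming point-in-time recovery is enabled on a hypothetical source table, a restore that exercises the OnDemandThroughputOverride field added in this revision might look roughly like the sketch below; the names and limits are illustrative only, and the service may impose further constraints (for example, on the billing mode of the restored table), so treat this as a shape, not a recipe:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    out, err := client.RestoreTableToPointInTime(ctx, &dynamodb.RestoreTableToPointInTimeInput{
        SourceTableName:         aws.String("Orders"),          // hypothetical source
        TargetTableName:         aws.String("Orders-restored"), // hypothetical target
        UseLatestRestorableTime: aws.Bool(true),
        // New in this release: cap on-demand read/write units on the restored table.
        OnDemandThroughputOverride: &types.OnDemandThroughput{
            MaxReadRequestUnits:  aws.Int64(1000),
            MaxWriteRequestUnits: aws.Int64(500),
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("restore status:", out.TableDescription.TableStatus)
}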
-// When you restore using point in time recovery, DynamoDB restores your table data -// to the state based on the selected date and time (day:hour:minute:second) to a -// new table. Along with data, the following are also included on the new restored -// table using point in time recovery: +// +// When you restore using point in time recovery, DynamoDB restores your table +// data to the state based on the selected date and time (day:hour:minute:second) +// to a new table. +// +// Along with data, the following are also included on the new restored table +// using point in time recovery: +// // - Global secondary indexes (GSIs) +// // - Local secondary indexes (LSIs) +// // - Provisioned read and write capacity -// - Encryption settings All these settings come from the current settings of -// the source table at the time of restore. +// +// - Encryption settings +// +// All these settings come from the current settings of the source table at the +// +// time of restore. // // You must manually set up the following on the restored table: +// // - Auto scaling policies +// // - IAM policies +// // - Amazon CloudWatch metrics and alarms +// // - Tags +// // - Stream settings +// // - Time to Live (TTL) settings +// // - Point in time recovery settings func (c *Client) RestoreTableToPointInTime(ctx context.Context, params *RestoreTableToPointInTimeInput, optFns ...func(*Options)) (*RestoreTableToPointInTimeOutput, error) { if params == nil { @@ -70,6 +87,11 @@ type RestoreTableToPointInTimeInput struct { // of the indexes at the time of restore. LocalSecondaryIndexOverride []types.LocalSecondaryIndex + // Sets the maximum number of read and write units for the specified on-demand + // table. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughputOverride *types.OnDemandThroughput + // Provisioned throughput settings for the restored table. ProvisionedThroughputOverride *types.ProvisionedThroughput diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go index 4a538728..6da34c76 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go @@ -14,37 +14,48 @@ import ( // The Scan operation returns one or more items and item attributes by accessing // every item in a table or a secondary index. To have DynamoDB return fewer items, -// you can provide a FilterExpression operation. If the total size of scanned -// items exceeds the maximum dataset size limit of 1 MB, the scan completes and -// results are returned to the user. The LastEvaluatedKey value is also returned -// and the requestor can use the LastEvaluatedKey to continue the scan in a -// subsequent operation. Each scan response also includes number of items that were -// scanned (ScannedCount) as part of the request. If using a FilterExpression , a -// scan result can result in no items meeting the criteria and the Count will -// result in zero. If you did not use a FilterExpression in the scan request, then -// Count is the same as ScannedCount . Count and ScannedCount only return the -// count of items specific to a single scan request and, unless the table is less -// than 1MB, do not represent the total number of items in the table. 
A single Scan -// operation first reads up to the maximum number of items set (if using the Limit -// parameter) or a maximum of 1 MB of data and then applies any filtering to the -// results if a FilterExpression is provided. If LastEvaluatedKey is present in -// the response, pagination is required to complete the full table scan. For more -// information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) -// in the Amazon DynamoDB Developer Guide. Scan operations proceed sequentially; -// however, for faster performance on a large table or secondary index, -// applications can request a parallel Scan operation by providing the Segment and -// TotalSegments parameters. For more information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) -// in the Amazon DynamoDB Developer Guide. By default, a Scan uses eventually -// consistent reads when accessing the items in a table. Therefore, the results -// from an eventually consistent Scan may not include the latest item changes at -// the time the scan iterates through each item in the table. If you require a -// strongly consistent read of each item as the scan iterates through the items in -// the table, you can set the ConsistentRead parameter to true. Strong consistency -// only relates to the consistency of the read at the item level. DynamoDB does not -// provide snapshot isolation for a scan operation when the ConsistentRead -// parameter is set to true. Thus, a DynamoDB scan operation does not guarantee -// that all reads in a scan see a consistent snapshot of the table when the scan -// operation was requested. +// you can provide a FilterExpression operation. +// +// If the total size of scanned items exceeds the maximum dataset size limit of 1 +// MB, the scan completes and results are returned to the user. The +// LastEvaluatedKey value is also returned and the requestor can use the +// LastEvaluatedKey to continue the scan in a subsequent operation. Each scan +// response also includes number of items that were scanned (ScannedCount) as part +// of the request. If using a FilterExpression , a scan result can result in no +// items meeting the criteria and the Count will result in zero. If you did not +// use a FilterExpression in the scan request, then Count is the same as +// ScannedCount . +// +// Count and ScannedCount only return the count of items specific to a single scan +// request and, unless the table is less than 1MB, do not represent the total +// number of items in the table. +// +// A single Scan operation first reads up to the maximum number of items set (if +// using the Limit parameter) or a maximum of 1 MB of data and then applies any +// filtering to the results if a FilterExpression is provided. If LastEvaluatedKey +// is present in the response, pagination is required to complete the full table +// scan. For more information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide. +// +// Scan operations proceed sequentially; however, for faster performance on a +// large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see [Parallel Scan]in the Amazon DynamoDB Developer Guide. +// +// By default, a Scan uses eventually consistent reads when accessing the items in +// a table. 
Therefore, the results from an eventually consistent Scan may not +// include the latest item changes at the time the scan iterates through each item +// in the table. If you require a strongly consistent read of each item as the scan +// iterates through the items in the table, you can set the ConsistentRead +// parameter to true. Strong consistency only relates to the consistency of the +// read at the item level. +// +// DynamoDB does not provide snapshot isolation for a scan operation when the +// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does +// not guarantee that all reads in a scan see a consistent snapshot of the table +// when the scan operation was requested. +// +// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination +// [Parallel Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Options)) (*ScanOutput, error) { if params == nil { params = &ScanInput{} @@ -64,83 +75,123 @@ func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Op type ScanInput struct { // The name of the table containing the requested items or if you provide IndexName - // , the name of the table to which that index belongs. You can also provide the - // Amazon Resource Name (ARN) of the table in this parameter. + // , the name of the table to which that index belongs. + // + // You can also provide the Amazon Resource Name (ARN) of the table in this + // parameter. // // This member is required. TableName *string // This is a legacy parameter. Use ProjectionExpression instead. For more - // information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide. + // + // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html AttributesToGet []string // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. + // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // A Boolean value that determines the read consistency model during the scan: + // // - If ConsistentRead is false , then the data returned from Scan might not // contain the results from other recently completed write operations ( PutItem , // UpdateItem , or DeleteItem ). + // // - If ConsistentRead is true , then all of the write operations that completed // before the Scan began are guaranteed to be contained in the Scan response. - // The default setting for ConsistentRead is false . The ConsistentRead parameter - // is not supported on global secondary indexes. If you scan a global secondary - // index with ConsistentRead set to true, you will receive a ValidationException . + // + // The default setting for ConsistentRead is false . 
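A minimal sketch of a strongly consistent Scan, per the ConsistentRead semantics above and the global-secondary-index caveat that follows; the table name is hypothetical:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    // ConsistentRead defaults to false; setting it true guarantees the scan
    // sees all writes that completed before it began (tables and local
    // secondary indexes only; a GSI scan with ConsistentRead set to true
    // fails with a ValidationException, as documented below).
    out, err := client.Scan(ctx, &dynamodb.ScanInput{
        TableName:      aws.String("Orders"), // hypothetical table
        ConsistentRead: aws.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("read %d items\n", out.Count)
}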
+ // + // The ConsistentRead parameter is not supported on global secondary indexes. If + // you scan a global secondary index with ConsistentRead set to true, you will + // receive a ValidationException . ConsistentRead *bool // The primary key of the first item that this operation will evaluate. Use the - // value that was returned for LastEvaluatedKey in the previous operation. The - // data type for ExclusiveStartKey must be String, Number or Binary. No set data - // types are allowed. In a parallel scan, a Scan request that includes - // ExclusiveStartKey must specify the same segment whose previous Scan returned - // the corresponding value of LastEvaluatedKey . + // value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No set + // data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify + // the same segment whose previous Scan returned the corresponding value of + // LastEvaluatedKey . ExclusiveStartKey map[string]types.AttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide). To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information on - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB + // Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. 
For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // A string that contains conditions that DynamoDB applies after the Scan // operation, but before the data is returned to you. Items that do not satisfy the - // FilterExpression criteria are not returned. A FilterExpression is applied after - // the items have already been read; the process of filtering does not consume any - // additional read capacity units. For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression) - // in the Amazon DynamoDB Developer Guide. + // FilterExpression criteria are not returned. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide. + // + // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression FilterExpression *string // The name of a secondary index to scan. This index can be any local secondary @@ -156,92 +207,120 @@ type ScanInput struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide. + // + // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit *int32 // A string that identifies one or more attributes to retrieve from the specified // table or index. 
These attributes can include scalars, sets, or elements of a - // JSON document. The attributes in the expression must be separated by commas. If - // no attribute names are specified, then all attributes will be returned. If any - // of the requested attributes are not found, they will not appear in the result. - // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. If + // any of the requested attributes are not found, they will not appear in the + // result. + // + // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide. + // + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ProjectionExpression *string // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity // This is a legacy parameter. Use FilterExpression instead. For more information, - // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html) - // in the Amazon DynamoDB Developer Guide. + // see [ScanFilter]in the Amazon DynamoDB Developer Guide. + // + // [ScanFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html ScanFilter map[string]types.Condition // For a parallel Scan request, Segment identifies an individual segment to be - // scanned by an application worker. Segment IDs are zero-based, so the first - // segment is always 0. For example, if you want to use four application threads to - // scan a table or an index, then the first thread specifies a Segment value of 0, - // the second thread specifies 1, and so on. The value of LastEvaluatedKey - // returned from a parallel Scan request must be used as ExclusiveStartKey with - // the same segment ID in a subsequent Scan operation. The value for Segment must - // be greater than or equal to 0, and less than the value provided for - // TotalSegments . If you provide Segment , you must also provide TotalSegments . + // scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. 
For example, if + // you want to use four application threads to scan a table or an index, then the + // first thread specifies a Segment value of 0, the second thread specifies 1, and + // so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must be + // used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than the + // value provided for TotalSegments . + // + // If you provide Segment , you must also provide TotalSegments . Segment *int32 // The attributes to be returned in the result. You can retrieve all item // attributes, specific item attributes, the count of matching items, or in the // case of an index, some or all of the attributes projected into the index. + // // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table // or index. If you query a local secondary index, then for each matching item in // the index, DynamoDB fetches the entire item from the parent table. If the index // is configured to project all item attributes, then all of the data can be // obtained from the local secondary index, and no fetching is required. + // // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is // configured to project all attributes, this return value is equivalent to // specifying ALL_ATTRIBUTES . + // // - COUNT - Returns the number of matching items, rather than the matching items // themselves. Note that this uses the same quantity of read capacity units as // getting the items, and is subject to the same item size calculations. + // // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in // ProjectionExpression . This return value is equivalent to specifying - // ProjectionExpression without specifying any value for Select . If you query or - // scan a local secondary index and request only attributes that are projected into - // that index, the operation reads only the index and not the table. If any of the - // requested attributes are not projected into the local secondary index, DynamoDB - // fetches each of these attributes from the parent table. This extra fetching - // incurs additional throughput cost and latency. If you query or scan a global - // secondary index, you can only request attributes that are projected into the - // index. Global secondary index queries cannot fetch attributes from the parent - // table. + // ProjectionExpression without specifying any value for Select . + // + // If you query or scan a local secondary index and request only attributes that + // are projected into that index, the operation reads only the index and not the + // table. If any of the requested attributes are not projected into the local + // secondary index, DynamoDB fetches each of these attributes from the parent + // table. This extra fetching incurs additional throughput cost and latency. + // + // If you query or scan a global secondary index, you can only request attributes + // that are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when // accessing an index. 
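The Segment and TotalSegments contract described above maps naturally onto one goroutine per segment. A minimal sketch, assuming a hypothetical Orders table; each worker keeps its own LastEvaluatedKey, since a key returned for one segment cannot be used to resume a different segment.

package main

import (
	"context"
	"log"
	"sync"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	const totalSegments int32 = 4 // one worker per segment

	var wg sync.WaitGroup
	for seg := int32(0); seg < totalSegments; seg++ {
		wg.Add(1)
		go func(segment int32) {
			defer wg.Done()
			var startKey map[string]types.AttributeValue
			for {
				out, err := client.Scan(ctx, &dynamodb.ScanInput{
					TableName:         aws.String("Orders"), // hypothetical table
					Segment:           aws.Int32(segment),   // zero-based segment ID
					TotalSegments:     aws.Int32(totalSegments),
					ExclusiveStartKey: startKey, // resume within the same segment
				})
				if err != nil {
					log.Printf("segment %d: %v", segment, err)
					return
				}
				log.Printf("segment %d: read %d items", segment, out.Count)
				if len(out.LastEvaluatedKey) == 0 {
					return // this segment is exhausted
				}
				startKey = out.LastEvaluatedKey
			}
		}(seg)
	}
	wg.Wait()
}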
You cannot use both Select and ProjectionExpression // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES // . (This usage is equivalent to specifying ProjectionExpression without any - // value for Select .) If you use the ProjectionExpression parameter, then the - // value for Select can only be SPECIFIC_ATTRIBUTES . Any other value for Select - // will return an error. + // value for Select .) + // + // If you use the ProjectionExpression parameter, then the value for Select can + // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error. Select types.Select // For a parallel Scan request, TotalSegments represents the total number of // segments into which the Scan operation will be divided. The value of // TotalSegments corresponds to the number of application workers that will perform // the parallel scan. For example, if you want to use four application threads to - // scan a table or an index, specify a TotalSegments value of 4. The value for - // TotalSegments must be greater than or equal to 1, and less than or equal to - // 1000000. If you specify a TotalSegments value of 1, the Scan operation will be - // sequential rather than parallel. If you specify TotalSegments , you must also - // specify Segment . + // scan a table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less than + // or equal to 1000000. If you specify a TotalSegments value of 1, the Scan + // operation will be sequential rather than parallel. + // + // If you specify TotalSegments , you must also specify Segment . TotalSegments *int32 noSmithyDocumentSerde @@ -253,15 +332,20 @@ type ScanOutput struct { // The capacity units consumed by the Scan operation. The data returned includes // the total provisioned throughput consumed, along with statistics for the table // and any indexes involved in the operation. ConsumedCapacity is only returned if - // the ReturnConsumedCapacity parameter was specified. For more information, see - // Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) + // the ReturnConsumedCapacity parameter was specified. For more information, see [Provisioned Throughput] // in the Amazon DynamoDB Developer Guide. + // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads ConsumedCapacity *types.ConsumedCapacity - // The number of items in the response. If you set ScanFilter in the request, then - // Count is the number of items returned after the filter was applied, and - // ScannedCount is the number of matching items before the filter was applied. If - // you did not use a filter in the request, then Count is the same as ScannedCount . + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as + // ScannedCount . Count int32 // An array of item attributes that match the scan criteria. Each element in this @@ -270,18 +354,24 @@ type ScanOutput struct { // The primary key of the item where the operation stopped, inclusive of the // previous result set. 
Use this value to start a new operation, excluding this - // value in the new request. If LastEvaluatedKey is empty, then the "last page" of - // results has been processed and there is no more data to be retrieved. If - // LastEvaluatedKey is not empty, it does not necessarily mean that there is more - // data in the result set. The only way to know when you have reached the end of - // the result set is when LastEvaluatedKey is empty. + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been + // processed and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there is + // more data in the result set. The only way to know when you have reached the end + // of the result set is when LastEvaluatedKey is empty. LastEvaluatedKey map[string]types.AttributeValue // The number of items evaluated, before any ScanFilter is applied. A high // ScannedCount value with few, or no, Count results indicates an inefficient Scan - // operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) - // in the Amazon DynamoDB Developer Guide. If you did not use a filter in the - // request, then ScannedCount is the same as Count . + // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same as + // Count . + // + // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count ScannedCount int32 // Metadata pertaining to the operation's result. @@ -438,8 +528,9 @@ type ScanPaginatorOptions struct { // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the // operation and returns the matching values up to the limit, and a key in // LastEvaluatedKey to apply in a subsequent operation to continue the operation. - // For more information, see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) - // in the Amazon DynamoDB Developer Guide. + // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide. + // + // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html Limit int32 } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go index 9c953eba..188c8495 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go @@ -15,9 +15,12 @@ import ( // Associate a set of tags with an Amazon DynamoDB resource. You can then activate // these user-defined tags so that they appear on the Billing and Cost Management // console for cost allocation tracking. You can call TagResource up to five times -// per second, per account. For an overview on tagging DynamoDB resources, see -// Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. 
+// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { if params == nil { params = &TagResourceInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go index 311145a5..6cdddbfc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go @@ -18,12 +18,18 @@ import ( // each of which contains a Get structure that specifies an item to retrieve from // a table in the account and Region. A call to TransactGetItems cannot retrieve // items from tables in more than one Amazon Web Services account or Region. The -// aggregate size of the items in the transaction cannot exceed 4 MB. DynamoDB -// rejects the entire TransactGetItems request if any of the following is true: +// aggregate size of the items in the transaction cannot exceed 4 MB. +// +// DynamoDB rejects the entire TransactGetItems request if any of the following is +// true: +// // - A conflicting operation is in the process of updating an item to be read. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - There is a user error, such as an invalid data format. +// // - The aggregate size of the items in the transaction exceeded 4 MB. func (c *Client) TransactGetItems(ctx context.Context, params *TransactGetItemsInput, optFns ...func(*Options)) (*TransactGetItemsOutput, error) { if params == nil { @@ -67,10 +73,11 @@ type TransactGetItemsOutput struct { // An ordered array of up to 100 ItemResponse objects, each of which corresponds // to the TransactGetItem object in the same position in the TransactItems array. // Each ItemResponse object contains a Map of the name-value pairs that are the - // projected attributes of the requested item. If a requested item could not be - // retrieved, the corresponding ItemResponse object is Null, or if the requested - // item has no projected attributes, the corresponding ItemResponse object is an - // empty Map. + // projected attributes of the requested item. + // + // If a requested item could not be retrieved, the corresponding ItemResponse + // object is Null, or if the requested item has no projected attributes, the + // corresponding ItemResponse object is an empty Map. Responses []types.ItemResponse // Metadata pertaining to the operation's result. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go index 0582d6f0..37c6a871 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go @@ -16,25 +16,30 @@ import ( // action requests. These actions can target items in different tables, but not in // different Amazon Web Services accounts or Regions, and no two actions can target // the same item. For example, you cannot both ConditionCheck and Update the same -// item. The aggregate size of the items in the transaction cannot exceed 4 MB. The -// actions are completed atomically so that either all of them succeed, or all of -// them fail. 
They are defined by the following objects: +// item. The aggregate size of the items in the transaction cannot exceed 4 MB. +// +// The actions are completed atomically so that either all of them succeed, or all +// of them fail. They are defined by the following objects: +// // - Put — Initiates a PutItem operation to write a new item. This structure // specifies the primary key of the item to be written, the name of the table to // write it in, an optional condition expression that must be satisfied for the // write to succeed, a list of the item's attributes, and a field indicating // whether to retrieve the item's attributes if the condition is not met. +// // - Update — Initiates an UpdateItem operation to update an existing item. This // structure specifies the primary key of the item to be updated, the name of the // table where it resides, an optional condition expression that must be satisfied // for the update to succeed, an expression that defines one or more attributes to // be updated, and a field indicating whether to retrieve the item's attributes if // the condition is not met. +// // - Delete — Initiates a DeleteItem operation to delete an existing item. This // structure specifies the primary key of the item to be deleted, the name of the // table where it resides, an optional condition expression that must be satisfied // for the deletion to succeed, and a field indicating whether to retrieve the // item's attributes if the condition is not met. +// // - ConditionCheck — Applies a condition to an item that is not being modified // by the transaction. This structure specifies the primary key of the item to be // checked, the name of the table where it resides, a condition expression that @@ -43,14 +48,20 @@ import ( // // DynamoDB rejects the entire TransactWriteItems request if any of the following // is true: +// // - A condition in one of the condition expressions is not met. +// // - An ongoing operation is in the process of updating the same item. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - An item size becomes too large (bigger than 400 KB), a local secondary // index (LSI) becomes too large, or a similar validation error occurs because of // changes made by the transaction. +// // - The aggregate size of the items in the transaction exceeds 4 MB. +// // - There is a user error, such as an invalid data format. func (c *Client) TransactWriteItems(ctx context.Context, params *TransactWriteItemsInput, optFns ...func(*Options)) (*TransactWriteItemsOutput, error) { if params == nil { @@ -79,30 +90,39 @@ type TransactWriteItemsInput struct { // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent, // meaning that multiple identical calls have the same effect as one single call. + // // Although multiple identical calls using the same client request token produce // the same result on the server (no side effects), the responses to the calls // might not be the same. If the ReturnConsumedCapacity parameter is set, then the // initial TransactWriteItems call returns the amount of write capacity units // consumed in making the changes. Subsequent TransactWriteItems calls with the // same client token return the number of read capacity units consumed in reading - // the item. A client request token is valid for 10 minutes after the first request - // that uses it is completed. After 10 minutes, any request with the same client - // token is treated as a new request. 
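As a sketch of how a caller supplies the idempotency token described here, the following transaction debits one item and credits another atomically; the Accounts table, its AccountId key, the Balance attribute, and the amount are illustrative assumptions.

package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Generate a unique client request token (any unique string of up to
	// 36 characters works); reusing it on a retry keeps the call idempotent.
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		log.Fatal(err)
	}
	token := hex.EncodeToString(buf)

	_, err = client.TransactWriteItems(ctx, &dynamodb.TransactWriteItemsInput{
		ClientRequestToken: aws.String(token),
		TransactItems: []types.TransactWriteItem{
			{
				Update: &types.Update{
					TableName: aws.String("Accounts"), // hypothetical table
					Key: map[string]types.AttributeValue{
						"AccountId": &types.AttributeValueMemberS{Value: "A"},
					},
					UpdateExpression:    aws.String("SET Balance = Balance - :amt"),
					ConditionExpression: aws.String("Balance >= :amt"), // no overdraft
					ExpressionAttributeValues: map[string]types.AttributeValue{
						":amt": &types.AttributeValueMemberN{Value: "100"},
					},
				},
			},
			{
				Update: &types.Update{
					TableName: aws.String("Accounts"),
					Key: map[string]types.AttributeValue{
						"AccountId": &types.AttributeValueMemberS{Value: "B"},
					},
					UpdateExpression: aws.String("SET Balance = Balance + :amt"),
					ExpressionAttributeValues: map[string]types.AttributeValue{
						":amt": &types.AttributeValueMemberN{Value: "100"},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("transaction committed")
}

Resubmitting this exact request with the same token inside the ten-minute window does not apply the updates a second time.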
Do not resubmit the same request with the - // same client token for more than 10 minutes, or the result might not be - // idempotent. If you submit a request with the same client token but a change in - // other parameters within the 10-minute idempotency window, DynamoDB returns an + // the item. + // + // A client request token is valid for 10 minutes after the first request that + // uses it is completed. After 10 minutes, any request with the same client token + // is treated as a new request. Do not resubmit the same request with the same + // client token for more than 10 minutes, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 10-minute idempotency window, DynamoDB returns an // IdempotentParameterMismatch exception. ClientRequestToken *string // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go index 03d7cc90..ff90cf16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go @@ -12,9 +12,12 @@ import ( ) // Removes the association of tags from an Amazon DynamoDB resource. You can call -// UntagResource up to five times per second, per account. For an overview on -// tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// UntagResource up to five times per second, per account. +// +// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB +// Developer Guide. +// +// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { if params == nil { params = &UntagResourceInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go index 8ced17d6..f59bee4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go @@ -16,11 +16,14 @@ import ( // specified table. 
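A minimal sketch of calling this operation to enable point in time recovery; the Orders table name is an assumption for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	// Enable point in time recovery on the hypothetical Orders table; the
	// response echoes the resulting continuous backups description.
	out, err := client.UpdateContinuousBackups(ctx, &dynamodb.UpdateContinuousBackupsInput{
		TableName: aws.String("Orders"),
		PointInTimeRecoverySpecification: &types.PointInTimeRecoverySpecification{
			PointInTimeRecoveryEnabled: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	desc := out.ContinuousBackupsDescription
	if desc != nil && desc.PointInTimeRecoveryDescription != nil {
		fmt.Println("PITR status:", desc.PointInTimeRecoveryDescription.PointInTimeRecoveryStatus)
	}
}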
A successful UpdateContinuousBackups call returns the current // ContinuousBackupsDescription . Continuous backups are ENABLED on all tables at // table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus -// will be set to ENABLED. Once continuous backups and point in time recovery are -// enabled, you can restore to any point in time within EarliestRestorableDateTime -// and LatestRestorableDateTime . LatestRestorableDateTime is typically 5 minutes -// before the current time. You can restore your table to any point in time during -// the last 35 days. +// will be set to ENABLED. +// +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and +// LatestRestorableDateTime . +// +// LatestRestorableDateTime is typically 5 minutes before the current time. You +// can restore your table to any point in time during the last 35 days. func (c *Client) UpdateContinuousBackups(ctx context.Context, params *UpdateContinuousBackupsInput, optFns ...func(*Options)) (*UpdateContinuousBackupsOutput, error) { if params == nil { params = &UpdateContinuousBackupsInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go index 767af7c3..15041b03 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go @@ -16,25 +16,39 @@ import ( // already exist to be able to use this operation. Any replica to be added must be // empty, have the same name as the global table, have the same key schema, have // DynamoDB Streams enabled, and have the same provisioned and maximum write -// capacity units. This operation only applies to Version 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// capacity units. +// +// This operation only applies to [Version 2017.11.29 (Legacy)] of global tables. We recommend using [Version 2019.11.21 (Current)] when +// creating new global tables, as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// you can use UpdateTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html) -// instead. 
Although you can use UpdateGlobalTable to add replicas and remove -// replicas in a single request, for simplicity we recommend that you issue -// separate requests for adding or removing replicas. If global secondary indexes -// are specified, then the following conditions must also be met: +// determine which version you are using, see [Determining the version]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Updating global tables]. +// +// This operation only applies to [Version 2017.11.29] of global tables. If you are using global +// tables [Version 2019.11.21]you can use [UpdateTable] instead. +// +// Although you can use UpdateGlobalTable to add replicas and remove replicas in a +// single request, for simplicity we recommend that you issue separate requests for +// adding or removing replicas. +// +// If global secondary indexes are specified, then the following conditions must +// also be met: +// // - The global secondary indexes must have the same name. +// // - The global secondary indexes must have the same hash key and sort key (if // present). +// // - The global secondary indexes must have the same provisioned and maximum // write capacity units. +// +// [Updating global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [UpdateTable]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Version 2017.11.29 (Legacy)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Version 2017.11.29]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Version 2019.11.21]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Determining the version]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) UpdateGlobalTable(ctx context.Context, params *UpdateGlobalTableInput, optFns ...func(*Options)) (*UpdateGlobalTableOutput, error) { if params == nil { params = &UpdateGlobalTableInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go index 2b8b9c1b..06ba2374 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go @@ -12,15 +12,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates settings for a global table. This operation only applies to Version -// 2017.11.29 (Legacy) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. We recommend using Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// when creating new global tables, as it provides greater flexibility, higher +// Updates settings for a global table. +// +// This operation only applies to [Version 2017.11.29 (Legacy)] of global tables. We recommend using [Version 2019.11.21 (Current)] when +// creating new global tables, as it provides greater flexibility, higher // efficiency and consumes less write capacity than 2017.11.29 (Legacy). 
To -// determine which version you are using, see Determining the version (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html) -// . To update existing global tables from version 2017.11.29 (Legacy) to version -// 2019.11.21 (Current), see Updating global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html) -// . +// determine which version you are using, see [Determining the version]. To update existing global tables +// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Updating global tables]. +// +// [Updating global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html +// [Version 2017.11.29 (Legacy)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html +// [Determining the version]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html func (c *Client) UpdateGlobalTableSettings(ctx context.Context, params *UpdateGlobalTableSettingsInput, optFns ...func(*Options)) (*UpdateGlobalTableSettingsOutput, error) { if params == nil { params = &UpdateGlobalTableSettingsInput{} @@ -45,12 +48,15 @@ type UpdateGlobalTableSettingsInput struct { // The billing mode of the global table. If GlobalTableBillingMode is not // specified, the global table defaults to PROVISIONED capacity billing mode. + // // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) - // . + // PROVISIONED sets the billing mode to [Provisioned Mode]. + // // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand) - // . + // workloads. PAY_PER_REQUEST sets the billing mode to [On-Demand Mode]. + // + // [On-Demand Mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand + // [Provisioned Mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual GlobalTableBillingMode types.BillingMode // Represents the settings of a global secondary index for a global table that diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go index 12b11218..4c6119bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go @@ -16,9 +16,10 @@ import ( // not already exist. You can put, delete, or add attribute values. You can also // perform a conditional update on an existing item (insert a new attribute // name-value pair if it doesn't exist, or replace an existing name-value pair if -// it has certain expected attribute values). You can also return the item's -// attribute values in the same UpdateItem operation using the ReturnValues -// parameter. 
+// it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem +// operation using the ReturnValues parameter. func (c *Client) UpdateItem(ctx context.Context, params *UpdateItemInput, optFns ...func(*Options)) (*UpdateItemOutput, error) { if params == nil { params = &UpdateItemInput{} @@ -38,10 +39,12 @@ func (c *Client) UpdateItem(ctx context.Context, params *UpdateItemInput, optFns type UpdateItemInput struct { // The primary key of the item to be updated. Each element consists of an - // attribute name and a value for that attribute. For the primary key, you must - // provide all of the attributes. For example, with a simple primary key, you only - // need to provide a value for the partition key. For a composite primary key, you - // must provide values for both the partition key and the sort key. + // attribute name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, with + // a simple primary key, you only need to provide a value for the partition key. + // For a composite primary key, you must provide values for both the partition key + // and the sort key. // // This member is required. Key map[string]types.AttributeValue @@ -53,75 +56,116 @@ type UpdateItemInput struct { TableName *string // This is a legacy parameter. Use UpdateExpression instead. For more information, - // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html) - // in the Amazon DynamoDB Developer Guide. + // see [AttributeUpdates]in the Amazon DynamoDB Developer Guide. + // + // [AttributeUpdates]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html AttributeUpdates map[string]types.AttributeValueUpdate - // A condition that must be satisfied in order for a conditional update to - // succeed. An expression can contain any of the following: + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // // - Functions: attribute_exists | attribute_not_exists | attribute_type | - // contains | begins_with | size These function names are case-sensitive. + // contains | begins_with | size + // + // These function names are case-sensitive. + // // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // // - Logical operators: AND | OR | NOT - // For more information about condition expressions, see Specifying Conditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // + // For more information about condition expressions, see [Specifying Conditions] in the Amazon DynamoDB + // Developer Guide. + // + // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ConditionExpression *string // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide. 
+ // + // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html ConditionalOperator types.ConditionalOperator // This is a legacy parameter. Use ConditionExpression instead. For more - // information, see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expected]in the Amazon DynamoDB Developer Guide. + // + // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html Expected map[string]types.ExpectedAttributeValue // One or more substitution tokens for attribute names in an expression. The // following are some use cases for using ExpressionAttributeNames : + // // - To access an attribute whose name conflicts with a DynamoDB reserved word. + // // - To create a placeholder for repeating occurrences of an attribute name in // an expression. + // // - To prevent special characters in an attribute name from being // misinterpreted in an expression. + // // Use the # character in an expression to dereference an attribute name. For // example, consider the following attribute name: + // // - Percentile + // // The name of this attribute conflicts with a reserved word, so it cannot be used - // directly in an expression. (For the complete list of reserved words, see - // Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify - // the following for ExpressionAttributeNames : + // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the + // Amazon DynamoDB Developer Guide.) To work around this, you could specify the + // following for ExpressionAttributeNames : + // // - {"#P":"Percentile"} + // // You could then use this substitution in an expression, as in this example: + // // - #P = :val + // // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. For more information about - // expression attribute names, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see [Specifying Item Attributes] in the Amazon + // DynamoDB Developer Guide. + // + // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html + // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html ExpressionAttributeNames map[string]string - // One or more values that can be substituted in an expression. Use the : (colon) - // character in an expression to dereference an attribute value. 
For example, - // suppose that you wanted to check whether the value of the ProductStatus - // attribute was one of the following: Available | Backordered | Discontinued You - // would first need to specify ExpressionAttributeValues as follows: { - // ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, - // ":disc":{"S":"Discontinued"} } You could then use these values in an expression, - // such as this: ProductStatus IN (:avail, :back, :disc) For more information on - // expression attribute values, see Condition Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute value. + // For example, suppose that you wanted to check whether the value of the + // ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, + // ":disc":{"S":"Discontinued"} } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see [Condition Expressions] in the Amazon + // DynamoDB Developer Guide. + // + // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html ExpressionAttributeValues map[string]types.AttributeValue // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: + // // - INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index - // that was accessed. Note that some operations, such as GetItem and BatchGetItem - // , do not access any indexes at all. In these cases, specifying INDEXES will - // only return ConsumedCapacity information for table(s). + // that was accessed. + // + // Note that some operations, such as GetItem and BatchGetItem , do not access any + // indexes at all. In these cases, specifying INDEXES will only return + // ConsumedCapacity information for table(s). + // // - TOTAL - The response includes only the aggregate ConsumedCapacity for the // operation. + // // - NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity types.ReturnConsumedCapacity @@ -133,78 +177,110 @@ type UpdateItemInput struct { // Use ReturnValues if you want to get the item attributes as they appear before // or after they are successfully updated. For UpdateItem , the valid values are: + // // - NONE - If ReturnValues is not specified, or if its value is NONE , then // nothing is returned. (This setting is the default for ReturnValues .) + // // - ALL_OLD - Returns all of the attributes of the item, as they appeared before // the UpdateItem operation. + // // - UPDATED_OLD - Returns only the updated attributes, as they appeared before // the UpdateItem operation. + // // - ALL_NEW - Returns all of the attributes of the item, as they appear after // the UpdateItem operation. + // // - UPDATED_NEW - Returns only the updated attributes, as they appear after the // UpdateItem operation. 
+ // // There is no additional cost associated with requesting a return value aside // from the small network and processing overhead of receiving a larger response. - // No read capacity units are consumed. The values returned are strongly - // consistent. + // No read capacity units are consumed. + // + // The values returned are strongly consistent. ReturnValues types.ReturnValue // An optional parameter that returns the item attributes for an UpdateItem - // operation that failed a condition check. There is no additional cost associated - // with requesting a return value aside from the small network and processing - // overhead of receiving a larger response. No read capacity units are consumed. + // operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure // An expression that defines one or more attributes to be updated, the action to - // be performed on them, and new values for them. The following action values are - // available for UpdateExpression . + // be performed on them, and new values for them. + // + // The following action values are available for UpdateExpression . + // // - SET - Adds one or more attributes and values to an item. If any of these // attributes already exist, they are replaced by the new values. You can also use // SET to add or subtract from an attribute that is of type Number. For example: - // SET myNum = myNum + :val SET supports the following functions: + // SET myNum = myNum + :val + // + // SET supports the following functions: + // // - if_not_exists (path, operand) - if the item does not contain an attribute at // the specified path, then if_not_exists evaluates to operand; otherwise, it // evaluates to path. You can use this function to avoid overwriting an attribute // that may already be present in the item. + // // - list_append (operand, operand) - evaluates to a list with a new element // added to it. You can append the new element to the start or the end of the list - // by reversing the order of the operands. These function names are - // case-sensitive. + // by reversing the order of the operands. + // + // These function names are case-sensitive. + // // - REMOVE - Removes one or more attributes from an item. + // // - ADD - Adds the specified value to the item, if the attribute does not // already exist. If the attribute does exist, then the behavior of ADD depends // on the data type of the attribute: + // // - If the existing attribute is a number, and if Value is also a number, then // Value is mathematically added to the existing attribute. If Value is a - // negative number, then it is subtracted from the existing attribute. If you use - // ADD to increment or decrement a number value for an item that doesn't exist - // before the update, DynamoDB uses 0 as the initial value. Similarly, if you use - // ADD for an existing item to increment or decrement an attribute value that - // doesn't exist before the update, DynamoDB uses 0 as the initial value. For - // example, suppose that the item you want to update doesn't have an attribute - // named itemcount , but you decide to ADD the number 3 to this attribute anyway. - // DynamoDB will create the itemcount attribute, set its initial value to 0 , and - // finally add 3 to it. 
The result will be a new itemcount attribute in the item, - // with a value of 3 . + // negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. + // + // Similarly, if you use ADD for an existing item to increment or decrement an + // attribute value that doesn't exist before the update, DynamoDB uses 0 as the + // initial value. For example, suppose that the item you want to update doesn't + // have an attribute named itemcount , but you decide to ADD the number 3 to this + // attribute anyway. DynamoDB will create the itemcount attribute, set its + // initial value to 0 , and finally add 3 to it. The result will be a new + // itemcount attribute in the item, with a value of 3 . + // // - If the existing data type is a set and if Value is also a set, then Value is // added to the existing set. For example, if the attribute value is the set // [1,2] , and the ADD action specified [3] , then the final attribute value is // [1,2,3] . An error occurs if an ADD action is specified for a set attribute - // and the attribute type specified does not match the existing set type. Both sets - // must have the same primitive data type. For example, if the existing data type - // is a set of strings, the Value must also be a set of strings. The ADD action - // only supports Number and set data types. In addition, ADD can only be used on - // top-level attributes, not nested attributes. - // - DELETE - Deletes an element from a set. If a set of values is specified, - // then those values are subtracted from the old set. For example, if the attribute - // value was the set [a,b,c] and the DELETE action specifies [a,c] , then the - // final attribute value is [b] . Specifying an empty set is an error. The DELETE - // action only supports set data types. In addition, DELETE can only be used on - // top-level attributes, not nested attributes. + // and the attribute type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. + // + // The ADD action only supports Number and set data types. In addition, ADD can + // only be used on top-level attributes, not nested attributes. + // + // - DELETE - Deletes an element from a set. + // + // If a set of values is specified, then those values are subtracted from the old + // set. For example, if the attribute value was the set [a,b,c] and the DELETE + // action specifies [a,c] , then the final attribute value is [b] . Specifying an + // empty set is an error. + // + // The DELETE action only supports set data types. In addition, DELETE can only be + // used on top-level attributes, not nested attributes. + // // You can have many actions in a single expression, such as the following: SET - // a=:value1, b=:value2 DELETE :value3, :value4, :value5 For more information on - // update expressions, see Modifying Items and Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) - // in the Amazon DynamoDB Developer Guide. + // a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see [Modifying Items and Attributes] in the Amazon DynamoDB + // Developer Guide. 
+ // + // [Modifying Items and Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html UpdateExpression *string noSmithyDocumentSerde @@ -214,33 +290,41 @@ type UpdateItemInput struct { type UpdateItemOutput struct { // A map of attribute values as they appear before or after the UpdateItem - // operation, as determined by the ReturnValues parameter. The Attributes map is - // only present if the update was successful and ReturnValues was specified as - // something other than NONE in the request. Each element represents one attribute. + // operation, as determined by the ReturnValues parameter. + // + // The Attributes map is only present if the update was successful and ReturnValues + // was specified as something other than NONE in the request. Each element + // represents one attribute. Attributes map[string]types.AttributeValue // The capacity units consumed by the UpdateItem operation. The data returned // includes the total provisioned throughput consumed, along with statistics for // the table and any indexes involved in the operation. ConsumedCapacity is only // returned if the ReturnConsumedCapacity parameter was specified. For more - // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads) - // in the Amazon DynamoDB Developer Guide. + // information, see [Provisioned Throughput]in the Amazon DynamoDB Developer Guide. + // + // [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html#ItemSizeCalculations.Reads ConsumedCapacity *types.ConsumedCapacity // Information about item collections, if any, that were affected by the UpdateItem // operation. ItemCollectionMetrics is only returned if the // ReturnItemCollectionMetrics parameter was specified. If the table does not have // any local secondary indexes, this information is not returned in the response. + // // Each ItemCollectionMetrics element consists of: + // // - ItemCollectionKey - The partition key value of the item collection. This is // the same as the partition key value of the item itself. + // // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. // This value is a two-element array containing a lower bound and an upper bound // for the estimate. The estimate includes the size of all the items in the table, // plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary - // index is approaching its size limit. The estimate is subject to change over - // time; therefore, do not rely on the precision or accuracy of the estimate. + // index is approaching its size limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. ItemCollectionMetrics *types.ItemCollectionMetrics // Metadata pertaining to the operation's result. 
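Tying the UpdateItem parameters above together, the following sketch uses SET and ADD in one update expression, guards the write with a condition on a substituted attribute name, and reads back only the updated attributes; the Products table and its attributes are assumptions for illustration.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	out, err := client.UpdateItem(ctx, &dynamodb.UpdateItemInput{
		TableName: aws.String("Products"), // hypothetical table
		Key: map[string]types.AttributeValue{
			"ProductId": &types.AttributeValueMemberS{Value: "p-1234"},
		},
		// SET replaces (or creates) ProductStatus; ADD treats a missing
		// itemcount as 0 and adds 3 to it.
		UpdateExpression: aws.String("SET ProductStatus = :status ADD itemcount :inc"),
		// #C substitutes for an attribute name that might collide with a
		// reserved word; the update only succeeds while it is below the cap.
		ConditionExpression:      aws.String("#C < :cap"),
		ExpressionAttributeNames: map[string]string{"#C": "Comments"},
		ExpressionAttributeValues: map[string]types.AttributeValue{
			":status": &types.AttributeValueMemberS{Value: "Available"},
			":inc":    &types.AttributeValueMemberN{Value: "3"},
			":cap":    &types.AttributeValueMemberN{Value: "100"},
		},
		// Return only the attributes we changed, as they appear after the
		// update; this consumes no read capacity.
		ReturnValues: types.ReturnValueUpdatedNew,
	})
	if err != nil {
		log.Fatal(err) // a failed condition surfaces as ConditionalCheckFailedException
	}
	fmt.Println(out.Attributes)
}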
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go index a51de71d..fa8e770c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go @@ -13,11 +13,16 @@ import ( ) // Modifies the provisioned throughput settings, global secondary indexes, or -// DynamoDB Streams settings for a given table. This operation only applies to -// Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. You can only perform one of the following operations at once: +// DynamoDB Streams settings for a given table. +// +// This operation only applies to [Version 2019.11.21 (Current)] of global tables. +// +// You can only perform one of the following operations at once: +// // - Modify the provisioned throughput settings of the table. +// // - Remove a global secondary index from the table. +// // - Create a new global secondary index on the table. After the index begins // backfilling, you can use UpdateTable to perform other operations. // @@ -25,6 +30,8 @@ import ( // status changes from ACTIVE to UPDATING . While it's UPDATING , you can't issue // another UpdateTable request. When the table returns to the ACTIVE state, the // UpdateTable operation is complete. +// +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optFns ...func(*Options)) (*UpdateTableOutput, error) { if params == nil { params = &UpdateTableInput{} @@ -59,12 +66,15 @@ type UpdateTableInput struct { // provisioned capacity values must be set. The initial provisioned capacity values // are estimated based on the consumed read and write capacity of your table and // global secondary indexes over the past 30 minutes. + // // - PROVISIONED - We recommend using PROVISIONED for predictable workloads. - // PROVISIONED sets the billing mode to Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual) - // . + // PROVISIONED sets the billing mode to [Provisioned Mode]. + // // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable - // workloads. PAY_PER_REQUEST sets the billing mode to On-Demand Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand) - // . + // workloads. PAY_PER_REQUEST sets the billing mode to [On-Demand Mode]. + // + // [On-Demand Mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.OnDemand + // [Provisioned Mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html#HowItWorks.ProvisionedThroughput.Manual BillingMode types.BillingMode // Indicates whether deletion protection is to be enabled (true) or disabled @@ -73,29 +83,45 @@ type UpdateTableInput struct { // An array of one or more global secondary indexes for the table. For each index // in the array, you can request one action: + // // - Create - add a new global secondary index to the table. + // // - Update - modify the provisioned throughput settings of an existing global // secondary index. 
+ // // - Delete - remove a global secondary index from the table. + // // You can create or delete only one global secondary index per UpdateTable - // operation. For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) - // in the Amazon DynamoDB Developer Guide. + // operation. + // + // For more information, see [Managing Global Secondary Indexes] in the Amazon DynamoDB Developer Guide. + // + // [Managing Global Secondary Indexes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html GlobalSecondaryIndexUpdates []types.GlobalSecondaryIndexUpdate + // Updates the maximum number of read and write units for the specified table in + // on-demand capacity mode. If you use this parameter, you must specify + // MaxReadRequestUnits , MaxWriteRequestUnits , or both. + OnDemandThroughput *types.OnDemandThroughput + // The new provisioned throughput settings for the specified table or index. ProvisionedThroughput *types.ProvisionedThroughput // A list of replica update actions (create, delete, or update) for the table. - // This property only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) - // of global tables. + // + // This property only applies to [Version 2019.11.21 (Current)] of global tables. + // + // [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html ReplicaUpdates []types.ReplicationGroupUpdate // The new server-side encryption settings for the specified table. SSESpecification *types.SSESpecification - // Represents the DynamoDB Streams configuration for the table. You receive a - // ValidationException if you try to enable a stream on a table that already has a - // stream, or if you try to disable a stream on a table that doesn't have a stream. + // Represents the DynamoDB Streams configuration for the table. + // + // You receive a ValidationException if you try to enable a stream on a table that + // already has a stream, or if you try to disable a stream on a table that doesn't + // have a stream. StreamSpecification *types.StreamSpecification // The table class of the table to be updated. Valid values are STANDARD and diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go index ff84b122..505b558e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go @@ -11,9 +11,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Updates auto scaling settings on your global tables at once. This operation -// only applies to Version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// Updates auto scaling settings on your global tables at once. +// +// This operation only applies to [Version 2019.11.21 (Current)] of global tables. 
+// +// [Version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html func (c *Client) UpdateTableReplicaAutoScaling(ctx context.Context, params *UpdateTableReplicaAutoScalingInput, optFns ...func(*Options)) (*UpdateTableReplicaAutoScalingOutput, error) { if params == nil { params = &UpdateTableReplicaAutoScalingInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go index e6f79845..dac4591a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go @@ -16,20 +16,31 @@ import ( // specified table. A successful UpdateTimeToLive call returns the current // TimeToLiveSpecification . It can take up to one hour for the change to fully // process. Any additional UpdateTimeToLive calls for the same table during this -// one hour duration result in a ValidationException . TTL compares the current -// time in epoch time format to the time stored in the TTL attribute of an item. If -// the epoch time value stored in the attribute is less than the current time, the -// item is marked as expired and subsequently deleted. The epoch time format is the -// number of seconds elapsed since 12:00:00 AM January 1, 1970 UTC. DynamoDB -// deletes expired items on a best-effort basis to ensure availability of -// throughput for other data operations. DynamoDB typically deletes expired items -// within two days of expiration. The exact duration within which an item gets -// deleted after expiration is specific to the nature of the workload. Items that -// have expired and not been deleted will still show up in reads, queries, and -// scans. As items are deleted, they are removed from any local secondary index and +// one hour duration result in a ValidationException . +// +// TTL compares the current time in epoch time format to the time stored in the +// TTL attribute of an item. If the epoch time value stored in the attribute is +// less than the current time, the item is marked as expired and subsequently +// deleted. +// +// The epoch time format is the number of seconds elapsed since 12:00:00 AM +// January 1, 1970 UTC. +// +// DynamoDB deletes expired items on a best-effort basis to ensure availability of +// throughput for other data operations. +// +// DynamoDB typically deletes expired items within two days of expiration. The +// exact duration within which an item gets deleted after expiration is specific to +// the nature of the workload. Items that have expired and not been deleted will +// still show up in reads, queries, and scans. +// +// As items are deleted, they are removed from any local secondary index and // global secondary index immediately in the same eventually consistent way as a -// standard delete operation. For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) -// in the Amazon DynamoDB Developer Guide. +// standard delete operation. +// +// For more information, see [Time To Live] in the Amazon DynamoDB Developer Guide. 
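A minimal sketch of the TTL workflow the UpdateTimeToLive docs above describe: enable TTL on a table, then compute the epoch-seconds value to store in the TTL attribute. The table and attribute names are hypothetical.

// Sketch: enable TTL and derive an epoch-seconds expiry, as described above.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.UpdateTimeToLive(context.TODO(), &dynamodb.UpdateTimeToLiveInput{
		TableName: aws.String("example-table"), // hypothetical
		TimeToLiveSpecification: &types.TimeToLiveSpecification{
			AttributeName: aws.String("expiresAt"), // hypothetical attribute
			Enabled:       aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// TTL compares this epoch-seconds value against the current time;
	// items whose value is in the past become eligible for deletion.
	expiry := time.Now().Add(24 * time.Hour).Unix()
	fmt.Println("store in the expiresAt attribute as a Number:", expiry)
}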
+// +// [Time To Live]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html func (c *Client) UpdateTimeToLive(ctx context.Context, params *UpdateTimeToLiveInput, optFns ...func(*Options)) (*UpdateTimeToLiveOutput, error) { if params == nil { params = &UpdateTimeToLiveInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go index be3f239b..3bbd7230 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go @@ -10628,6 +10628,11 @@ func awsAwsjson10_deserializeDocumentGlobalSecondaryIndex(v **types.GlobalSecond return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "Projection": if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { return err @@ -10736,6 +10741,11 @@ func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescription(v **types.G return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "Projection": if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { return err @@ -10859,6 +10869,11 @@ func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexInfo(v **types.GlobalSe return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "Projection": if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil { return err @@ -12986,6 +13001,107 @@ func awsAwsjson10_deserializeDocumentNumberSetAttributeValue(v *[]string, value return nil } +func awsAwsjson10_deserializeDocumentOnDemandThroughput(v **types.OnDemandThroughput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OnDemandThroughput + if *v == nil { + sv = &types.OnDemandThroughput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxReadRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxReadRequestUnits = ptr.Int64(i64) + } + + case "MaxWriteRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxWriteRequestUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(v **types.OnDemandThroughputOverride, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.OnDemandThroughputOverride + if *v == nil { + sv = 
&types.OnDemandThroughputOverride{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MaxReadRequestUnits": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.MaxReadRequestUnits = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentPartiQLBatchResponse(v *[]types.BatchStatementResponse, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13744,6 +13860,11 @@ func awsAwsjson10_deserializeDocumentReplicaDescription(v **types.ReplicaDescrip sv.KMSMasterKeyId = ptr.String(jtv) } + case "OnDemandThroughputOverride": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil { + return err + } + case "ProvisionedThroughputOverride": if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil { return err @@ -13973,6 +14094,11 @@ func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescription(v ** sv.IndexName = ptr.String(jtv) } + case "OnDemandThroughputOverride": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil { + return err + } + case "ProvisionedThroughputOverride": if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil { return err @@ -14684,6 +14810,11 @@ func awsAwsjson10_deserializeDocumentSourceTableDetails(v **types.SourceTableDet return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "ProvisionedThroughput": if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil { return err @@ -15223,6 +15354,11 @@ func awsAwsjson10_deserializeDocumentTableCreationParameters(v **types.TableCrea return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "ProvisionedThroughput": if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil { return err @@ -15368,6 +15504,11 @@ func awsAwsjson10_deserializeDocumentTableDescription(v **types.TableDescription return err } + case "OnDemandThroughput": + if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil { + return err + } + case "ProvisionedThroughput": if err := awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(&sv.ProvisionedThroughput, value); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go index 45ddcae3..53f36085 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go @@ -3,19 +3,24 @@ // Package dynamodb provides the API client, operations, and parameter types for // Amazon DynamoDB. // -// Amazon DynamoDB Amazon DynamoDB is a fully managed NoSQL database service that -// provides fast and predictable performance with seamless scalability. 
DynamoDB -// lets you offload the administrative burdens of operating and scaling a -// distributed database, so that you don't have to worry about hardware -// provisioning, setup and configuration, replication, software patching, or -// cluster scaling. With DynamoDB, you can create database tables that can store -// and retrieve any amount of data, and serve any level of request traffic. You can -// scale up or scale down your tables' throughput capacity without downtime or -// performance degradation, and use the Amazon Web Services Management Console to -// monitor resource utilization and performance metrics. DynamoDB automatically -// spreads the data and traffic for your tables over a sufficient number of servers -// to handle your throughput and storage requirements, while maintaining consistent -// and fast performance. All of your data is stored on solid state disks (SSDs) and -// automatically replicated across multiple Availability Zones in an Amazon Web -// Services Region, providing built-in high availability and data durability. +// # Amazon DynamoDB +// +// Amazon DynamoDB is a fully managed NoSQL database service that provides fast +// and predictable performance with seamless scalability. DynamoDB lets you offload +// the administrative burdens of operating and scaling a distributed database, so +// that you don't have to worry about hardware provisioning, setup and +// configuration, replication, software patching, or cluster scaling. +// +// With DynamoDB, you can create database tables that can store and retrieve any +// amount of data, and serve any level of request traffic. You can scale up or +// scale down your tables' throughput capacity without downtime or performance +// degradation, and use the Amazon Web Services Management Console to monitor +// resource utilization and performance metrics. +// +// DynamoDB automatically spreads the data and traffic for your tables over a +// sufficient number of servers to handle your throughput and storage requirements, +// while maintaining consistent and fast performance. All of your data is stored on +// solid state disks (SSDs) and automatically replicated across multiple +// Availability Zones in an Amazon Web Services Region, providing built-in high +// availability and data durability. package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go index b60959f5..496ae774 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go @@ -3,4 +3,4 @@ package dynamodb // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.31.1" +const goModuleVersion = "1.32.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go index bf88e3b7..199f2cf6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go @@ -61,8 +61,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. 
To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -85,17 +87,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -112,8 +117,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -158,6 +164,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
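A minimal sketch of the migration the deprecation note above recommends: set BaseEndpoint instead of implementing the deprecated EndpointResolver. The local endpoint URL is hypothetical.

// Sketch: point the client at a fixed custom endpoint via BaseEndpoint.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	// BaseEndpoint replaces a custom EndpointResolver for fixed-endpoint use
	// cases such as DynamoDB Local.
	client := dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) {
		o.BaseEndpoint = aws.String("http://localhost:8000") // hypothetical endpoint
	})
	_ = client
}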
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go index 0a5c1e81..eb9d1999 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go @@ -3547,6 +3547,13 @@ func awsAwsjson10_serializeDocumentCreateGlobalSecondaryIndexAction(v *types.Cre } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.Projection != nil { ok := object.Key("Projection") if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { @@ -3592,6 +3599,13 @@ func awsAwsjson10_serializeDocumentCreateReplicationGroupMemberAction(v *types.C ok.String(*v.KMSMasterKeyId) } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { @@ -3880,6 +3894,13 @@ func awsAwsjson10_serializeDocumentGlobalSecondaryIndex(v *types.GlobalSecondary } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.Projection != nil { ok := object.Key("Projection") if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil { @@ -4263,6 +4284,35 @@ func awsAwsjson10_serializeDocumentNumberSetAttributeValue(v []string, value smi return nil } +func awsAwsjson10_serializeDocumentOnDemandThroughput(v *types.OnDemandThroughput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxReadRequestUnits != nil { + ok := object.Key("MaxReadRequestUnits") + ok.Long(*v.MaxReadRequestUnits) + } + + if v.MaxWriteRequestUnits != nil { + ok := object.Key("MaxWriteRequestUnits") + ok.Long(*v.MaxWriteRequestUnits) + } + + return nil +} + +func awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v *types.OnDemandThroughputOverride, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.MaxReadRequestUnits != nil { + ok := object.Key("MaxReadRequestUnits") + ok.Long(*v.MaxReadRequestUnits) + } + + return nil +} + func awsAwsjson10_serializeDocumentParameterizedStatement(v *types.ParameterizedStatement, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -4522,6 +4572,13 @@ func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndex(v *types.ReplicaG ok.String(*v.IndexName) } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { @@ -4853,6 +4910,13 @@ func 
awsAwsjson10_serializeDocumentTableCreationParameters(v *types.TableCreatio } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { @@ -5065,6 +5129,13 @@ func awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v *types.Upd ok.String(*v.IndexName) } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { @@ -5103,6 +5174,13 @@ func awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v *types.U ok.String(*v.KMSMasterKeyId) } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil { @@ -5297,6 +5375,13 @@ func awsAwsjson10_serializeOpDocumentCreateTableInput(v *CreateTableInput, value } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { @@ -6276,6 +6361,13 @@ func awsAwsjson10_serializeOpDocumentRestoreTableFromBackupInput(v *RestoreTable } } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil { @@ -6321,6 +6413,13 @@ func awsAwsjson10_serializeOpDocumentRestoreTableToPointInTimeInput(v *RestoreTa } } + if v.OnDemandThroughputOverride != nil { + ok := object.Key("OnDemandThroughputOverride") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil { + return err + } + } + if v.ProvisionedThroughputOverride != nil { ok := object.Key("ProvisionedThroughputOverride") if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil { @@ -6783,6 +6882,13 @@ func awsAwsjson10_serializeOpDocumentUpdateTableInput(v *UpdateTableInput, value } } + if v.OnDemandThroughput != nil { + ok := object.Key("OnDemandThroughput") + if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil { + return err + } + } + if v.ProvisionedThroughput != nil { ok := object.Key("ProvisionedThroughput") if err := 
awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go index ec617f93..73e4795b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go @@ -12,8 +12,9 @@ const ( // Values returns all known values for ApproximateCreationDateTimePrecision. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ApproximateCreationDateTimePrecision) Values() []ApproximateCreationDateTimePrecision { return []ApproximateCreationDateTimePrecision{ "MILLISECOND", @@ -31,8 +32,9 @@ const ( ) // Values returns all known values for AttributeAction. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (AttributeAction) Values() []AttributeAction { return []AttributeAction{ "ADD", @@ -51,8 +53,9 @@ const ( ) // Values returns all known values for BackupStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BackupStatus) Values() []BackupStatus { return []BackupStatus{ "CREATING", @@ -71,8 +74,9 @@ const ( ) // Values returns all known values for BackupType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BackupType) Values() []BackupType { return []BackupType{ "USER", @@ -92,8 +96,9 @@ const ( ) // Values returns all known values for BackupTypeFilter. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BackupTypeFilter) Values() []BackupTypeFilter { return []BackupTypeFilter{ "USER", @@ -122,6 +127,7 @@ const ( // Values returns all known values for BatchStatementErrorCodeEnum. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (BatchStatementErrorCodeEnum) Values() []BatchStatementErrorCodeEnum { return []BatchStatementErrorCodeEnum{ @@ -148,8 +154,9 @@ const ( ) // Values returns all known values for BillingMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. 
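A minimal sketch of the Values() pattern these enum comments describe, using BillingMode as the example; as the comments note, the slice is only as current as the client and its ordering may change across updates.

// Sketch: enumerate the known values of an SDK enum type at runtime.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	// Values() has a value receiver, so it can be called on the zero value.
	for _, mode := range types.BillingMode("").Values() {
		fmt.Println(mode) // PROVISIONED, PAY_PER_REQUEST
	}
}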
The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BillingMode) Values() []BillingMode { return []BillingMode{ "PROVISIONED", @@ -177,8 +184,9 @@ const ( ) // Values returns all known values for ComparisonOperator. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ComparisonOperator) Values() []ComparisonOperator { return []ComparisonOperator{ "EQ", @@ -206,8 +214,9 @@ const ( ) // Values returns all known values for ConditionalOperator. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ConditionalOperator) Values() []ConditionalOperator { return []ConditionalOperator{ "AND", @@ -224,8 +233,9 @@ const ( ) // Values returns all known values for ContinuousBackupsStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ContinuousBackupsStatus) Values() []ContinuousBackupsStatus { return []ContinuousBackupsStatus{ "ENABLED", @@ -243,6 +253,7 @@ const ( // Values returns all known values for ContributorInsightsAction. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ContributorInsightsAction) Values() []ContributorInsightsAction { return []ContributorInsightsAction{ @@ -264,6 +275,7 @@ const ( // Values returns all known values for ContributorInsightsStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ContributorInsightsStatus) Values() []ContributorInsightsStatus { return []ContributorInsightsStatus{ @@ -288,8 +300,9 @@ const ( ) // Values returns all known values for DestinationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DestinationStatus) Values() []DestinationStatus { return []DestinationStatus{ "ENABLING", @@ -310,8 +323,9 @@ const ( ) // Values returns all known values for ExportFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportFormat) Values() []ExportFormat { return []ExportFormat{ "DYNAMODB_JSON", @@ -329,8 +343,9 @@ const ( ) // Values returns all known values for ExportStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportStatus) Values() []ExportStatus { return []ExportStatus{ "IN_PROGRESS", @@ -348,8 +363,9 @@ const ( ) // Values returns all known values for ExportType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportType) Values() []ExportType { return []ExportType{ "FULL_EXPORT", @@ -366,8 +382,9 @@ const ( ) // Values returns all known values for ExportViewType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExportViewType) Values() []ExportViewType { return []ExportViewType{ "NEW_IMAGE", @@ -386,8 +403,9 @@ const ( ) // Values returns all known values for GlobalTableStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (GlobalTableStatus) Values() []GlobalTableStatus { return []GlobalTableStatus{ "CREATING", @@ -409,8 +427,9 @@ const ( ) // Values returns all known values for ImportStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ImportStatus) Values() []ImportStatus { return []ImportStatus{ "IN_PROGRESS", @@ -432,8 +451,9 @@ const ( ) // Values returns all known values for IndexStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (IndexStatus) Values() []IndexStatus { return []IndexStatus{ "CREATING", @@ -453,8 +473,9 @@ const ( ) // Values returns all known values for InputCompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (InputCompressionType) Values() []InputCompressionType { return []InputCompressionType{ "GZIP", @@ -473,8 +494,9 @@ const ( ) // Values returns all known values for InputFormat. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InputFormat) Values() []InputFormat { return []InputFormat{ "DYNAMODB_JSON", @@ -492,8 +514,9 @@ const ( ) // Values returns all known values for KeyType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (KeyType) Values() []KeyType { return []KeyType{ "HASH", @@ -511,6 +534,7 @@ const ( // Values returns all known values for PointInTimeRecoveryStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (PointInTimeRecoveryStatus) Values() []PointInTimeRecoveryStatus { return []PointInTimeRecoveryStatus{ @@ -529,8 +553,9 @@ const ( ) // Values returns all known values for ProjectionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ProjectionType) Values() []ProjectionType { return []ProjectionType{ "ALL", @@ -553,8 +578,9 @@ const ( ) // Values returns all known values for ReplicaStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicaStatus) Values() []ReplicaStatus { return []ReplicaStatus{ "CREATING", @@ -577,8 +603,9 @@ const ( ) // Values returns all known values for ReturnConsumedCapacity. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReturnConsumedCapacity) Values() []ReturnConsumedCapacity { return []ReturnConsumedCapacity{ "INDEXES", @@ -597,6 +624,7 @@ const ( // Values returns all known values for ReturnItemCollectionMetrics. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ReturnItemCollectionMetrics) Values() []ReturnItemCollectionMetrics { return []ReturnItemCollectionMetrics{ @@ -617,8 +645,9 @@ const ( ) // Values returns all known values for ReturnValue. 
Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReturnValue) Values() []ReturnValue { return []ReturnValue{ "NONE", @@ -639,8 +668,9 @@ const ( // Values returns all known values for ReturnValuesOnConditionCheckFailure. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReturnValuesOnConditionCheckFailure) Values() []ReturnValuesOnConditionCheckFailure { return []ReturnValuesOnConditionCheckFailure{ "ALL_OLD", @@ -657,8 +687,9 @@ const ( ) // Values returns all known values for S3SseAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (S3SseAlgorithm) Values() []S3SseAlgorithm { return []S3SseAlgorithm{ "AES256", @@ -676,8 +707,9 @@ const ( ) // Values returns all known values for ScalarAttributeType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ScalarAttributeType) Values() []ScalarAttributeType { return []ScalarAttributeType{ "S", @@ -697,8 +729,9 @@ const ( ) // Values returns all known values for Select. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Select) Values() []Select { return []Select{ "ALL_ATTRIBUTES", @@ -720,8 +753,9 @@ const ( ) // Values returns all known values for SSEStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SSEStatus) Values() []SSEStatus { return []SSEStatus{ "ENABLING", @@ -741,8 +775,9 @@ const ( ) // Values returns all known values for SSEType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SSEType) Values() []SSEType { return []SSEType{ "AES256", @@ -761,8 +796,9 @@ const ( ) // Values returns all known values for StreamViewType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StreamViewType) Values() []StreamViewType { return []StreamViewType{ "NEW_IMAGE", @@ -781,8 +817,9 @@ const ( ) // Values returns all known values for TableClass. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TableClass) Values() []TableClass { return []TableClass{ "STANDARD", @@ -804,8 +841,9 @@ const ( ) // Values returns all known values for TableStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TableStatus) Values() []TableStatus { return []TableStatus{ "CREATING", @@ -829,8 +867,9 @@ const ( ) // Values returns all known values for TimeToLiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TimeToLiveStatus) Values() []TimeToLiveStatus { return []TimeToLiveStatus{ "ENABLING", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go index d11b3b8b..97bf7dbd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go @@ -116,7 +116,8 @@ func (e *ContinuousBackupsUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// There was an attempt to insert an item with the same primary key as an item +// There was an attempt to insert an item with the same primary key as an item +// // that already exists in the DynamoDB table. type DuplicateItemException struct { Message *string @@ -276,7 +277,8 @@ func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// There was a conflict when importing from the specified S3 source. This can +// There was a conflict when importing from the specified S3 source. This can +// // occur when the current import conflicts with a previous import request that had // the same client token. type ImportConflictException struct { @@ -490,17 +492,26 @@ func (e *ItemCollectionSizeLimitExceededException) ErrorFault() smithy.ErrorFaul } // There is no limit to the number of daily on-demand backups that can be taken. +// // For most purposes, up to 500 simultaneous table operations are allowed per // account. These operations include CreateTable , UpdateTable , DeleteTable , -// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . When -// you are creating a table with one or more secondary indexes, you can have up to -// 250 such requests running at a time. 
However, if the table or index +// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime . +// +// When you are creating a table with one or more secondary indexes, you can have +// up to 250 such requests running at a time. However, if the table or index // specifications are complex, then DynamoDB might temporarily reduce the number of -// concurrent operations. When importing into DynamoDB, up to 50 simultaneous -// import table operations are allowed per account. There is a soft account quota -// of 2,500 tables. GetRecords was called with a value of more than 1000 for the -// limit request parameter. More than 2 processes are reading from the same streams -// shard at the same time. Exceeding this limit may result in request throttling. +// concurrent operations. +// +// When importing into DynamoDB, up to 50 simultaneous import table operations are +// allowed per account. +// +// There is a soft account quota of 2,500 tables. +// +// GetRecords was called with a value of more than 1000 for the limit request +// parameter. +// +// More than 2 processes are reading from the same streams shard at the same time. +// Exceeding this limit may result in request throttling. type LimitExceededException struct { Message *string @@ -554,9 +565,10 @@ func (e *PointInTimeRecoveryUnavailableException) ErrorFault() smithy.ErrorFault return smithy.FaultClient } -// The operation tried to access a nonexistent resource-based policy. If you -// specified an ExpectedRevisionId , it's possible that a policy is present for the -// resource but its revision ID didn't match the expected value. +// The operation tried to access a nonexistent resource-based policy. +// +// If you specified an ExpectedRevisionId , it's possible that a policy is present +// for the resource but its revision ID didn't match the expected value. type PolicyNotFoundException struct { Message *string @@ -586,8 +598,9 @@ func (e *PolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy // automatically retry requests that receive this exception. Your request is // eventually successful, unless your retry queue is too large to finish. Reduce // the frequency of requests and use exponential backoff. For more information, go -// to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. +// to [Error Retries and Exponential Backoff]in the Amazon DynamoDB Developer Guide. +// +// [Error Retries and Exponential Backoff]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff type ProvisionedThroughputExceededException struct { Message *string @@ -668,8 +681,9 @@ func (e *ReplicaNotFoundException) ErrorCode() string { func (e *ReplicaNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// contact [Amazon Web Services Support]to request a quota increase. 
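A minimal sketch of tuning the SDK's standard retryer for the throttling errors described above; the standard retryer already applies exponential backoff, and the attempt count and backoff cap shown are hypothetical.

// Sketch: raise max attempts and cap backoff on the standard retryer.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRetryer(func() aws.Retryer {
			r := retry.AddWithMaxAttempts(retry.NewStandard(), 10)
			return retry.AddWithMaxBackoffDelay(r, 20*time.Second)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = dynamodb.NewFromConfig(cfg)
}

The Options comments earlier in this diff also note a per-call override: passing func(o *dynamodb.Options) { o.RetryMaxAttempts = 5 } as a final argument to a single operation wraps the client's retryer with that value for that call only.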
+// +// [Amazon Web Services Support]: https://aws.amazon.com/support type RequestLimitExceeded struct { Message *string @@ -830,92 +844,150 @@ func (e *TableNotFoundException) ErrorCode() string { } func (e *TableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The entire transaction request was canceled. DynamoDB cancels a -// TransactWriteItems request under the following circumstances: +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// // - A condition in one of the condition expressions is not met. +// // - A table in the TransactWriteItems request is in a different account or // region. +// // - More than one action in the TransactWriteItems operation targets the same // item. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - An item size becomes too large (larger than 400 KB), or a local secondary // index (LSI) becomes too large, or a similar validation error occurs because of // changes made by the transaction. +// // - There is a user error, such as an invalid data format. +// // - There is an ongoing TransactWriteItems operation that conflicts with a // concurrent TransactWriteItems request. In this case the TransactWriteItems // operation fails with a TransactionCanceledException . // // DynamoDB cancels a TransactGetItems request under the following circumstances: +// // - There is an ongoing TransactGetItems operation that conflicts with a // concurrent PutItem , UpdateItem , DeleteItem or TransactWriteItems request. In // this case the TransactGetItems operation fails with a // TransactionCanceledException . +// // - A table in the TransactGetItems request is in a different account or region. +// // - There is insufficient provisioned capacity for the transaction to be // completed. +// // - There is a user error, such as an invalid data format. // // If using Java, DynamoDB lists the cancellation reasons on the // CancellationReasons property. This property is not set for other languages. // Transaction cancellation reasons are ordered in the order of requested items, if -// an item has no error it will have None code and Null message. Cancellation -// reason codes and possible error messages: +// an item has no error it will have None code and Null message. +// +// Cancellation reason codes and possible error messages: +// // - No Errors: +// // - Code: None +// // - Message: null +// // - Conditional Check Failed: +// // - Code: ConditionalCheckFailed +// // - Message: The conditional request failed. +// // - Item Collection Size Limit Exceeded: +// // - Code: ItemCollectionSizeLimitExceeded +// // - Message: Collection size exceeded. +// // - Transaction Conflict: +// // - Code: TransactionConflict +// // - Message: Transaction is ongoing for the item. +// // - Provisioned Throughput Exceeded: +// // - Code: ProvisionedThroughputExceeded +// // - Messages: +// // - The level of configured provisioned throughput for the table was exceeded. -// Consider increasing your provisioning level with the UpdateTable API. This -// Message is received when provisioned throughput is exceeded is on a provisioned -// DynamoDB table. -// - The level of configured provisioned throughput for one or more global -// secondary indexes of the table was exceeded. Consider increasing your -// provisioning level for the under-provisioned global secondary indexes with the -// UpdateTable API. 
This message is returned when provisioned throughput is -// exceeded is on a provisioned GSI. -// - Throttling Error: -// - Code: ThrottlingError -// - Messages: -// - Throughput exceeds the current capacity of your table or index. DynamoDB is -// automatically scaling your table or index so please try again shortly. If -// exceptions persist, check if you have a hot key: -// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table as -// DynamoDB is automatically scaling the table. -// - Throughput exceeds the current capacity for one or more global secondary -// indexes. DynamoDB is automatically scaling your index so please try again -// shortly. This message is returned when writes get throttled on an On-Demand GSI -// as DynamoDB is automatically scaling the GSI. -// - Validation Error: -// - Code: ValidationError -// - Messages: -// - One or more parameter values were invalid. -// - The update expression attempted to update the secondary index key beyond -// allowed size limits. -// - The update expression attempted to update the secondary index key to -// unsupported type. -// - An operand in the update expression has an incorrect data type. -// - Item size to update has exceeded the maximum allowed size. -// - Number overflow. Attempting to store a number with magnitude larger than -// supported range. -// - Type mismatch for attribute to update. -// - Nesting Levels have exceeded supported limits. -// - The document path provided in the update expression is invalid for update. -// - The provided expression refers to an attribute that does not exist in the -// item. +// Consider increasing your provisioning level with the UpdateTable API. +// +// This Message is received when provisioned throughput is exceeded is on a +// +// provisioned DynamoDB table. +// +// - The level of configured provisioned throughput for one or more global +// secondary indexes of the table was exceeded. Consider increasing your +// provisioning level for the under-provisioned global secondary indexes with the +// UpdateTable API. +// +// This message is returned when provisioned throughput is exceeded is on a +// +// provisioned GSI. +// +// - Throttling Error: +// +// - Code: ThrottlingError +// +// - Messages: +// +// - Throughput exceeds the current capacity of your table or index. DynamoDB is +// automatically scaling your table or index so please try again shortly. If +// exceptions persist, check if you have a hot key: +// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. +// +// This message is returned when writes get throttled on an On-Demand table as +// +// DynamoDB is automatically scaling the table. +// +// - Throughput exceeds the current capacity for one or more global secondary +// indexes. DynamoDB is automatically scaling your index so please try again +// shortly. +// +// This message is returned when writes get throttled on an On-Demand GSI as +// +// DynamoDB is automatically scaling the GSI. +// +// - Validation Error: +// +// - Code: ValidationError +// +// - Messages: +// +// - One or more parameter values were invalid. +// +// - The update expression attempted to update the secondary index key beyond +// allowed size limits. +// +// - The update expression attempted to update the secondary index key to +// unsupported type. +// +// - An operand in the update expression has an incorrect data type. 
+// +// - Item size to update has exceeded the maximum allowed size. +// +// - Number overflow. Attempting to store a number with magnitude larger than +// supported range. +// +// - Type mismatch for attribute to update. +// +// - Nesting Levels have exceeded supported limits. +// +// - The document path provided in the update expression is invalid for update. +// +// - The provided expression refers to an attribute that does not exist in the +// item. type TransactionCanceledException struct { Message *string @@ -970,31 +1042,48 @@ func (e *TransactionConflictException) ErrorCode() string { func (e *TransactionConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The transaction with the given request token is already in progress. -// Recommended Settings This is a general recommendation for handling the -// TransactionInProgressException . These settings help ensure that the client -// retries will trigger completion of the ongoing TransactWriteItems request. +// +// # Recommended Settings +// +// This is a general recommendation for handling the TransactionInProgressException +// . These settings help ensure that the client retries will trigger completion of +// the ongoing TransactWriteItems request. +// // - Set clientExecutionTimeout to a value that allows at least one retry to be // processed after 5 seconds have elapsed since the first attempt for the // TransactWriteItems operation. +// // - Set socketTimeout to a value a little lower than the requestTimeout setting. +// // - requestTimeout should be set based on the time taken for the individual // retries of a single HTTP request for your use case, but setting it to 1 second // or higher should work well to reduce chances of retries and // TransactionInProgressException errors. +// // - Use exponential backoff when retrying and tune backoff if needed. // -// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97) -// , example timeout settings based on the guidelines above are as follows: Example -// timeline: +// Assuming [default retry policy], example timeout settings based on the guidelines above are as +// follows: +// +// Example timeline: +// // - 0-1000 first attempt +// // - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base delay // for 4xx errors) +// // - 1500-2500 second attempt +// // - 2500-3500 second sleep/delay (500 * 2, exponential backoff) +// // - 3500-4500 third attempt +// // - 4500-6500 third sleep/delay (500 * 2^2) +// // - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds // have elapsed since the first attempt reached TC) +// +// [default retry policy]: https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97 type TransactionInProgressException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go index 437acc5d..fa7b9ee5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go @@ -20,6 +20,7 @@ type ArchivalSummary struct { ArchivalDateTime *time.Time // The reason DynamoDB archived the table. 
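A minimal sketch of surfacing the per-item cancellation reasons discussed above; the Go SDK exposes them on types.TransactionCanceledException.CancellationReasons. The table name and item are hypothetical.

// Sketch: inspect why a TransactWriteItems call was canceled.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	_, err = client.TransactWriteItems(context.TODO(), &dynamodb.TransactWriteItemsInput{
		TransactItems: []types.TransactWriteItem{{
			Put: &types.Put{
				TableName: aws.String("example-table"), // hypothetical
				Item: map[string]types.AttributeValue{
					"pk": &types.AttributeValueMemberS{Value: "item-1"},
				},
				ConditionExpression: aws.String("attribute_not_exists(pk)"),
			},
		}},
	})

	var tce *types.TransactionCanceledException
	if errors.As(err, &tce) {
		// Reasons are ordered like the requested items; code "None" means no error.
		for i, r := range tce.CancellationReasons {
			fmt.Printf("item %d: code=%s message=%s\n", i, aws.ToString(r.Code), aws.ToString(r.Message))
		}
	}
}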
Currently, the only possible value is: + // // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the // table's KMS key being inaccessible for more than seven days. An On-Demand backup // was created at the archival time. @@ -37,8 +38,11 @@ type AttributeDefinition struct { AttributeName *string // The data type for the attribute, where: + // // - S - the attribute is of type String + // // - N - the attribute is of type Number + // // - B - the attribute is of type Binary // // This member is required. @@ -47,10 +51,12 @@ type AttributeDefinition struct { noSmithyDocumentSerde } -// Represents the data for an attribute. Each attribute value is described as a -// name-value pair. The name is the data type, and the value is the data itself. -// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) -// in the Amazon DynamoDB Developer Guide. +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. // // The following types satisfy this interface: // @@ -64,12 +70,15 @@ type AttributeDefinition struct { // AttributeValueMemberNULL // AttributeValueMemberS // AttributeValueMemberSS +// +// [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes type AttributeValue interface { isAttributeValue() } -// An attribute of type Binary. For example: "B": -// "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" +// An attribute of type Binary. For example: +// +// "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" type AttributeValueMemberB struct { Value []byte @@ -78,7 +87,9 @@ type AttributeValueMemberB struct { func (*AttributeValueMemberB) isAttributeValue() {} -// An attribute of type Boolean. For example: "BOOL": true +// An attribute of type Boolean. For example: +// +// "BOOL": true type AttributeValueMemberBOOL struct { Value bool @@ -87,8 +98,9 @@ type AttributeValueMemberBOOL struct { func (*AttributeValueMemberBOOL) isAttributeValue() {} -// An attribute of type Binary Set. For example: "BS": ["U3Vubnk=", "UmFpbnk=", -// "U25vd3k="] +// An attribute of type Binary Set. For example: +// +// "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] type AttributeValueMemberBS struct { Value [][]byte @@ -97,8 +109,9 @@ type AttributeValueMemberBS struct { func (*AttributeValueMemberBS) isAttributeValue() {} -// An attribute of type List. For example: "L": [ {"S": "Cookies"} , {"S": -// "Coffee"}, {"N": "3.14159"}] +// An attribute of type List. For example: +// +// "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] type AttributeValueMemberL struct { Value []AttributeValue @@ -107,8 +120,9 @@ type AttributeValueMemberL struct { func (*AttributeValueMemberL) isAttributeValue() {} -// An attribute of type Map. For example: "M": {"Name": {"S": "Joe"}, "Age": {"N": -// "35"}} +// An attribute of type Map. For example: +// +// "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} type AttributeValueMemberM struct { Value map[string]AttributeValue @@ -117,10 +131,13 @@ type AttributeValueMemberM struct { func (*AttributeValueMemberM) isAttributeValue() {} -// An attribute of type Number. 
For example: "N": "123.45" Numbers are sent across -// the network to DynamoDB as strings, to maximize compatibility across languages -// and libraries. However, DynamoDB treats them as number type attributes for -// mathematical operations. +// An attribute of type Number. For example: +// +// "N": "123.45" +// +// Numbers are sent across the network to DynamoDB as strings, to maximize +// compatibility across languages and libraries. However, DynamoDB treats them as +// number type attributes for mathematical operations. type AttributeValueMemberN struct { Value string @@ -129,8 +146,11 @@ type AttributeValueMemberN struct { func (*AttributeValueMemberN) isAttributeValue() {} -// An attribute of type Number Set. For example: "NS": ["42.2", "-19", "7.5", -// "3.14"] Numbers are sent across the network to DynamoDB as strings, to maximize +// An attribute of type Number Set. For example: +// +// "NS": ["42.2", "-19", "7.5", "3.14"] +// +// Numbers are sent across the network to DynamoDB as strings, to maximize // compatibility across languages and libraries. However, DynamoDB treats them as // number type attributes for mathematical operations. type AttributeValueMemberNS struct { @@ -141,7 +161,9 @@ type AttributeValueMemberNS struct { func (*AttributeValueMemberNS) isAttributeValue() {} -// An attribute of type Null. For example: "NULL": true +// An attribute of type Null. For example: +// +// "NULL": true type AttributeValueMemberNULL struct { Value bool @@ -150,7 +172,9 @@ type AttributeValueMemberNULL struct { func (*AttributeValueMemberNULL) isAttributeValue() {} -// An attribute of type String. For example: "S": "Hello" +// An attribute of type String. For example: +// +// "S": "Hello" type AttributeValueMemberS struct { Value string @@ -159,8 +183,9 @@ type AttributeValueMemberS struct { func (*AttributeValueMemberS) isAttributeValue() {} -// An attribute of type String Set. For example: "SS": ["Giraffe", "Hippo" -// ,"Zebra"] +// An attribute of type String Set. For example: +// +// "SS": ["Giraffe", "Hippo" ,"Zebra"] type AttributeValueMemberSS struct { Value []string @@ -170,64 +195,89 @@ type AttributeValueMemberSS struct { func (*AttributeValueMemberSS) isAttributeValue() {} // For the UpdateItem operation, represents the attributes to be modified, the -// action to perform on each, and the new value for each. You cannot use UpdateItem -// to update any primary key attributes. Instead, you will need to delete the item, -// and then use PutItem to create a new item with new attributes. Attribute values -// cannot be null; string and binary type attributes must have lengths greater than -// zero; and set type attributes must not be empty. Requests with empty values will -// be rejected with a ValidationException exception. +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, you +// will need to delete the item, and then use PutItem to create a new item with +// new attributes. +// +// Attribute values cannot be null; string and binary type attributes must have +// lengths greater than zero; and set type attributes must not be empty. Requests +// with empty values will be rejected with a ValidationException exception. type AttributeValueUpdate struct { // Specifies how to perform the update. Valid values are PUT (default), DELETE , // and ADD . The behavior depends on whether the specified primary key already - // exists in the table. 
If an item with the specified Key is found in the table: + // exists in the table. + // + // If an item with the specified Key is found in the table: + // // - PUT - Adds the specified attribute to the item. If the attribute already // exists, it is replaced by the new value. + // // - DELETE - If no value is specified, the attribute and its value are removed // from the item. The data type of the specified value must match the existing - // value's data type. If a set of values is specified, then those values are - // subtracted from the old set. For example, if the attribute value was the set - // [a,b,c] and the DELETE action specified [a,c] , then the final attribute value - // would be [b] . Specifying an empty set is an error. + // value's data type. + // + // If a set of values is specified, then those values are subtracted from the old + // set. For example, if the attribute value was the set [a,b,c] and the DELETE + // action specified [a,c] , then the final attribute value would be [b] . + // Specifying an empty set is an error. + // // - ADD - If the attribute does not already exist, then the attribute and its // values are added to the item. If the attribute does exist, then the behavior of // ADD depends on the data type of the attribute: + // // - If the existing attribute is a number, and if Value is also a number, then // the Value is mathematically added to the existing attribute. If Value is a - // negative number, then it is subtracted from the existing attribute. If you use - // ADD to increment or decrement a number value for an item that doesn't exist - // before the update, DynamoDB uses 0 as the initial value. In addition, if you use - // ADD to update an existing item, and intend to increment or decrement an - // attribute value which does not yet exist, DynamoDB uses 0 as the initial - // value. For example, suppose that the item you want to update does not yet have - // an attribute named itemcount, but you decide to ADD the number 3 to this - // attribute anyway, even though it currently does not exist. DynamoDB will create - // the itemcount attribute, set its initial value to 0 , and finally add 3 to it. - // The result will be a new itemcount attribute in the item, with a value of 3 . + // negative number, then it is subtracted from the existing attribute. + // + // If you use ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. + // + // In addition, if you use ADD to update an existing item, and intend to increment + // or decrement an attribute value which does not yet exist, DynamoDB uses 0 as + // the initial value. For example, suppose that the item you want to update does + // not yet have an attribute named itemcount, but you decide to ADD the number 3 + // to this attribute anyway, even though it currently does not exist. DynamoDB will + // create the itemcount attribute, set its initial value to 0 , and finally add 3 + // to it. The result will be a new itemcount attribute in the item, with a value of + // 3 . + // // - If the existing data type is a set, and if the Value is also a set, then the // Value is added to the existing set. (This is a set operation, not mathematical // addition.) For example, if the attribute value was the set [1,2] , and the ADD // action specified [3] , then the final attribute value would be [1,2,3] . 
An // error occurs if an Add action is specified for a set attribute and the attribute - // type specified does not match the existing set type. Both sets must have the - // same primitive data type. For example, if the existing data type is a set of - // strings, the Value must also be a set of strings. The same holds true for - // number sets and binary sets. This action is only valid for an existing - // attribute whose data type is number or is a set. Do not use ADD for any other - // data types. + // type specified does not match the existing set type. + // + // Both sets must have the same primitive data type. For example, if the existing + // data type is a set of strings, the Value must also be a set of strings. The + // same holds true for number sets and binary sets. + // + // This action is only valid for an existing attribute whose data type is number + // or is a set. Do not use ADD for any other data types. + // // If no item with the specified Key is found: + // // - PUT - DynamoDB creates a new item with the specified primary key, and then // adds the attribute. + // // - DELETE - Nothing happens; there is no attribute to delete. + // // - ADD - DynamoDB creates a new item with the supplied primary key and number // (or set) for the attribute value. The only data types allowed are number, number // set, string set or binary set. Action AttributeAction - // Represents the data for an attribute. Each attribute value is described as a - // name-value pair. The name is the data type, and the value is the data itself. - // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. + // Represents the data for an attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. + // + // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes Value AttributeValue noSmithyDocumentSerde @@ -417,11 +467,14 @@ type BackupDetails struct { BackupStatus BackupStatus // BackupType: + // // - USER - You create and manage these using the on-demand backup feature. + // // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM // backup is automatically created and is retained for 35 days (at no additional // cost). System backups allow you to restore the deleted table to the state it was // in just before the point of deletion. + // // - AWS_BACKUP - On-demand backup created by you from Backup service. // // This member is required. @@ -461,11 +514,14 @@ type BackupSummary struct { BackupStatus BackupStatus // BackupType: + // // - USER - You create and manage these using the on-demand backup feature. + // // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM // backup is automatically created and is retained for 35 days (at no additional // cost). System backups allow you to restore the deleted table to the state it was // in just before the point of deletion. + // // - AWS_BACKUP - On-demand backup created by you from Backup service. BackupType BackupType @@ -484,14 +540,14 @@ type BackupSummary struct { // An error associated with a statement in a PartiQL batch that was run. 
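The AttributeValue union members and the ADD semantics documented above can be exercised as in the minimal sketch below, assuming a hypothetical Users table: it builds an item from union members, then uses the legacy AttributeUpdates parameter with an ADD action to increment a counter that may not exist yet (DynamoDB starts from 0, as described above).

// Sketch: AttributeValue union members plus a legacy ADD update.
package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        panic(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    // Each member type carries exactly one DynamoDB data type.
    item := map[string]types.AttributeValue{
        "pk":     &types.AttributeValueMemberS{Value: "user#1"},
        "age":    &types.AttributeValueMemberN{Value: "35"}, // numbers travel as strings
        "active": &types.AttributeValueMemberBOOL{Value: true},
        "tags":   &types.AttributeValueMemberSS{Value: []string{"a", "b"}},
        "prefs": &types.AttributeValueMemberM{Value: map[string]types.AttributeValue{
            "theme": &types.AttributeValueMemberS{Value: "dark"},
        }},
    }
    if _, err := client.PutItem(context.TODO(), &dynamodb.PutItemInput{
        TableName: aws.String("Users"),
        Item:      item,
    }); err != nil {
        panic(err)
    }

    // ADD on a number: if "itemcount" doesn't exist yet, DynamoDB uses 0 as the
    // initial value, so this leaves the attribute equal to 3 (as described above).
    if _, err := client.UpdateItem(context.TODO(), &dynamodb.UpdateItemInput{
        TableName: aws.String("Users"),
        Key:       map[string]types.AttributeValue{"pk": &types.AttributeValueMemberS{Value: "user#1"}},
        AttributeUpdates: map[string]types.AttributeValueUpdate{
            "itemcount": {
                Action: types.AttributeActionAdd,
                Value:  &types.AttributeValueMemberN{Value: "3"},
            },
        },
    }); err != nil {
        panic(err)
    }
}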
type BatchStatementError struct { - // The error code associated with the failed PartiQL batch statement. + // The error code associated with the failed PartiQL batch statement. Code BatchStatementErrorCodeEnum // The item which caused the condition check to fail. This will be set if // ReturnValuesOnConditionCheckFailure is specified as ALL_OLD . Item map[string]AttributeValue - // The error message associated with the PartiQL batch response. + // The error message associated with the PartiQL batch response. Message *string noSmithyDocumentSerde @@ -500,22 +556,23 @@ type BatchStatementError struct { // A PartiQL batch statement request. type BatchStatementRequest struct { - // A valid PartiQL statement. + // A valid PartiQL statement. // // This member is required. Statement *string - // The read consistency of the PartiQL batch request. + // The read consistency of the PartiQL batch request. ConsistentRead *bool - // The parameters associated with a PartiQL statement in the batch request. + // The parameters associated with a PartiQL statement in the batch request. Parameters []AttributeValue // An optional parameter that returns the item attributes for a PartiQL batch - // request operation that failed a condition check. There is no additional cost - // associated with requesting a return value aside from the small network and - // processing overhead of receiving a larger response. No read capacity units are - // consumed. + // request operation that failed a condition check. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure noSmithyDocumentSerde @@ -524,13 +581,13 @@ type BatchStatementRequest struct { // A PartiQL batch statement response. type BatchStatementResponse struct { - // The error associated with a failed PartiQL batch statement. + // The error associated with a failed PartiQL batch statement. Error *BatchStatementError - // A DynamoDB item associated with a BatchStatementResponse + // A DynamoDB item associated with a BatchStatementResponse Item map[string]AttributeValue - // The table name associated with a failed PartiQL batch statement. + // The table name associated with a failed PartiQL batch statement. TableName *string noSmithyDocumentSerde @@ -538,15 +595,20 @@ type BatchStatementResponse struct { // Contains the details for the read/write capacity mode. This page talks about // PROVISIONED and PAY_PER_REQUEST billing modes. For more information about these -// modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html) -// . You may need to switch to on-demand mode at least once in order to return a +// modes, see [Read/write capacity mode]. +// +// You may need to switch to on-demand mode at least once in order to return a // BillingModeSummary response. +// +// [Read/write capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html type BillingModeSummary struct { // Controls how you are charged for read and write throughput and how you manage // capacity. This setting can be changed later. + // // - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We // recommend using PROVISIONED for predictable workloads.
+ // - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We // recommend using PAY_PER_REQUEST for unpredictable workloads. BillingMode BillingMode @@ -594,119 +656,168 @@ type Capacity struct { } // Represents the selection criteria for a Query or Scan operation: +// // - For a Query operation, Condition is used for specifying the KeyConditions to // use when querying a table or an index. For KeyConditions , only the following -// comparison operators are supported: EQ | LE | LT | GE | GT | BEGINS_WITH | -// BETWEEN Condition is also used in a QueryFilter , which evaluates the query -// results and returns only the desired values. -// - For a Scan operation, Condition is used in a ScanFilter , which evaluates -// the scan results and returns only the desired values. +// comparison operators are supported: +// +// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN +// +// Condition is also used in a QueryFilter , which evaluates the query results +// and returns only the desired values. +// +// - For a Scan operation, Condition is used in a ScanFilter , which evaluates +// the scan results and returns only the desired values. type Condition struct { // A comparator for evaluating attributes. For example, equals, greater than, less - // than, etc. The following comparison operators are available: EQ | NE | LE | LT - // | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | BEGINS_WITH | IN | - // BETWEEN The following are descriptions of each comparison operator. + // than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // // - EQ : Equal. EQ is supported for all data types, including lists and maps. - // AttributeValueList can contain only one AttributeValue element of type String, + // + // AttributeValueList can contain only one AttributeValue element of type String, // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue element of a different type than the one provided in the // request, the value does not match. For example, {"S":"6"} does not equal // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - NE : Not equal. NE is supported for all data types, including lists and - // maps. AttributeValueList can contain only one AttributeValue of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an + // + // - NE : Not equal. NE is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue of a different type than the one provided in the request, the // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, // {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - LE : Less than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - LT : Less than.
AttributeValueList can contain only one AttributeValue of - // type String, Number, or Binary (not a set type). If an item contains an - // AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . - // - GE : Greater than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - GT : Greater than. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . + // + // - LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. + // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]} . + // + // - GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, - // including lists and maps. This operator tests for the existence of an attribute, - // not its data type. If the data type of attribute " a " is null, and you - // evaluate it using NOT_NULL , the result is a Boolean true . This result is - // because the attribute " a " exists; its data type is not relevant to the - // NOT_NULL comparison operator. + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NOT_NULL , + // the result is a Boolean true . 
This result is because the attribute " a " + // exists; its data type is not relevant to the NOT_NULL comparison operator. + // // - NULL : The attribute does not exist. NULL is supported for all data types, - // including lists and maps. This operator tests for the nonexistence of an - // attribute, not its data type. If the data type of attribute " a " is null, and - // you evaluate it using NULL , the result is a Boolean false . This is because - // the attribute " a " exists; its data type is not relevant to the NULL - // comparison operator. - // - CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList - // can contain only one AttributeValue element of type String, Number, or Binary - // (not a set type). If the target attribute of the comparison is of type String, - // then the operator checks for a substring match. If the target attribute of the - // comparison is of type Binary, then the operator looks for a subsequence of the - // target that matches the input. If the target attribute of the comparison is a - // set (" SS ", " NS ", or " BS "), then the operator evaluates to true if it - // finds an exact match with any member of the set. CONTAINS is supported for - // lists: When evaluating " a CONTAINS b ", " a " can be a list; however, " b " - // cannot be a set, a map, or a list. + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NULL , the + // result is a Boolean false . This is because the attribute " a " exists; its + // data type is not relevant to the NULL comparison operator. + // + // - CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the target + // attribute of the comparison is of type Binary, then the operator looks for a + // subsequence of the target that matches the input. If the target attribute of the + // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be + // a list; however, " b " cannot be a set, a map, or a list. + // // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in - // a set. AttributeValueList can contain only one AttributeValue element of type - // String, Number, or Binary (not a set type). If the target attribute of the - // comparison is a String, then the operator checks for the absence of a substring - // match. If the target attribute of the comparison is Binary, then the operator - // checks for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set (" SS ", " NS ", or " BS "), - // then the operator evaluates to true if it does not find an exact match with any - // member of the set. NOT_CONTAINS is supported for lists: When evaluating " a - // NOT CONTAINS b ", " a " can be a list; however, " b " cannot be a set, a map, - // or a list. - // - BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one - // AttributeValue of type String or Binary (not a Number or a set type). 
The - // target attribute of the comparison must be of type String or Binary (not a - // Number or a set type). - // - IN : Checks for matching elements in a list. AttributeValueList can contain - // one or more AttributeValue elements of type String, Number, or Binary. These - // attributes are compared against an existing attribute of an item. If any - // elements of the input are equal to the item attribute, the expression evaluates - // to true. + // a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // a String, then the operator checks for the absence of a substring match. If the + // target attribute of the comparison is Binary, then the operator checks for the + // absence of a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the + // operator evaluates to true if it does not find an exact match with any member of + // the set. + // + // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a " + // can be a list; however, " b " cannot be a set, a map, or a list. + // + // - BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or Binary + // (not a Number or a set type). The target attribute of the comparison must be of + // type String or Binary (not a Number or a set type). + // + // - IN : Checks for matching elements in a list. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary. These attributes are compared against an existing + // attribute of an item. If any elements of the input are equal to the item + // attribute, the expression evaluates to true. + // // - BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. AttributeValueList must contain two AttributeValue - // elements of the same type, either String, Number, or Binary (not a set type). A - // target attribute matches if the target value is greater than, or equal to, the - // first element and less than, or equal to, the second element. If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does not - // compare to {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} - // For usage examples of AttributeValueList and ComparisonOperator , see Legacy - // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) - // in the Amazon DynamoDB Developer Guide. + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same type, + // either String, Number, or Binary (not a set type). A target attribute matches if + // the target value is greater than, or equal to, the first element and less than, + // or equal to, the second element. If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does not + // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} + // + // For usage examples of AttributeValueList and ComparisonOperator , see [Legacy Conditional Parameters] in the + // Amazon DynamoDB Developer Guide. 
+ // + // [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html // // This member is required. ComparisonOperator ComparisonOperator // One or more values to evaluate against the supplied attribute. The number of - // values in the list depends on the ComparisonOperator being used. For type - // Number, value comparisons are numeric. String value comparisons for greater - // than, equals, or less than are based on ASCII character code values. For - // example, a is greater than A , and a is greater than B . For a list of code - // values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters) - // . For Binary, DynamoDB treats each byte of the binary data as unsigned when it + // values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based on + // ASCII character code values. For example, a is greater than A , and a is + // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]. + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when it // compares binary values. + // + // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters AttributeValueList []AttributeValue noSmithyDocumentSerde @@ -717,8 +828,9 @@ type Condition struct { type ConditionCheck struct { // A condition that must be satisfied in order for a conditional update to - // succeed. For more information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) - // in the Amazon DynamoDB Developer Guide. + // succeed. For more information, see [Condition expressions] in the Amazon DynamoDB Developer Guide. + // + // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html // // This member is required. ConditionExpression *string @@ -736,13 +848,15 @@ type ConditionCheck struct { TableName *string // One or more substitution tokens for attribute names in an expression. For more - // information, see Expression attribute names (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Expression attribute names] in the Amazon DynamoDB Developer Guide. + // + // [Expression attribute names]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html ExpressionAttributeNames map[string]string // One or more values that can be substituted in an expression. For more - // information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html) - // in the Amazon DynamoDB Developer Guide. + // information, see [Condition expressions] in the Amazon DynamoDB Developer Guide.
+ // + // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html ExpressionAttributeValues map[string]AttributeValue // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the @@ -756,8 +870,10 @@ type ConditionCheck struct { // The capacity units consumed by an operation. The data returned includes the // total provisioned throughput consumed, along with statistics for the table and // any indexes involved in the operation. ConsumedCapacity is only returned if the -// request asked for it. For more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) -// in the Amazon DynamoDB Developer Guide. +// request asked for it. For more information, see [Provisioned Throughput] in the Amazon DynamoDB +// Developer Guide. +// +// [Provisioned Throughput]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html type ConsumedCapacity struct { // The total number of capacity units consumed by the operation. @@ -838,10 +954,18 @@ type CreateGlobalSecondaryIndexAction struct { // This member is required. Projection *Projection + // The maximum number of read and write units for the global secondary index being + // created. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents the provisioned throughput settings for the specified global - // secondary index. For current minimum and maximum provisioned throughput values, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // secondary index. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughput noSmithyDocumentSerde @@ -875,6 +999,11 @@ type CreateReplicationGroupMemberAction struct { // from the default DynamoDB KMS key alias/aws/dynamodb . KMSMasterKeyId *string + // The maximum on-demand throughput settings for the specified replica table being + // created. You can only modify MaxReadRequestUnits , because you can't modify + // MaxWriteRequestUnits for individual replica tables. + OnDemandThroughputOverride *OnDemandThroughputOverride + // Replica-specific provisioned throughput. If not specified, uses the source // table's provisioned throughput settings. ProvisionedThroughputOverride *ProvisionedThroughputOverride @@ -889,10 +1018,10 @@ type CreateReplicationGroupMemberAction struct { // Processing options for the CSV file being imported. type CsvOptions struct { - // The delimiter used for separating items in the CSV file being imported. + // The delimiter used for separating items in the CSV file being imported. Delimiter *string - // List of the headers used to specify a common header for all source CSV files + // List of the headers used to specify a common header for all source CSV files // being imported. If this field is specified then the first line of each CSV file // is treated as data instead of the header. If this field is not specified, then the // first line of each CSV file is treated as the header.
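A minimal sketch of the legacy Condition type documented above, used as a ScanFilter; it is not part of the vendored diff, and the table and attribute names are hypothetical.

// Sketch: a legacy ScanFilter using Condition with BETWEEN.
package main

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        panic(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    // BETWEEN takes exactly two AttributeValue elements of the same type;
    // comparisons on type Number are numeric, per the doc above.
    out, err := client.Scan(context.TODO(), &dynamodb.ScanInput{
        TableName: aws.String("Products"),
        ScanFilter: map[string]types.Condition{
            "price": {
                ComparisonOperator: types.ComparisonOperatorBetween,
                AttributeValueList: []types.AttributeValue{
                    &types.AttributeValueMemberN{Value: "10"},
                    &types.AttributeValueMemberN{Value: "20"},
                },
            },
        },
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("matched items:", len(out.Items))
}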
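The OnDemandThroughput field this bump adds to CreateGlobalSecondaryIndexAction can be exercised as in the sketch below; the table and index names are hypothetical.

// Sketch: adding a GSI on an on-demand table, capping its request units with
// the new OnDemandThroughput field.
package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        panic(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    _, err = client.UpdateTable(context.TODO(), &dynamodb.UpdateTableInput{
        TableName: aws.String("Users"),
        AttributeDefinitions: []types.AttributeDefinition{{
            AttributeName: aws.String("email"),
            AttributeType: types.ScalarAttributeTypeS,
        }},
        GlobalSecondaryIndexUpdates: []types.GlobalSecondaryIndexUpdate{{
            Create: &types.CreateGlobalSecondaryIndexAction{
                IndexName: aws.String("by-email"),
                KeySchema: []types.KeySchemaElement{{
                    AttributeName: aws.String("email"),
                    KeyType:       types.KeyTypeHash,
                }},
                Projection: &types.Projection{ProjectionType: types.ProjectionTypeAll},
                // New in this SDK version: specify MaxReadRequestUnits,
                // MaxWriteRequestUnits, or both.
                OnDemandThroughput: &types.OnDemandThroughput{
                    MaxReadRequestUnits:  aws.Int64(100),
                    MaxWriteRequestUnits: aws.Int64(50),
                },
            },
        }},
    })
    if err != nil {
        panic(err)
    }
}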
@@ -1009,10 +1138,12 @@ type Endpoint struct { // can be used with DeleteItem , PutItem , or UpdateItem operations; if the // comparison evaluates to true, the operation succeeds; if not, the operation // fails. You can use ExpectedAttributeValue in one of two different ways: +// // - Use AttributeValueList to specify one or more values to compare against an // attribute. Use ComparisonOperator to specify how you want to perform the // comparison. If the comparison evaluates to true, then the conditional operation // succeeds. +// // - Use Value to specify a value that DynamoDB will compare against an // attribute. If the values match, then ExpectedAttributeValue evaluates to true // and the conditional operation succeeds. Optionally, you can also set Exists to @@ -1026,132 +1157,184 @@ type Endpoint struct { type ExpectedAttributeValue struct { // One or more values to evaluate against the supplied attribute. The number of - // values in the list depends on the ComparisonOperator being used. For type - // Number, value comparisons are numeric. String value comparisons for greater - // than, equals, or less than are based on ASCII character code values. For - // example, a is greater than A , and a is greater than B . For a list of code - // values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters) - // . For Binary, DynamoDB treats each byte of the binary data as unsigned when it - // compares binary values. For information on specifying data types in JSON, see - // JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) - // in the Amazon DynamoDB Developer Guide. + // values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based on + // ASCII character code values. For example, a is greater than A , and a is + // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]. + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when it + // compares binary values. + // + // For information on specifying data types in JSON, see [JSON Data Format] in the Amazon DynamoDB + // Developer Guide. + // + // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // [JSON Data Format]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html AttributeValueList []AttributeValue // A comparator for evaluating attributes in the AttributeValueList . For example, - // equals, greater than, less than, etc. The following comparison operators are - // available: EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | - // NOT_CONTAINS | BEGINS_WITH | IN | BETWEEN The following are descriptions of each - // comparison operator. + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // // - EQ : Equal. EQ is supported for all data types, including lists and maps. 
- // AttributeValueList can contain only one AttributeValue element of type String, + // + // AttributeValueList can contain only one AttributeValue element of type String, // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue element of a different type than the one provided in the // request, the value does not match. For example, {"S":"6"} does not equal // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - NE : Not equal. NE is supported for all data types, including lists and - // maps. AttributeValueList can contain only one AttributeValue of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an + // + // - NE : Not equal. NE is supported for all data types, including lists and maps. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // Binary, String Set, Number Set, or Binary Set. If an item contains an // AttributeValue of a different type than the one provided in the request, the // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, // {"N":"6"} does not equal {"NS":["6", "2", "1"]} . - // - LE : Less than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - LT : Less than. AttributeValueList can contain only one AttributeValue of - // type String, Number, or Binary (not a set type). If an item contains an - // AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . - // - GE : Greater than or equal. AttributeValueList can contain only one - // AttributeValue element of type String, Number, or Binary (not a set type). If - // an item contains an AttributeValue element of a different type than the one - // provided in the request, the value does not match. For example, {"S":"6"} does - // not equal {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} . - // - GT : Greater than. AttributeValueList can contain only one AttributeValue - // element of type String, Number, or Binary (not a set type). If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", "1"]} . + // + // - LE : Less than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, Number, + // or Binary (not a set type). If an item contains an AttributeValue element of a + // different type than the one provided in the request, the value does not match. 
+ // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not + // compare to {"NS":["6", "2", "1"]} . + // + // - GE : Greater than or equal. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // + // - GT : Greater than. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value does + // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} . + // // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types, - // including lists and maps. This operator tests for the existence of an attribute, - // not its data type. If the data type of attribute " a " is null, and you - // evaluate it using NOT_NULL , the result is a Boolean true . This result is - // because the attribute " a " exists; its data type is not relevant to the - // NOT_NULL comparison operator. + // including lists and maps. + // + // This operator tests for the existence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NOT_NULL , + // the result is a Boolean true . This result is because the attribute " a " + // exists; its data type is not relevant to the NOT_NULL comparison operator. + // // - NULL : The attribute does not exist. NULL is supported for all data types, - // including lists and maps. This operator tests for the nonexistence of an - // attribute, not its data type. If the data type of attribute " a " is null, and - // you evaluate it using NULL , the result is a Boolean false . This is because - // the attribute " a " exists; its data type is not relevant to the NULL - // comparison operator. - // - CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList - // can contain only one AttributeValue element of type String, Number, or Binary - // (not a set type). If the target attribute of the comparison is of type String, - // then the operator checks for a substring match. If the target attribute of the - // comparison is of type Binary, then the operator looks for a subsequence of the - // target that matches the input. If the target attribute of the comparison is a - // set (" SS ", " NS ", or " BS "), then the operator evaluates to true if it - // finds an exact match with any member of the set. CONTAINS is supported for - // lists: When evaluating " a CONTAINS b ", " a " can be a list; however, " b " - // cannot be a set, a map, or a list. + // including lists and maps. + // + // This operator tests for the nonexistence of an attribute, not its data type. If + // the data type of attribute " a " is null, and you evaluate it using NULL , the + // result is a Boolean false . This is because the attribute " a " exists; its + // data type is not relevant to the NULL comparison operator. + // + // - CONTAINS : Checks for a subsequence, or value in a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). 
If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the target + // attribute of the comparison is of type Binary, then the operator looks for a + // subsequence of the target that matches the input. If the target attribute of the + // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates + // to true if it finds an exact match with any member of the set. + // + // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be + // a list; however, " b " cannot be a set, a map, or a list. + // // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in - // a set. AttributeValueList can contain only one AttributeValue element of type - // String, Number, or Binary (not a set type). If the target attribute of the - // comparison is a String, then the operator checks for the absence of a substring - // match. If the target attribute of the comparison is Binary, then the operator - // checks for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set (" SS ", " NS ", or " BS "), - // then the operator evaluates to true if it does not find an exact match with any - // member of the set. NOT_CONTAINS is supported for lists: When evaluating " a - // NOT CONTAINS b ", " a " can be a list; however, " b " cannot be a set, a map, - // or a list. - // - BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only one - // AttributeValue of type String or Binary (not a Number or a set type). The - // target attribute of the comparison must be of type String or Binary (not a - // Number or a set type). - // - IN : Checks for matching elements in a list. AttributeValueList can contain - // one or more AttributeValue elements of type String, Number, or Binary. These - // attributes are compared against an existing attribute of an item. If any - // elements of the input are equal to the item attribute, the expression evaluates - // to true. + // a set. + // + // AttributeValueList can contain only one AttributeValue element of type String, + // Number, or Binary (not a set type). If the target attribute of the comparison is + // a String, then the operator checks for the absence of a substring match. If the + // target attribute of the comparison is Binary, then the operator checks for the + // absence of a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the + // operator evaluates to true if it does not find an exact match with any member of + // the set. + // + // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a " + // can be a list; however, " b " cannot be a set, a map, or a list. + // + // - BEGINS_WITH : Checks for a prefix. + // + // AttributeValueList can contain only one AttributeValue of type String or Binary + // (not a Number or a set type). The target attribute of the comparison must be of + // type String or Binary (not a Number or a set type). + // + // - IN : Checks for matching elements in a list. + // + // AttributeValueList can contain one or more AttributeValue elements of type + // String, Number, or Binary. These attributes are compared against an existing + // attribute of an item. If any elements of the input are equal to the item + // attribute, the expression evaluates to true. 
+ // // - BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. AttributeValueList must contain two AttributeValue - // elements of the same type, either String, Number, or Binary (not a set type). A - // target attribute matches if the target value is greater than, or equal to, the - // first element and less than, or equal to, the second element. If an item - // contains an AttributeValue element of a different type than the one provided - // in the request, the value does not match. For example, {"S":"6"} does not - // compare to {"N":"6"} . Also, {"N":"6"} does not compare to {"NS":["6", "2", - // "1"]} + // to the second value. + // + // AttributeValueList must contain two AttributeValue elements of the same type, + // either String, Number, or Binary (not a set type). A target attribute matches if + // the target value is greater than, or equal to, the first element and less than, + // or equal to, the second element. If an item contains an AttributeValue element + // of a different type than the one provided in the request, the value does not + // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]} ComparisonOperator ComparisonOperator - // Causes DynamoDB to evaluate the value before attempting a conditional - // operation: + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // // - If Exists is true , DynamoDB will check to see if that attribute value // already exists in the table. If it is found, then the operation succeeds. If it // is not found, the operation fails with a ConditionCheckFailedException . + // // - If Exists is false , DynamoDB assumes that the attribute value does not // exist in the table. If in fact the value does not exist, then the assumption is // valid and the operation succeeds. If the value is found, despite the assumption // that it does not exist, the operation fails with a // ConditionCheckFailedException . + // // The default setting for Exists is true . If you supply a Value all by itself, // DynamoDB assumes the attribute exists: You don't have to set Exists to true , - // because it is implied. DynamoDB returns a ValidationException if: + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // // - Exists is true but there is no Value to check. (You expect a value to exist, // but don't specify what that value is.) + // // - Exists is false but you also provide a Value . (You cannot expect an // attribute to have a value, while also expecting it not to exist.) Exists *bool - // Represents the data for the expected attribute. Each attribute value is - // described as a name-value pair. The name is the data type, and the value is the - // data itself. For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) - // in the Amazon DynamoDB Developer Guide. + // Represents the data for the expected attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide. 
+ // + // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes Value AttributeValue noSmithyDocumentSerde @@ -1216,7 +1399,9 @@ type ExportDescription struct { // Type of encryption used on the bucket where export data is stored. Valid values // for S3SseAlgorithm are: + // // - AES256 - server-side encryption with Amazon S3 managed keys + // // - KMS - server-side encryption with KMS managed keys S3SseAlgorithm S3SseAlgorithm @@ -1305,14 +1490,19 @@ type GlobalSecondaryIndex struct { // The complete key schema for a global secondary index, which consists of one or // more pairs of attribute names and key types: + // // - HASH - partition key + // // - RANGE - sort key + // // The partition key of an item is also known as its hash attribute. The term // "hash attribute" derives from DynamoDB's usage of an internal hash function to // evenly distribute data items across partitions, based on their partition key - // values. The sort key of an item is also known as its range attribute. The term - // "range attribute" derives from the way DynamoDB stores items with the same - // partition key physically close together, in sorted order by the sort key value. + // values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. // // This member is required. KeySchema []KeySchemaElement @@ -1324,10 +1514,18 @@ type GlobalSecondaryIndex struct { // This member is required. Projection *Projection + // The maximum number of read and write units for the specified global secondary + // index. If you use this parameter, you must specify MaxReadRequestUnits , + // MaxWriteRequestUnits , or both. + OnDemandThroughput *OnDemandThroughput + // Represents the provisioned throughput settings for the specified global - // secondary index. For current minimum and maximum provisioned throughput values, - // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) - // in the Amazon DynamoDB Developer Guide. + // secondary index. + // + // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the + // Amazon DynamoDB Developer Guide. + // + // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html ProvisionedThroughput *ProvisionedThroughput noSmithyDocumentSerde @@ -1355,12 +1553,15 @@ type GlobalSecondaryIndexDescription struct { // added to the index. (Not all items will qualify: For example, a partition key // cannot have any duplicate values.) If an item can be added to the index, // DynamoDB will do so. After all items have been processed, the backfilling - // operation is complete and Backfilling is false. You can delete an index that is - // being created during the Backfilling phase when IndexStatus is set to CREATING - // and Backfilling is true. You can't delete the index that is being created when - // IndexStatus is set to CREATING and Backfilling is false. For indexes that were - // created during a CreateTable operation, the Backfilling attribute does not - // appear in the DescribeTable output. + // operation is complete and Backfilling is false. 
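A minimal sketch of the ExpectedAttributeValue type documented above, using Exists=false as the legacy way to put an item only if it does not already exist; it is not part of the vendored diff, and the names are hypothetical.

// Sketch: a legacy conditional put with Expected and Exists=false.
package main

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb"
    "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        panic(err)
    }
    client := dynamodb.NewFromConfig(cfg)

    // Fails with ConditionalCheckFailedException if "pk" already has a value,
    // per the Exists semantics described above.
    _, err = client.PutItem(context.TODO(), &dynamodb.PutItemInput{
        TableName: aws.String("Users"),
        Item: map[string]types.AttributeValue{
            "pk": &types.AttributeValueMemberS{Value: "user#1"},
        },
        Expected: map[string]types.ExpectedAttributeValue{
            "pk": {Exists: aws.Bool(false)},
        },
    })
    if err != nil {
        panic(err)
    }
}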
+	//
+	// You can delete an index that is being created during the Backfilling phase when
+	// IndexStatus is set to CREATING and Backfilling is true. You can't delete the
+	// index that is being created when IndexStatus is set to CREATING and Backfilling
+	// is false.
+	//
+	// For indexes that were created during a CreateTable operation, the Backfilling
+	// attribute does not appear in the DescribeTable output.
 	Backfilling *bool

 	// The Amazon Resource Name (ARN) that uniquely identifies the index.
@@ -1375,9 +1576,13 @@ type GlobalSecondaryIndexDescription struct {
 	IndexSizeBytes *int64

 	// The current state of the global secondary index:
+	//
 	//   - CREATING - The index is being created.
+	//
 	//   - UPDATING - The index is being updated.
+	//
 	//   - DELETING - The index is being deleted.
+	//
 	//   - ACTIVE - The index is ready for use.
 	IndexStatus IndexStatus
@@ -1388,25 +1593,38 @@ type GlobalSecondaryIndexDescription struct {
 	// The complete key schema for a global secondary index, which consists of one or
 	// more pairs of attribute names and key types:
+	//
 	//   - HASH - partition key
+	//
 	//   - RANGE - sort key
+	//
 	// The partition key of an item is also known as its hash attribute. The term
 	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
 	// evenly distribute data items across partitions, based on their partition key
-	// values. The sort key of an item is also known as its range attribute. The term
-	// "range attribute" derives from the way DynamoDB stores items with the same
-	// partition key physically close together, in sorted order by the sort key value.
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
 	KeySchema []KeySchemaElement

+	// The maximum number of read and write units for the specified global secondary
+	// index. If you use this parameter, you must specify MaxReadRequestUnits ,
+	// MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
 	// Represents attributes that are copied (projected) from the table into the
 	// global secondary index. These are in addition to the primary key attributes and
 	// index key attributes, which are automatically projected.
 	Projection *Projection

 	// Represents the provisioned throughput settings for the specified global
-	// secondary index. For current minimum and maximum provisioned throughput values,
-	// see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// secondary index.
+	//
+	// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+	// Amazon DynamoDB Developer Guide.
+	//
+	// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
 	ProvisionedThroughput *ProvisionedThroughputDescription

 	noSmithyDocumentSerde
@@ -1421,16 +1639,26 @@ type GlobalSecondaryIndexInfo struct {
 	// The complete key schema for a global secondary index, which consists of one or
 	// more pairs of attribute names and key types:
+	//
 	//   - HASH - partition key
+	//
 	//   - RANGE - sort key
+	//
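The Backfilling and IndexStatus fields described above are typically consumed by polling DescribeTable. A hedged sketch, not part of the diff; table and index names are hypothetical, and production code would add backoff:

```go
package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// waitForIndexActive polls until the named GSI leaves the CREATING phase.
func waitForIndexActive(ctx context.Context, client *dynamodb.Client, table, index string) error {
	for {
		out, err := client.DescribeTable(ctx, &dynamodb.DescribeTableInput{TableName: aws.String(table)})
		if err != nil {
			return err
		}
		for _, gsi := range out.Table.GlobalSecondaryIndexes {
			if aws.ToString(gsi.IndexName) == index && gsi.IndexStatus == types.IndexStatusActive {
				return nil // backfilling finished; the index is usable
			}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(5 * time.Second):
		}
	}
}
```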
 	// The partition key of an item is also known as its hash attribute. The term
 	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
 	// evenly distribute data items across partitions, based on their partition key
-	// values. The sort key of an item is also known as its range attribute. The term
-	// "range attribute" derives from the way DynamoDB stores items with the same
-	// partition key physically close together, in sorted order by the sort key value.
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
 	KeySchema []KeySchemaElement

+	// Sets the maximum number of read and write units for the specified on-demand
+	// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+	// MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
 	// Represents attributes that are copied (projected) from the table into the
 	// global secondary index. These are in addition to the primary key attributes and
 	// index key attributes, which are automatically projected.
@@ -1444,18 +1672,26 @@ type GlobalSecondaryIndexInfo struct {
 }

 // Represents one of the following:
+//
 //   - A new global secondary index to be added to an existing table.
+//
 //   - New provisioned throughput parameters for an existing global secondary
 //     index.
+//
 //   - An existing global secondary index to be removed from an existing table.
 type GlobalSecondaryIndexUpdate struct {

 	// The parameters required for creating a global secondary index on an existing
 	// table:
+	//
 	//   - IndexName
+	//
 	//   - KeySchema
+	//
 	//   - AttributeDefinitions
+	//
 	//   - Projection
+	//
 	//   - ProvisionedThroughput
 	Create *CreateGlobalSecondaryIndexAction
@@ -1494,9 +1730,13 @@ type GlobalTableDescription struct {
 	GlobalTableName *string

 	// The current state of the global table:
+	//
 	//   - CREATING - The global table is being created.
+	//
 	//   - UPDATING - The global table is being updated.
+	//
 	//   - DELETING - The global table is being deleted.
+	//
 	//   - ACTIVE - The global table is ready for use.
 	GlobalTableStatus GlobalTableStatus
@@ -1530,32 +1770,32 @@ type GlobalTableGlobalSecondaryIndexSettingsUpdate struct {
 // Summary information about the source file for the import.
 type ImportSummary struct {

-	//  The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
+	// The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
 	// this import task.
 	CloudWatchLogGroupArn *string

-	//  The time at which this import task ended. (Does this include the successful
+	// The time at which this import task ended. (Does this include the successful
 	// complete creation of the table it was imported to?)
 	EndTime *time.Time

-	//  The Amazon Resource Number (ARN) corresponding to the import request.
+	// The Amazon Resource Number (ARN) corresponding to the import request.
 	ImportArn *string

-	//  The status of the import operation.
+	// The status of the import operation.
 	ImportStatus ImportStatus

-	//  The format of the source data. Valid values are CSV , DYNAMODB_JSON or ION .
+	// The format of the source data. Valid values are CSV , DYNAMODB_JSON or ION .
 	InputFormat InputFormat

-	// The path and S3 bucket of the source file that is being imported. This includes
-	// the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner (optional if
-	// the bucket is owned by the requester).
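For reference, creating a global secondary index through the GlobalSecondaryIndexUpdate.Create action described above might look like the following sketch. Illustrative only; table, index, and attribute names are hypothetical.

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func addCustomerIndex(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateTable(ctx, &dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"), // hypothetical
		// New key attributes must be declared on the table input.
		AttributeDefinitions: []types.AttributeDefinition{
			{AttributeName: aws.String("CustomerId"), AttributeType: types.ScalarAttributeTypeS},
		},
		GlobalSecondaryIndexUpdates: []types.GlobalSecondaryIndexUpdate{{
			Create: &types.CreateGlobalSecondaryIndexAction{
				IndexName: aws.String("ByCustomer"),
				KeySchema: []types.KeySchemaElement{
					{AttributeName: aws.String("CustomerId"), KeyType: types.KeyTypeHash},
				},
				Projection: &types.Projection{ProjectionType: types.ProjectionTypeAll},
				ProvisionedThroughput: &types.ProvisionedThroughput{
					ReadCapacityUnits:  aws.Int64(5),
					WriteCapacityUnits: aws.Int64(5),
				},
			},
		}},
	})
	return err
}
```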
+	// The path and S3 bucket of the source file that is being imported. This
+	// includes the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner
+	// (optional if the bucket is owned by the requester).
 	S3BucketSource *S3BucketSource

-	//  The time at which this import task began.
+	// The time at which this import task began.
 	StartTime *time.Time

-	//  The Amazon Resource Number (ARN) of the table being imported into.
+	// The Amazon Resource Number (ARN) of the table being imported into.
 	TableArn *string

 	noSmithyDocumentSerde
@@ -1564,70 +1804,70 @@ type ImportSummary struct {
 // Represents the properties of the table being imported into.
 type ImportTableDescription struct {

-	//  The client token that was provided for the import task. Reusing the client
+	// The client token that was provided for the import task. Reusing the client
 	// token on retry makes a call to ImportTable idempotent.
 	ClientToken *string

-	//  The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
+	// The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
 	// the target table.
 	CloudWatchLogGroupArn *string

-	//  The time at which the creation of the table associated with this import task
+	// The time at which the creation of the table associated with this import task
 	// completed.
 	EndTime *time.Time

-	//  The number of errors occurred on importing the source file into the target
+	// The number of errors occurred on importing the source file into the target
 	// table.
 	ErrorCount int64

-	// The error code corresponding to the failure that the import job ran into during
-	// execution.
+	// The error code corresponding to the failure that the import job ran into
+	// during execution.
 	FailureCode *string

-	//  The error message corresponding to the failure that the import job ran into
+	// The error message corresponding to the failure that the import job ran into
 	// during execution.
 	FailureMessage *string

-	//  The Amazon Resource Number (ARN) corresponding to the import request.
+	// The Amazon Resource Number (ARN) corresponding to the import request.
 	ImportArn *string

-	//  The status of the import.
+	// The status of the import.
 	ImportStatus ImportStatus

-	//  The number of items successfully imported into the new table.
+	// The number of items successfully imported into the new table.
 	ImportedItemCount int64

-	//  The compression options for the data that has been imported into the target
+	// The compression options for the data that has been imported into the target
 	// table. The values are NONE, GZIP, or ZSTD.
 	InputCompressionType InputCompressionType

-	//  The format of the source data going into the target table.
+	// The format of the source data going into the target table.
 	InputFormat InputFormat

-	//  The format options for the data that was imported into the target table. There
+	// The format options for the data that was imported into the target table. There
 	// is one value, CsvOption.
 	InputFormatOptions *InputFormatOptions

-	//  The total number of items processed from the source file.
+	// The total number of items processed from the source file.
 	ProcessedItemCount int64

-	//  The total size of data processed from the source file, in Bytes.
+	// The total size of data processed from the source file, in Bytes.
 	ProcessedSizeBytes *int64

-	// Values for the S3 bucket the source file is imported from. Includes bucket name
-	// (required), key prefix (optional) and bucket account owner ID (optional).
+	// Values for the S3 bucket the source file is imported from. Includes bucket
+	// name (required), key prefix (optional) and bucket account owner ID (optional).
 	S3BucketSource *S3BucketSource

-	//  The time when this import task started.
+	// The time when this import task started.
 	StartTime *time.Time

-	//  The Amazon Resource Number (ARN) of the table being imported into.
+	// The Amazon Resource Number (ARN) of the table being imported into.
 	TableArn *string

-	//  The parameters for the new table that is being imported into.
+	// The parameters for the new table that is being imported into.
 	TableCreationParameters *TableCreationParameters

-	//  The table id corresponding to the table created by import table process.
+	// The table id corresponding to the table created by import table process.
 	TableId *string

 	noSmithyDocumentSerde
@@ -1655,11 +1895,12 @@ type IncrementalExportSpecification struct {
 	noSmithyDocumentSerde
 }

-// The format options for the data that was imported into the target table. There
+// The format options for the data that was imported into the target table. There
+//
 // is one value, CsvOption.
 type InputFormatOptions struct {

-	//  The options for imported source files in CSV format. The values are Delimiter
-	// and HeaderList.
+	// The options for imported source files in CSV format. The values are Delimiter
+	// and HeaderList.
 	Csv *CsvOptions
@@ -1681,8 +1922,10 @@ type ItemCollectionMetrics struct {
 	// includes the size of all the items in the table, plus the size of all attributes
 	// projected into all of the local secondary indexes on that table. Use this
 	// estimate to measure whether a local secondary index is approaching its size
-	// limit. The estimate is subject to change over time; therefore, do not rely on
-	// the precision or accuracy of the estimate.
+	// limit.
+	//
+	// The estimate is subject to change over time; therefore, do not rely on the
+	// precision or accuracy of the estimate.
 	SizeEstimateRangeGB []float64

 	noSmithyDocumentSerde
@@ -1698,10 +1941,11 @@ type ItemResponse struct {
 }

 // Represents a set of primary keys and, for each key, the attributes to retrieve
-// from the table. For each primary key, you must provide all of the key
-// attributes. For example, with a simple primary key, you only need to provide the
-// partition key. For a composite primary key, you must provide both the partition
-// key and the sort key.
+// from the table.
+//
+// For each primary key, you must provide all of the key attributes. For example,
+// with a simple primary key, you only need to provide the partition key. For a
+// composite primary key, you must provide both the partition key and the sort key.
 type KeysAndAttributes struct {

 	// The primary key attribute values that define the items and the attributes
@@ -1711,8 +1955,9 @@ type KeysAndAttributes struct {
 	Keys []map[string]AttributeValue

 	// This is a legacy parameter. Use ProjectionExpression instead. For more
-	// information, see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// information, see [Legacy Conditional Parameters]in the Amazon DynamoDB Developer Guide.
+	//
+	// [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html
 	AttributesToGet []string

 	// The consistency of a read operation. If set to true , then a strongly consistent
@@ -1721,35 +1966,52 @@ type KeysAndAttributes struct {
 	// One or more substitution tokens for attribute names in an expression. The
 	// following are some use cases for using ExpressionAttributeNames :
+	//
 	//   - To access an attribute whose name conflicts with a DynamoDB reserved word.
+	//
 	//   - To create a placeholder for repeating occurrences of an attribute name in
 	//     an expression.
+	//
 	//   - To prevent special characters in an attribute name from being
 	//     misinterpreted in an expression.
+	//
 	// Use the # character in an expression to dereference an attribute name. For
 	// example, consider the following attribute name:
+	//
 	//   - Percentile
+	//
 	// The name of this attribute conflicts with a reserved word, so it cannot be used
-	// directly in an expression. (For the complete list of reserved words, see
-	// Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
-	// in the Amazon DynamoDB Developer Guide). To work around this, you could specify
-	// the following for ExpressionAttributeNames :
+	// directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+	// Amazon DynamoDB Developer Guide). To work around this, you could specify the
+	// following for ExpressionAttributeNames :
+	//
 	//   - {"#P":"Percentile"}
+	//
 	// You could then use this substitution in an expression, as in this example:
+	//
 	//   - #P = :val
+	//
 	// Tokens that begin with the : character are expression attribute values, which
-	// are placeholders for the actual value at runtime. For more information on
-	// expression attribute names, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// are placeholders for the actual value at runtime.
+	//
+	// For more information on expression attribute names, see [Accessing Item Attributes] in the Amazon DynamoDB
+	// Developer Guide.
+	//
+	// [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+	// [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
 	ExpressionAttributeNames map[string]string

 	// A string that identifies one or more attributes to retrieve from the table.
 	// These attributes can include scalars, sets, or elements of a JSON document. The
-	// attributes in the ProjectionExpression must be separated by commas. If no
-	// attribute names are specified, then all attributes will be returned. If any of
-	// the requested attributes are not found, they will not appear in the result. For
-	// more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// attributes in the ProjectionExpression must be separated by commas.
+	//
+	// If no attribute names are specified, then all attributes will be returned. If
+	// any of the requested attributes are not found, they will not appear in the
+	// result.
+	//
+	// For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide.
+	//
+	// [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
 	ProjectionExpression *string

 	noSmithyDocumentSerde
@@ -1757,12 +2019,15 @@ type KeysAndAttributes struct {
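The #P / Percentile workaround documented above translates directly into a request like this sketch; not part of the diff, with a hypothetical table and key:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func getPercentile(ctx context.Context, client *dynamodb.Client) (*dynamodb.GetItemOutput, error) {
	return client.GetItem(ctx, &dynamodb.GetItemInput{
		TableName: aws.String("Stats"), // hypothetical
		Key: map[string]types.AttributeValue{
			"Id": &types.AttributeValueMemberS{Value: "user-1"},
		},
		// Percentile is a reserved word, so dereference it through #P.
		ProjectionExpression:     aws.String("#P"),
		ExpressionAttributeNames: map[string]string{"#P": "Percentile"},
	})
}
```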
// Represents a single element of a key schema. A key schema specifies the
// attributes that make up the primary key of a table, or the key attributes of an
-// index. A KeySchemaElement represents exactly one attribute of the primary key.
-// For example, a simple primary key would be represented by one KeySchemaElement
-// (for the partition key). A composite primary key would require one
-// KeySchemaElement for the partition key, and another KeySchemaElement for the
-// sort key. A KeySchemaElement must be a scalar, top-level attribute (not a
-// nested attribute). The data type must be one of String, Number, or Binary. The
+// index.
+//
+// A KeySchemaElement represents exactly one attribute of the primary key. For
+// example, a simple primary key would be represented by one KeySchemaElement (for
+// the partition key). A composite primary key would require one KeySchemaElement
+// for the partition key, and another KeySchemaElement for the sort key.
+//
+// A KeySchemaElement must be a scalar, top-level attribute (not a nested
+// attribute). The data type must be one of String, Number, or Binary. The
 // attribute cannot be nested within a List or a Map.
 type KeySchemaElement struct {
@@ -1772,14 +2037,19 @@ type KeySchemaElement struct {
 	AttributeName *string

 	// The role that this key attribute will assume:
+	//
 	//   - HASH - partition key
+	//
 	//   - RANGE - sort key
+	//
 	// The partition key of an item is also known as its hash attribute. The term
 	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
 	// evenly distribute data items across partitions, based on their partition key
-	// values. The sort key of an item is also known as its range attribute. The term
-	// "range attribute" derives from the way DynamoDB stores items with the same
-	// partition key physically close together, in sorted order by the sort key value.
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
 	//
 	// This member is required.
 	KeyType KeyType
@@ -1817,14 +2087,19 @@ type LocalSecondaryIndex struct {
 	// The complete key schema for the local secondary index, consisting of one or
 	// more pairs of attribute names and key types:
+	//
 	//   - HASH - partition key
+	//
 	//   - RANGE - sort key
+	//
 	// The partition key of an item is also known as its hash attribute. The term
 	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
 	// evenly distribute data items across partitions, based on their partition key
-	// values. The sort key of an item is also known as its range attribute. The term
-	// "range attribute" derives from the way DynamoDB stores items with the same
-	// partition key physically close together, in sorted order by the sort key value.
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
 	//
 	// This member is required.
 	KeySchema []KeySchemaElement
@@ -1860,14 +2135,19 @@ type LocalSecondaryIndexDescription struct {
 	// The complete key schema for the local secondary index, consisting of one or
 	// more pairs of attribute names and key types:
+	//
 	//   - HASH - partition key
+	//
 	//   - RANGE - sort key
+	//
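A composite primary key plus a local secondary index, expressed with the KeySchemaElement structure described above, might be declared as in this illustrative sketch; all names are hypothetical:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func createWithLSI(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.CreateTable(ctx, &dynamodb.CreateTableInput{
		TableName:   aws.String("Music"), // hypothetical
		BillingMode: types.BillingModePayPerRequest,
		AttributeDefinitions: []types.AttributeDefinition{
			{AttributeName: aws.String("Artist"), AttributeType: types.ScalarAttributeTypeS},
			{AttributeName: aws.String("SongTitle"), AttributeType: types.ScalarAttributeTypeS},
			{AttributeName: aws.String("AlbumTitle"), AttributeType: types.ScalarAttributeTypeS},
		},
		// Composite primary key: HASH (partition) plus RANGE (sort).
		KeySchema: []types.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: types.KeyTypeHash},
			{AttributeName: aws.String("SongTitle"), KeyType: types.KeyTypeRange},
		},
		// The LSI shares the partition key but sorts by a different attribute.
		LocalSecondaryIndexes: []types.LocalSecondaryIndex{{
			IndexName: aws.String("ByAlbum"),
			KeySchema: []types.KeySchemaElement{
				{AttributeName: aws.String("Artist"), KeyType: types.KeyTypeHash},
				{AttributeName: aws.String("AlbumTitle"), KeyType: types.KeyTypeRange},
			},
			Projection: &types.Projection{ProjectionType: types.ProjectionTypeKeysOnly},
		}},
	})
	return err
}
```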
 	// The partition key of an item is also known as its hash attribute. The term
 	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
 	// evenly distribute data items across partitions, based on their partition key
-	// values. The sort key of an item is also known as its range attribute. The term
-	// "range attribute" derives from the way DynamoDB stores items with the same
-	// partition key physically close together, in sorted order by the sort key value.
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
 	KeySchema []KeySchemaElement

 	// Represents attributes that are copied (projected) from the table into the
@@ -1887,14 +2167,19 @@ type LocalSecondaryIndexInfo struct {
 	// The complete key schema for a local secondary index, which consists of one or
 	// more pairs of attribute names and key types:
+	//
 	//   - HASH - partition key
+	//
 	//   - RANGE - sort key
+	//
 	// The partition key of an item is also known as its hash attribute. The term
 	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
 	// evenly distribute data items across partitions, based on their partition key
-	// values. The sort key of an item is also known as its range attribute. The term
-	// "range attribute" derives from the way DynamoDB stores items with the same
-	// partition key physically close together, in sorted order by the sort key value.
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
 	KeySchema []KeySchemaElement

 	// Represents attributes that are copied (projected) from the table into the
@@ -1905,22 +2190,58 @@ type LocalSecondaryIndexInfo struct {
 	noSmithyDocumentSerde
 }

+// Sets the maximum number of read and write units for the specified on-demand
+// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+// MaxWriteRequestUnits , or both.
+type OnDemandThroughput struct {
+
+	// Maximum number of read request units for the specified table.
+	//
+	// To specify a maximum OnDemandThroughput on your table, set the value of
+	// MaxReadRequestUnits as greater than or equal to 1. To remove the maximum
+	// OnDemandThroughput that is currently set on your table, set the value of
+	// MaxReadRequestUnits to -1.
+	MaxReadRequestUnits *int64
+
+	// Maximum number of write request units for the specified table.
+	//
+	// To specify a maximum OnDemandThroughput on your table, set the value of
+	// MaxWriteRequestUnits as greater than or equal to 1. To remove the maximum
+	// OnDemandThroughput that is currently set on your table, set the value of
+	// MaxWriteRequestUnits to -1.
+	MaxWriteRequestUnits *int64
+
+	noSmithyDocumentSerde
+}
+
+// Overrides the on-demand throughput settings for this replica table. If you
+// don't specify a value for this parameter, it uses the source table's on-demand
+// throughput settings.
+type OnDemandThroughputOverride struct {
+
+	// Maximum number of read request units for the specified replica table.
+	MaxReadRequestUnits *int64
+
+	noSmithyDocumentSerde
+}
+
 // Represents a PartiQL statement that uses parameters.
 type ParameterizedStatement struct {

-	//  A PartiQL statement that uses parameters.
+	// A PartiQL statement that uses parameters.
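The new OnDemandThroughput type introduced above can be exercised through UpdateTable. A hedged sketch, assuming an on-demand table and the hypothetical names below; note the documented -1 sentinel for removing a maximum:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func capOnDemand(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateTable(ctx, &dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"), // hypothetical
		OnDemandThroughput: &types.OnDemandThroughput{
			MaxReadRequestUnits:  aws.Int64(100),
			MaxWriteRequestUnits: aws.Int64(50),
			// Per the docs above, aws.Int64(-1) would remove a previously
			// configured maximum instead.
		},
	})
	return err
}
```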
 	//
 	// This member is required.
 	Statement *string

-	//  The parameter values.
+	// The parameter values.
 	Parameters []AttributeValue

 	// An optional parameter that returns the item attributes for a PartiQL
-	// ParameterizedStatement operation that failed a condition check. There is no
-	// additional cost associated with requesting a return value aside from the small
-	// network and processing overhead of receiving a larger response. No read capacity
-	// units are consumed.
+	// ParameterizedStatement operation that failed a condition check.
+	//
+	// There is no additional cost associated with requesting a return value aside
+	// from the small network and processing overhead of receiving a larger response.
+	// No read capacity units are consumed.
 	ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure

 	noSmithyDocumentSerde
@@ -1937,7 +2258,9 @@ type PointInTimeRecoveryDescription struct {
 	LatestRestorableDateTime *time.Time

 	// The current state of point in time recovery:
+	//
 	//   - ENABLED - Point in time recovery is enabled.
+	//
 	//   - DISABLED - Point in time recovery is disabled.
 	PointInTimeRecoveryStatus PointInTimeRecoveryStatus
@@ -1962,6 +2285,7 @@ type PointInTimeRecoverySpecification struct {
 type Projection struct {

 	// Represents the non-key attribute names which will be projected into the index.
+	//
 	// For local secondary indexes, the total count of NonKeyAttributes summed across
 	// all of the local secondary indexes, must not exceed 100. If you project the same
 	// attribute into two different indexes, this counts as two distinct attributes
@@ -1969,10 +2293,14 @@ type Projection struct {
 	NonKeyAttributes []string

 	// The set of attributes that are projected into the index:
+	//
 	//   - KEYS_ONLY - Only the index and primary keys are projected into the index.
+	//
 	//   - INCLUDE - In addition to the attributes described in KEYS_ONLY , the
 	//     secondary index will include other non-key attributes that you specify.
+	//
 	//   - ALL - All of the table attributes are projected into the index.
+	//
 	// When using the DynamoDB console, ALL is selected by default.
 	ProjectionType ProjectionType
@@ -1980,26 +2308,32 @@
 }

 // Represents the provisioned throughput settings for a specified table or index.
-// The settings can be modified using the UpdateTable operation. For current
-// minimum and maximum provisioned throughput values, see Service, Account, and
-// Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-// in the Amazon DynamoDB Developer Guide.
+// The settings can be modified using the UpdateTable operation.
+//
+// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+// Amazon DynamoDB Developer Guide.
+//
+// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
 type ProvisionedThroughput struct {

 	// The maximum number of strongly consistent reads consumed per second before
-	// DynamoDB returns a ThrottlingException . For more information, see Specifying
-	// Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html)
-	// in the Amazon DynamoDB Developer Guide. If read/write capacity mode is
-	// PAY_PER_REQUEST the value is set to 0.
+	// DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the
+	// Amazon DynamoDB Developer Guide.
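ParameterizedStatement is consumed by the PartiQL transaction API. An illustrative sketch, not part of the diff; table and attribute names are hypothetical:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func rateSong(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.ExecuteTransaction(ctx, &dynamodb.ExecuteTransactionInput{
		TransactStatements: []types.ParameterizedStatement{{
			Statement: aws.String(`UPDATE "Music" SET "Rating" = ? WHERE "Artist" = ? AND "SongTitle" = ?`),
			// Positional parameters bind to the ? placeholders in order.
			Parameters: []types.AttributeValue{
				&types.AttributeValueMemberN{Value: "5"},
				&types.AttributeValueMemberS{Value: "No One You Know"},
				&types.AttributeValueMemberS{Value: "Call Me Today"},
			},
		}},
	})
	return err
}
```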
+	//
+	// If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
+	//
+	// [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html
 	//
 	// This member is required.
 	ReadCapacityUnits *int64

 	// The maximum number of writes consumed per second before DynamoDB returns a
-	// ThrottlingException . For more information, see Specifying Read and Write
-	// Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html)
-	// in the Amazon DynamoDB Developer Guide. If read/write capacity mode is
-	// PAY_PER_REQUEST the value is set to 0.
+	// ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB
+	// Developer Guide.
+	//
+	// If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
+	//
+	// [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html
 	//
 	// This member is required.
 	WriteCapacityUnits *int64
@@ -2018,9 +2352,10 @@ type ProvisionedThroughputDescription struct {
 	LastIncreaseDateTime *time.Time

 	// The number of provisioned throughput decreases for this table during this UTC
-	// calendar day. For current maximums on provisioned throughput decreases, see
-	// Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// calendar day. For current maximums on provisioned throughput decreases, see [Service, Account, and Table Quotas]in
+	// the Amazon DynamoDB Developer Guide.
+	//
+	// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
 	NumberOfDecreasesToday *int64

 	// The maximum number of strongly consistent reads consumed per second before
@@ -2124,9 +2459,13 @@ type ReplicaAutoScalingDescription struct {
 	ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription

 	// The current state of the replica:
+	//
 	//   - CREATING - The replica is being created.
+	//
 	//   - UPDATING - The replica is being updated.
+	//
 	//   - DELETING - The replica is being deleted.
+	//
 	//   - ACTIVE - The replica is ready for use.
 	ReplicaStatus ReplicaStatus
@@ -2161,6 +2500,10 @@ type ReplicaDescription struct {
 	// The KMS key of the replica that will be used for KMS encryption.
 	KMSMasterKeyId *string

+	// Overrides the maximum on-demand throughput settings for the specified replica
+	// table.
+	OnDemandThroughputOverride *OnDemandThroughputOverride
+
 	// Replica-specific provisioned throughput. If not described, uses the source
 	// table's provisioned throughput settings.
 	ProvisionedThroughputOverride *ProvisionedThroughputOverride
@@ -2173,19 +2516,28 @@ type ReplicaDescription struct {
 	ReplicaInaccessibleDateTime *time.Time

 	// The current state of the replica:
+	//
 	//   - CREATING - The replica is being created.
+	//
 	//   - UPDATING - The replica is being updated.
+	//
 	//   - DELETING - The replica is being deleted.
+	//
 	//   - ACTIVE - The replica is ready for use.
+	//
 	//   - REGION_DISABLED - The replica is inaccessible because the Amazon Web
-	//     Services Region has been disabled. If the Amazon Web Services Region remains
-	//     inaccessible for more than 20 hours, DynamoDB will remove this replica from the
-	//     replication group. The replica will not be deleted and replication will stop
-	//     from and to this region.
-	//   - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
-	//     is inaccessible. If the KMS key remains inaccessible for more than 20 hours,
+	//     Services Region has been disabled.
+	//
+	// If the Amazon Web Services Region remains inaccessible for more than 20 hours,
 	// DynamoDB will remove this replica from the replication group. The replica will
 	// not be deleted and replication will stop from and to this region.
+	//
+	//   - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
+	//     is inaccessible.
+	//
+	// If the KMS key remains inaccessible for more than 20 hours, DynamoDB will
+	// remove this replica from the replication group. The replica will not be deleted
+	// and replication will stop from and to this region.
 	ReplicaStatus ReplicaStatus

 	// Detailed information about the replica status.
@@ -2209,6 +2561,10 @@ type ReplicaGlobalSecondaryIndex struct {
 	// This member is required.
 	IndexName *string

+	// Overrides the maximum on-demand throughput settings for the specified global
+	// secondary index in the specified replica table.
+	OnDemandThroughputOverride *OnDemandThroughputOverride
+
 	// Replica table GSI-specific provisioned throughput. If not specified, uses the
 	// source table GSI's read capacity settings.
 	ProvisionedThroughputOverride *ProvisionedThroughputOverride
@@ -2223,10 +2579,14 @@ type ReplicaGlobalSecondaryIndexAutoScalingDescription struct {
 	IndexName *string

 	// The current state of the replica global secondary index:
+	//
 	//   - CREATING - The index is being created.
+	//
 	//   - UPDATING - The table/index configuration is being updated. The table/index
 	//     remains available for data operations when UPDATING
+	//
 	//   - DELETING - The index is being deleted.
+	//
 	//   - ACTIVE - The index is ready for use.
 	IndexStatus IndexStatus
@@ -2261,6 +2621,10 @@ type ReplicaGlobalSecondaryIndexDescription struct {
 	// The name of the global secondary index.
 	IndexName *string

+	// Overrides the maximum on-demand throughput for the specified global secondary
+	// index in the specified replica table.
+	OnDemandThroughputOverride *OnDemandThroughputOverride
+
 	// If not described, uses the source table GSI's read capacity settings.
 	ProvisionedThroughputOverride *ProvisionedThroughputOverride
@@ -2276,10 +2640,14 @@ type ReplicaGlobalSecondaryIndexSettingsDescription struct {
 	// This member is required.
 	IndexName *string

-	//  The current status of the global secondary index:
+	// The current status of the global secondary index:
+	//
 	//   - CREATING - The global secondary index is being created.
+	//
 	//   - UPDATING - The global secondary index is being updated.
+	//
 	//   - DELETING - The global secondary index is being deleted.
+	//
 	//   - ACTIVE - The global secondary index is ready for use.
 	IndexStatus IndexStatus
@@ -2341,24 +2709,30 @@ type ReplicaSettingsDescription struct {
 	ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription

 	// The maximum number of strongly consistent reads consumed per second before
-	// DynamoDB returns a ThrottlingException . For more information, see Specifying
-	// Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
-	// in the Amazon DynamoDB Developer Guide.
+	// DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the
+	// Amazon DynamoDB Developer Guide.
+	//
+	// [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput
 	ReplicaProvisionedReadCapacityUnits *int64

 	// Auto scaling settings for a global table replica's write capacity units.
 	ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription

 	// The maximum number of writes consumed per second before DynamoDB returns a
-	// ThrottlingException . For more information, see Specifying Read and Write
-	// Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
-	// in the Amazon DynamoDB Developer Guide.
+	// ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB
+	// Developer Guide.
+	//
+	// [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput
 	ReplicaProvisionedWriteCapacityUnits *int64

 	// The current state of the Region:
+	//
 	//   - CREATING - The Region is being created.
+	//
 	//   - UPDATING - The Region is being updated.
+	//
 	//   - DELETING - The Region is being deleted.
+	//
 	//   - ACTIVE - The Region is ready for use.
 	ReplicaStatus ReplicaStatus
@@ -2384,9 +2758,10 @@ type ReplicaSettingsUpdate struct {
 	ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate

 	// The maximum number of strongly consistent reads consumed per second before
-	// DynamoDB returns a ThrottlingException . For more information, see Specifying
-	// Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
-	// in the Amazon DynamoDB Developer Guide.
+	// DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the
+	// Amazon DynamoDB Developer Guide.
+	//
+	// [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput
 	ReplicaProvisionedReadCapacityUnits *int64

 	// Replica-specific table class. If not specified, uses the source table's table
@@ -2397,10 +2772,13 @@ type ReplicaSettingsUpdate struct {
 }

 // Represents one of the following:
+//
 //   - A new replica to be added to an existing regional table or global table.
 //     This request invokes the CreateTableReplica action in the destination Region.
+//
 //   - New parameters for an existing replica. This request invokes the UpdateTable
 //     action in the destination Region.
+//
 //   - An existing replica to be deleted. The request invokes the
 //     DeleteTableReplica action in the destination Region, deleting the replica and
 //     all if its items in the destination Region.
@@ -2423,8 +2801,11 @@ type ReplicationGroupUpdate struct {
 }

 // Represents one of the following:
+//
 //   - A new replica to be added to an existing global table.
+//
 //   - New parameters for an existing replica.
+//
 //   - An existing replica to be removed from an existing global table.
 type ReplicaUpdate struct {
@@ -2462,16 +2843,16 @@ type RestoreSummary struct {
 // The S3 bucket that is being imported from.
 type S3BucketSource struct {

-	//  The S3 bucket that is being imported from.
+	// The S3 bucket that is being imported from.
 	//
 	// This member is required.
 	S3Bucket *string

-	//  The account number of the S3 bucket that is being imported from. If the bucket
-	// is owned by the requester this is optional.
+	// The account number of the S3 bucket that is being imported from. If the bucket
+	// is owned by the requester this is optional.
 	S3BucketOwner *string

-	//  The key prefix shared by all S3 Objects that are being imported.
+	// The key prefix shared by all S3 Objects that are being imported.
 	S3KeyPrefix *string

 	noSmithyDocumentSerde
@@ -2507,8 +2888,10 @@ type SourceTableDetails struct {
 	// Controls how you are charged for read and write throughput and how you manage
 	// capacity. This setting can be changed later.
+	//
 	//   - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We
 	//     recommend using PROVISIONED for predictable workloads.
+	//
 	//   - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We
 	//     recommend using PAY_PER_REQUEST for unpredictable workloads.
 	BillingMode BillingMode
@@ -2516,6 +2899,11 @@ type SourceTableDetails struct {
 	// Number of items in the table. Note that this is an approximate value.
 	ItemCount *int64

+	// Sets the maximum number of read and write units for the specified on-demand
+	// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+	// MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
 	// ARN of the table for which backup was created.
 	TableArn *string
@@ -2566,13 +2954,16 @@ type SSEDescription struct {
 	KMSMasterKeyArn *string

 	// Server-side encryption type. The only supported value is:
+	//
 	//   - KMS - Server-side encryption that uses Key Management Service. The key is
 	//     stored in your account and is managed by KMS (KMS charges apply).
 	SSEType SSEType

 	// Represents the current state of server-side encryption. The only supported
 	// values are:
+	//
 	//   - ENABLED - Server-side encryption is enabled.
+	//
 	//   - UPDATING - Server-side encryption is being updated.
 	Status SSEStatus
@@ -2596,6 +2987,7 @@ type SSESpecification struct {
 	KMSMasterKeyId *string

 	// Server-side encryption type. The only supported value is:
+	//
 	//   - KMS - Server-side encryption that uses Key Management Service. The key is
 	//     stored in your account and is managed by KMS (KMS charges apply).
 	SSEType SSEType
@@ -2612,15 +3004,19 @@ type StreamSpecification struct {
 	// This member is required.
 	StreamEnabled *bool

-	//  When an item in the table is modified, StreamViewType determines what
+	// When an item in the table is modified, StreamViewType determines what
 	// information is written to the stream for this table. Valid values for
 	// StreamViewType are:
+	//
 	//   - KEYS_ONLY - Only the key attributes of the modified item are written to the
 	//     stream.
+	//
 	//   - NEW_IMAGE - The entire item, as it appears after it was modified, is written
 	//     to the stream.
+	//
 	//   - OLD_IMAGE - The entire item, as it appeared before it was modified, is
 	//     written to the stream.
+	//
 	//   - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
 	//     written to the stream.
 	StreamViewType StreamViewType
@@ -2638,9 +3034,13 @@ type TableAutoScalingDescription struct {
 	TableName *string

 	// The current state of the table:
+	//
 	//   - CREATING - The table is being created.
+	//
 	//   - UPDATING - The table is being updated.
+	//
 	//   - DELETING - The table is being deleted.
+	//
 	//   - ACTIVE - The table is ready for use.
 	TableStatus TableStatus
@@ -2663,35 +3063,42 @@ type TableClassSummary struct {
 // The parameters for the table created as part of the import operation.
 type TableCreationParameters struct {

-	//  The attributes of the table created as part of the import operation.
+	// The attributes of the table created as part of the import operation.
 	//
 	// This member is required.
 	AttributeDefinitions []AttributeDefinition

-	//  The primary key and option sort key of the table created as part of the import
+	// The primary key and option sort key of the table created as part of the import
 	// operation.
 	//
 	// This member is required.
 	KeySchema []KeySchemaElement

-	//  The name of the table created as part of the import operation.
+	// The name of the table created as part of the import operation.
 	//
 	// This member is required.
 	TableName *string

-	//  The billing mode for provisioning the table created as part of the import
+	// The billing mode for provisioning the table created as part of the import
 	// operation.
 	BillingMode BillingMode

-	//  The Global Secondary Indexes (GSI) of the table to be created as part of the
+	// The Global Secondary Indexes (GSI) of the table to be created as part of the
 	// import operation.
 	GlobalSecondaryIndexes []GlobalSecondaryIndex

+	// Sets the maximum number of read and write units for the specified on-demand
+	// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+	// MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
 	// Represents the provisioned throughput settings for a specified table or index.
-	// The settings can be modified using the UpdateTable operation. For current
-	// minimum and maximum provisioned throughput values, see Service, Account, and
-	// Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// The settings can be modified using the UpdateTable operation.
+	//
+	// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+	// Amazon DynamoDB Developer Guide.
+	//
+	// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
 	ProvisionedThroughput *ProvisionedThroughput

 	// Represents the settings used to enable server-side encryption.
@@ -2707,17 +3114,21 @@ type TableDescription struct {
 	ArchivalSummary *ArchivalSummary

 	// An array of AttributeDefinition objects. Each of these objects describes one
-	// attribute in the table and index key schema. Each AttributeDefinition object in
-	// this array is composed of:
+	// attribute in the table and index key schema.
+	//
+	// Each AttributeDefinition object in this array is composed of:
+	//
 	//   - AttributeName - The name of the attribute.
+	//
 	//   - AttributeType - The data type for the attribute.
 	AttributeDefinitions []AttributeDefinition

 	// Contains the details for the read/write capacity mode.
 	BillingModeSummary *BillingModeSummary

-	// The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)
-	// format.
+	// The date and time when the table was created, in [UNIX epoch time] format.
+	//
+	// [UNIX epoch time]: http://www.epochconverter.com/
 	CreationDateTime *time.Time

 	// Indicates whether deletion protection is enabled (true) or disabled (false) on
@@ -2726,53 +3137,75 @@ type TableDescription struct {
 	// The global secondary indexes, if any, on the table. Each index is scoped to a
 	// given partition key value. Each element is composed of:
+	//
 	//   - Backfilling - If true, then the index is currently in the backfilling phase.
 	//     Backfilling occurs only when a new global secondary index is added to the table.
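TableCreationParameters feeds ImportTable. A minimal sketch under assumed bucket and table names (all hypothetical; not part of the diff):

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func startImport(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.ImportTable(ctx, &dynamodb.ImportTableInput{
		InputFormat: types.InputFormatCsv,
		S3BucketSource: &types.S3BucketSource{
			S3Bucket:    aws.String("my-import-bucket"), // hypothetical
			S3KeyPrefix: aws.String("exports/"),
		},
		TableCreationParameters: &types.TableCreationParameters{
			TableName:   aws.String("Imported"),
			BillingMode: types.BillingModePayPerRequest,
			AttributeDefinitions: []types.AttributeDefinition{
				{AttributeName: aws.String("Id"), AttributeType: types.ScalarAttributeTypeS},
			},
			KeySchema: []types.KeySchemaElement{
				{AttributeName: aws.String("Id"), KeyType: types.KeyTypeHash},
			},
		},
	})
	return err
}
```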
 	//     It is the process by which DynamoDB populates the new index with data from the
 	//     table. (This attribute does not appear for indexes that were created during a
-	//     CreateTable operation.) You can delete an index that is being created during
-	//     the Backfilling phase when IndexStatus is set to CREATING and Backfilling is
-	//     true. You can't delete the index that is being created when IndexStatus is set
-	//     to CREATING and Backfilling is false. (This attribute does not appear for
-	//     indexes that were created during a CreateTable operation.)
+	//     CreateTable operation.)
+	//
+	// You can delete an index that is being created during the Backfilling phase when
+	// IndexStatus is set to CREATING and Backfilling is true. You can't delete the
+	// index that is being created when IndexStatus is set to CREATING and
+	// Backfilling is false. (This attribute does not appear for indexes that were
+	// created during a CreateTable operation.)
+	//
 	//   - IndexName - The name of the global secondary index.
+	//
 	//   - IndexSizeBytes - The total size of the global secondary index, in bytes.
 	//     DynamoDB updates this value approximately every six hours. Recent changes might
 	//     not be reflected in this value.
+	//
 	//   - IndexStatus - The current status of the global secondary index:
+	//
 	//   - CREATING - The index is being created.
+	//
 	//   - UPDATING - The index is being updated.
+	//
 	//   - DELETING - The index is being deleted.
+	//
 	//   - ACTIVE - The index is ready for use.
+	//
 	//   - ItemCount - The number of items in the global secondary index. DynamoDB
 	//     updates this value approximately every six hours. Recent changes might not be
 	//     reflected in this value.
+	//
 	//   - KeySchema - Specifies the complete index key schema. The attribute names in
 	//     the key schema must be between 1 and 255 characters (inclusive). The key schema
 	//     must begin with the same partition key as the table.
+	//
 	//   - Projection - Specifies attributes that are copied (projected) from the table
 	//     into the index. These are in addition to the primary key attributes and index
 	//     key attributes, which are automatically projected. Each attribute specification
 	//     is composed of:
+	//
 	//   - ProjectionType - One of the following:
+	//
 	//   - KEYS_ONLY - Only the index and primary keys are projected into the index.
+	//
 	//   - INCLUDE - In addition to the attributes described in KEYS_ONLY , the
 	//     secondary index will include other non-key attributes that you specify.
+	//
 	//   - ALL - All of the table attributes are projected into the index.
+	//
 	//   - NonKeyAttributes - A list of one or more non-key attribute names that are
 	//     projected into the secondary index. The total count of attributes provided in
 	//     NonKeyAttributes , summed across all of the secondary indexes, must not exceed
 	//     100. If you project the same attribute into two different indexes, this counts
 	//     as two distinct attributes when determining the total.
+	//
 	//   - ProvisionedThroughput - The provisioned throughput settings for the global
 	//     secondary index, consisting of read and write capacity units, along with data
 	//     about increases and decreases.
+	//
 	// If the table is in the DELETING state, no information about indexes will be
 	// returned.
 	GlobalSecondaryIndexes []GlobalSecondaryIndexDescription

-	// Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-	// in use, if the table is replicated across Amazon Web Services Regions.
+	// Represents the version of [global tables] in use, if the table is replicated across Amazon Web
+	// Services Regions.
+	//
+	// [global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
 	GlobalTableVersion *string

 	// The number of items in the specified table. DynamoDB updates this value
@@ -2781,30 +3214,45 @@ type TableDescription struct {
 	ItemCount *int64

 	// The primary key structure for the table. Each KeySchemaElement consists of:
+	//
 	//   - AttributeName - The name of the attribute.
+	//
 	//   - KeyType - The role of the attribute:
+	//
 	//   - HASH - partition key
-	//   - RANGE - sort key The partition key of an item is also known as its hash
-	//     attribute. The term "hash attribute" derives from DynamoDB's usage of an
-	//     internal hash function to evenly distribute data items across partitions, based
-	//     on their partition key values. The sort key of an item is also known as its
-	//     range attribute. The term "range attribute" derives from the way DynamoDB stores
-	//     items with the same partition key physically close together, in sorted order by
-	//     the sort key value.
-	//     For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
-	//     in the Amazon DynamoDB Developer Guide.
+	//
+	//   - RANGE - sort key
+	//
+	// The partition key of an item is also known as its hash attribute. The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function to
+	// evenly distribute data items across partitions, based on their partition key
+	// values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	//
+	// For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer
+	// Guide.
+	//
+	// [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey
 	KeySchema []KeySchemaElement

 	// The Amazon Resource Name (ARN) that uniquely identifies the latest stream for
 	// this table.
 	LatestStreamArn *string

-	// A timestamp, in ISO 8601 format, for this stream. Note that LatestStreamLabel
-	// is not a unique identifier for the stream, because it is possible that a stream
-	// from another table might have the same timestamp. However, the combination of
-	// the following three elements is guaranteed to be unique:
+	// A timestamp, in ISO 8601 format, for this stream.
+	//
+	// Note that LatestStreamLabel is not a unique identifier for the stream, because
+	// it is possible that a stream from another table might have the same timestamp.
+	// However, the combination of the following three elements is guaranteed to be
+	// unique:
+	//
 	//   - Amazon Web Services customer ID
+	//
 	//   - Table name
+	//
 	//   - StreamLabel
 	LatestStreamLabel *string
@@ -2814,33 +3262,48 @@ type TableDescription struct {
 	// within a given item collection cannot exceed 10 GB. Each element is composed of:
 	//
 	//   - IndexName - The name of the local secondary index.
+	//
 	//   - KeySchema - Specifies the complete index key schema. The attribute names in
 	//     the key schema must be between 1 and 255 characters (inclusive). The key schema
 	//     must begin with the same partition key as the table.
+	//
 	//   - Projection - Specifies attributes that are copied (projected) from the table
 	//     into the index. These are in addition to the primary key attributes and index
 	//     key attributes, which are automatically projected. Each attribute specification
 	//     is composed of:
+	//
 	//   - ProjectionType - One of the following:
+	//
 	//   - KEYS_ONLY - Only the index and primary keys are projected into the index.
+	//
 	//   - INCLUDE - Only the specified table attributes are projected into the index.
 	//     The list of projected attributes is in NonKeyAttributes .
+	//
 	//   - ALL - All of the table attributes are projected into the index.
+	//
 	//   - NonKeyAttributes - A list of one or more non-key attribute names that are
 	//     projected into the secondary index. The total count of attributes provided in
 	//     NonKeyAttributes , summed across all of the secondary indexes, must not exceed
 	//     100. If you project the same attribute into two different indexes, this counts
 	//     as two distinct attributes when determining the total.
+	//
 	//   - IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
 	//     updates this value approximately every six hours. Recent changes might not be
 	//     reflected in this value.
+	//
 	//   - ItemCount - Represents the number of items in the index. DynamoDB updates
 	//     this value approximately every six hours. Recent changes might not be reflected
 	//     in this value.
+	//
 	// If the table is in the DELETING state, no information about indexes will be
 	// returned.
 	LocalSecondaryIndexes []LocalSecondaryIndexDescription

+	// The maximum number of read and write units for the specified on-demand table.
+	// If you use this parameter, you must specify MaxReadRequestUnits ,
+	// MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
 	// The provisioned throughput settings for the table, consisting of read and write
 	// capacity units, along with data about increases and decreases.
 	ProvisionedThroughput *ProvisionedThroughputDescription
@@ -2875,17 +3338,24 @@ type TableDescription struct {
 	TableSizeBytes *int64

 	// The current state of the table:
+	//
 	//   - CREATING - The table is being created.
+	//
 	//   - UPDATING - The table/index configuration is being updated. The table/index
 	//     remains available for data operations when UPDATING .
+	//
 	//   - DELETING - The table is being deleted.
+	//
 	//   - ACTIVE - The table is ready for use.
+	//
 	//   - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
 	//     in inaccessible. Table operations may fail due to failure to use the KMS key.
 	//     DynamoDB will initiate the table archival process when a table's KMS key remains
 	//     inaccessible for more than seven days.
+	//
 	//   - ARCHIVING - The table is being archived. Operations are not allowed until
 	//     archival is complete.
+	//
 	//   - ARCHIVED - The table has been archived. See the ArchivalReason for more
 	//     information.
 	TableStatus TableStatus
@@ -2894,13 +3364,18 @@ type TableDescription struct {
 }

 // Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a
-// single DynamoDB table. Amazon Web Services-assigned tag names and values are
-// automatically assigned the aws: prefix, which the user cannot assign. Amazon
-// Web Services-assigned tag names do not count towards the tag limit of 50.
-// User-assigned tag names have the prefix user: in the Cost Allocation Report.
-// You cannot backdate the application of a tag. For an overview on tagging
-// DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
-// in the Amazon DynamoDB Developer Guide.
+// single DynamoDB table.
+//
+// Amazon Web Services-assigned tag names and values are automatically assigned
+// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned tag
+// names do not count towards the tag limit of 50. User-assigned tag names have the
+// prefix user: in the Cost Allocation Report. You cannot backdate the application
+// of a tag.
+//
+// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
 type Tag struct {

 	// The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only
@@ -2921,10 +3396,10 @@ type Tag struct {
 // The description of the Time to Live (TTL) status on the specified table.
 type TimeToLiveDescription struct {

-	//  The name of the TTL attribute for items in the table.
+	// The name of the TTL attribute for items in the table.
 	AttributeName *string

-	//  The TTL status for the table.
+	// The TTL status for the table.
 	TimeToLiveStatus TimeToLiveStatus

 	noSmithyDocumentSerde
@@ -3027,12 +3502,18 @@ type UpdateGlobalSecondaryIndexAction struct {
 	// This member is required.
 	IndexName *string

+	// Updates the maximum number of read and write units for the specified global
+	// secondary index. If you use this parameter, you must specify MaxReadRequestUnits
+	// , MaxWriteRequestUnits , or both.
+	OnDemandThroughput *OnDemandThroughput
+
 	// Represents the provisioned throughput settings for the specified global
-	// secondary index. For current minimum and maximum provisioned throughput values,
-	// see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-	// in the Amazon DynamoDB Developer Guide.
+	// secondary index.
 	//
-	// This member is required.
+	// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+	// Amazon DynamoDB Developer Guide.
+	//
+	// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
 	ProvisionedThroughput *ProvisionedThroughput

 	noSmithyDocumentSerde
@@ -3064,6 +3545,9 @@ type UpdateReplicationGroupMemberAction struct {
 	// default DynamoDB KMS key alias/aws/dynamodb .
 	KMSMasterKeyId *string

+	// Overrides the maximum on-demand throughput for the replica table.
+	OnDemandThroughputOverride *OnDemandThroughputOverride
+
 	// Replica-specific provisioned throughput. If not specified, uses the source
 	// table's provisioned throughput settings.
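Together with the validator change further down in this diff (ProvisionedThroughput is no longer a required member), an UpdateGlobalSecondaryIndexAction can now carry only an on-demand cap. An illustrative sketch with hypothetical names:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

func capIndexReads(ctx context.Context, client *dynamodb.Client) error {
	_, err := client.UpdateTable(ctx, &dynamodb.UpdateTableInput{
		TableName: aws.String("Orders"), // hypothetical
		GlobalSecondaryIndexUpdates: []types.GlobalSecondaryIndexUpdate{{
			Update: &types.UpdateGlobalSecondaryIndexAction{
				IndexName: aws.String("ByCustomer"),
				// No ProvisionedThroughput: with the relaxed validator this
				// on-demand setting alone passes client-side validation.
				OnDemandThroughput: &types.OnDemandThroughput{
					MaxReadRequestUnits: aws.Int64(200),
				},
			},
		}},
	})
	return err
}
```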
ProvisionedThroughputOverride *ProvisionedThroughputOverride diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go index d1925ed8..3d7f72cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go @@ -2430,9 +2430,7 @@ func validateUpdateGlobalSecondaryIndexAction(v *types.UpdateGlobalSecondaryInde if v.IndexName == nil { invalidParams.Add(smithy.NewErrParamRequired("IndexName")) } - if v.ProvisionedThroughput == nil { - invalidParams.Add(smithy.NewErrParamRequired("ProvisionedThroughput")) - } else if v.ProvisionedThroughput != nil { + if v.ProvisionedThroughput != nil { if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil { invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError)) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md index bb5a864a..5fc3e631 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.9.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.6 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go index d35a8bfc..38db3007 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go @@ -3,4 +3,4 @@ package endpointdiscovery // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.6" +const goModuleVersion = "1.9.8" diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index 31553e78..5dd4e447 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -148,6 +148,12 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM var pairs []string for key, vals := range req.Header { key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + for _, val := range vals { // For backwards-compatibility, pass through 'authorization' header with no prefix. 
if key == "Authorization" { @@ -181,18 +187,17 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcM pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) } + xff := req.Header.Values(xForwardedFor) if addr := req.RemoteAddr; addr != "" { if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } + xff = append(xff, remoteIP) } } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } if timeout != 0 { - //nolint:govet // The context outlives this function ctx, _ = context.WithTimeout(ctx, timeout) } if len(pairs) == 0 { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 230cac7b..56829986 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -71,7 +71,7 @@ func HTTPStatusFromCode(code codes.Code) int { case codes.DataLoss: return http.StatusInternalServerError default: - grpclog.Infof("Unknown gRPC error code: %v", code) + grpclog.Warningf("Unknown gRPC error code: %v", code) return http.StatusInternalServerError } } @@ -114,17 +114,17 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh buf, merr := marshaler.Marshal(pb) if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s, merr) + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } return } md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -148,7 +148,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh w.WriteHeader(st) if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 5e14cf8b..de1eef1f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "net/textproto" + "strconv" "strings" "google.golang.org/genproto/googleapis/api/httpbody" @@ -17,16 +18,10 @@ import ( // ForwardResponseStream forwards the stream from gRPC server to REST client. 
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - + rc := http.NewResponseController(w) md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") http.Error(w, "unexpected error", http.StatusInternalServerError) return } @@ -81,20 +76,29 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) + grpclog.Errorf("Failed to marshal response chunk: %v", err) handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) + grpclog.Errorf("Failed to send response chunk: %v", err) return } wroteHeader = true if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) return } - f.Flush() } } @@ -136,7 +140,7 @@ type responseBody interface { func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { md, ok := ServerMetadataFromContext(ctx) if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") + grpclog.Error("Failed to extract ServerMetadata from context") } handleForwardResponseServerMetadata(w, mux, md) @@ -168,13 +172,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha buf, err = marshaler.Marshal(resp) } if err != nil { - grpclog.Infof("Marshal error: %v", err) + grpclog.Errorf("Marshal error: %v", err) HTTPError(ctx, mux, marshaler, w, req, err) return } + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) + grpclog.Errorf("Failed to write response: %v", err) } if doForwardTrailers { @@ -193,7 +201,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) return err } } @@ -209,15 +217,15 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar } buf, err := marshaler.Marshal(msg) if err != nil { - grpclog.Infof("Failed to marshal an error: %v", err) + grpclog.Errorf("Failed to marshal an error: %v", err) return } if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to notify error to client: %v", err) + grpclog.Errorf("Failed to notify error to client: %v", err) return 
} if _, err := w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) + grpclog.Errorf("Failed to send delimiter chunk: %v", err) return } } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go index d6aa8257..fe52081a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -24,6 +24,11 @@ func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + // Unmarshal unmarshals JSON data into "v". func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index 51b8247d..8376d1e0 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -30,10 +30,6 @@ func (*JSONPb) ContentType(_ interface{}) string { // Marshal marshals "v" into JSON. func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - var buf bytes.Buffer if err := j.marshalTo(&buf, v); err != nil { return nil, err @@ -48,9 +44,17 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { if err != nil { return err } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } _, err = w.Write(buf) return err } + b, err := j.MarshalOptions.Marshal(p) if err != nil { return err @@ -150,9 +154,6 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { } m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } return json.Marshal(m) } if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index a714de02..0b051e6e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -46,7 +46,7 @@ func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, out for _, contentTypeVal := range r.Header[contentTypeHeader] { contentType, _, err := mime.ParseMediaType(contentTypeVal) if err != nil { - grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err) + grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err) continue } if m, ok := mux.marshalers.mimeMap[contentType]; ok { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go index 8f90d15a..e5450714 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -52,13 +52,13 @@ type Pattern 
struct { // It returns an error if the given definition is invalid. func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { if version != 1 { - grpclog.Infof("unsupported version: %d", version) + grpclog.Errorf("unsupported version: %d", version) return Pattern{}, ErrInvalidPattern } l := len(ops) if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) + grpclog.Errorf("odd number of ops codes: %d", l) return Pattern{}, ErrInvalidPattern } @@ -81,14 +81,14 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpPushM: if pushMSeen { - grpclog.Infof("pushM appears twice") + grpclog.Error("pushM appears twice") return Pattern{}, ErrInvalidPattern } pushMSeen = true stack++ case utilities.OpLitPush: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) + grpclog.Errorf("negative literal index: %d", op.operand) return Pattern{}, ErrInvalidPattern } if pushMSeen { @@ -97,18 +97,18 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er stack++ case utilities.OpConcatN: if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) + grpclog.Errorf("negative concat size: %d", op.operand) return Pattern{}, ErrInvalidPattern } stack -= op.operand if stack < 0 { - grpclog.Info("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } stack++ case utilities.OpCapture: if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) + grpclog.Errorf("variable name index out of bound: %d", op.operand) return Pattern{}, ErrInvalidPattern } v := pool[op.operand] @@ -116,11 +116,11 @@ func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, er vars = append(vars, v) stack-- if stack < 0 { - grpclog.Infof("stack underflow") + grpclog.Error("stack underflow") return Pattern{}, ErrInvalidPattern } default: - grpclog.Infof("invalid opcode: %d", op.code) + grpclog.Errorf("invalid opcode: %d", op.code) return Pattern{}, ErrInvalidPattern } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 178fdbaf..80d1fe94 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool { // Status returns the status of the alert. func (a *Alert) Status() AlertStatus { - if a.Resolved() { + return a.StatusAt(time.Now()) +} + +// StatusAt returns the status of the alert at the given timestamp. +func (a *Alert) StatusAt(ts time.Time) AlertStatus { + if a.ResolvedAt(ts) { return AlertResolved } return AlertFiring @@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool { return false } +// HasFiringAt returns true iff one of the alerts is not resolved +// at the time ts. +func (as Alerts) HasFiringAt(ts time.Time) bool { + for _, a := range as { + if !a.ResolvedAt(ts) { + return true + } + } + return false +} + // Status returns StatusFiring iff at least one of the alerts is firing. func (as Alerts) Status() AlertStatus { if as.HasFiring() { @@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus { } return AlertResolved } + +// StatusAt returns StatusFiring iff at least one of the alerts is firing +// at the time ts. 
+func (as Alerts) StatusAt(ts time.Time) AlertStatus { + if as.HasFiringAt(ts) { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 174753ed..481c47b4 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -17,7 +17,7 @@ package model import ( "bytes" - "sort" + "slices" "strconv" ) @@ -28,7 +28,7 @@ func (l LabelSet) String() string { for name := range l { labelNames = append(labelNames, string(name)) } - sort.Strings(labelNames) + slices.Sort(labelNames) var bytea [1024]byte // On stack to avoid memory allocation while building the output. b := bytes.NewBuffer(bytea[:0]) b.WriteByte('{') diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 56ba67d3..e00f3b36 100644 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1,2 +1,3 @@ * Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier +* Paul Gier @pgier +* Ben Kochie @SuperQ diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 92558151..0e9ace29 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -49,19 +49,19 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell command -v gotestsum > /dev/null),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.15.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.55.2 +GOLANGCI_LINT_VERSION ?= v1.56.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) @@ -182,7 +182,7 @@ endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell command -v yamllint > /dev/null)) +ifeq (, $(shell command -v yamllint 2> /dev/null)) @echo "yamllint not installed so skipping" else yamllint . 
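Review note: the model/alert.go hunk above adds time-parameterized variants (Alert.StatusAt, Alerts.HasFiringAt, Alerts.StatusAt) so alert status can be evaluated at an arbitrary instant rather than only at time.Now(). A small usage sketch with invented alert data:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// An alert that fired an hour ago and resolved ten minutes ago.
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency"},
		StartsAt: time.Now().Add(-time.Hour),
		EndsAt:   time.Now().Add(-10 * time.Minute),
	}

	// Status() evaluates "now"; StatusAt() evaluates an explicit instant,
	// so historical queries no longer depend on wall-clock time.
	fmt.Println(a.Status())                                    // resolved
	fmt.Println(a.StatusAt(time.Now().Add(-30 * time.Minute))) // firing
}
```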
@@ -208,6 +208,10 @@ common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 28783e2d..cdcc8a7c 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -55,7 +55,7 @@ type ARPEntry struct { func (fs FS) GatherARPEntries() ([]ARPEntry, error) { data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) + return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) + return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 4a173636..83807500 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], ",") arraySize := len(parts[4:]) if bucketCount == -1 { @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f4f5501c..f0950bb4 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) } field := strings.SplitN(firstLine, ": ", 2) @@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || 
!strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 13ee66f1..5f2a37a7 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f560a8db..cf2e3eaa 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) + return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 5a145bbf..bc3a20c9 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { return nil, 0, - fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 59465c5b..332e76c1 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) + return nil, 
fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index fdd4b954..67a9d2b4 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. + BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive @@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. 
if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, 
recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 73a03e8a..4b2c4050 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -202,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) + return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) } return *m, nil diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 388ebf39..a704c5e7 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) 
(*MountInfo, error) { if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, fmt.Errorf("%s: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 9d8af6db..75a3b6c8 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -88,7 +88,7 @@ type MountStatsNFS struct { // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. - Transport NFSTransportStats + Transport []NFSTransportStats } // mountStats implements MountStats. @@ -194,8 +194,6 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. CumulativeTotalRequestMilliseconds uint64 - // The average time from the point the client sends RPC requests until it receives the response. - AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, err } - stats.Transport = *tstats + stats.Transport = append(stats.Transport, *tstats) } // When encountering "per-operation statistics", we must break this @@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } - if ns[0] != 0 { - opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) - } if len(ns) > 8 { opStats.Errors = ns[8] @@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) + return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index fdfa4561..316df5fb 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) + return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } numEntries := len(entries) if numEntries < 16 || numEntries > 17 { diff --git 
a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index ba7d9caa..b70f1fc7 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -141,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -155,7 +155,7 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) } } @@ -178,7 +178,7 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") @@ -189,7 +189,7 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address @@ -201,12 +201,12 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue @@ -219,27 +219,27 @@ func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + return 
nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) } // drops if isUDP { drops, err := strconv.ParseUint(fields[12], 0, 64) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) } line.Drops = &drops } diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index 360e36af..fae62b13 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index c7708529..71c8059f 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index acbbc57e..d868cebd 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) + return 
nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) + return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7443edca..7c597bc8 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) { m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) } return m, nil @@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { } if err := scanner.Err(); 
err != nil { - return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index d1f71caa..14279636 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. @@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index c86d815d..9530b14b 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c2266675..0f8f847f 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) { typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index fe9dbb42..ccd35f15 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,7 +61,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource 
string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(bytes.NewReader(data)) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index ad8785a4..09060e82 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 46307f57..a055197c 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "math/bits" "sort" "strconv" "strings" @@ -76,9 +77,9 @@ type ProcStatus struct { NonVoluntaryCtxtSwitches uint64 // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string + UIDs [4]uint64 // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string + GIDs [4]uint64 // CpusAllowedList: List of cpu cores processes are allowed to run on. CpusAllowedList []uint64 @@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) { // convert kB to B vBytes := vKBytes * 1024 - s.fillStatus(k, v, vKBytes, vBytes) + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } } return s, nil } -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { switch k { case "Tgid": s.TGID = int(vUint) case "Name": s.Name = vString case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "NSpid": s.NSpids = calcNSPidsList(vString) case "VmPeak": @@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.CpusAllowedList = calcCpusAllowedList(vString) } + return nil } // TotalCtxtSwitches returns the total context switch. 
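Review note: the bulk of the procfs hunks replace the leading %s verb with %w when wrapping ErrFileParse and ErrFileRead. Since Go 1.20, fmt.Errorf may wrap more than one error, so both the sentinel and the underlying cause remain visible to errors.Is. A minimal sketch with a stand-in sentinel (the real ones live in github.com/prometheus/procfs):

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// Stand-in for the procfs sentinel errors.
var ErrFileParse = errors.New("error parsing file")

func parseValue(s string) (uint64, error) {
	v, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		// Go 1.20+: fmt.Errorf can wrap several errors at once, so both
		// the sentinel and the strconv error stay inspectable.
		return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
	}
	return v, nil
}

func main() {
	_, err := parseValue("not-a-number")
	fmt.Println(errors.Is(err, ErrFileParse))      // true
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true: inner error is wrapped too
}
```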
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 12c5bf05..5eefbe2e 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) + return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index b8fad677..28708e07 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): 
%w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 34fc3ee2..e36b41c1 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil 
@@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index fa00f555..65fec834 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - 
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index df2215ec..80e0e947 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index ce5fefa5..e54d94b0 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md new file mode 100644 index 00000000..7360d188 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/BENCHMARKS.md @@ -0,0 +1,24 @@ +## Summary + +Using the Prometheus bridge and the OTLP exporter adds roughly ~50% to the CPU and memory overhead of an application compared to serving a Prometheus HTTP endpoint for metrics. + +However, unless the application has extremely high cardinality for metrics, this is unlikely to represent a significant amount of additional overhead because the base-line memory consumption of client libraries is relatively low. For an application with 30k timeseries (which is a very high number), the additional overhead is about 50MB and about 0.1 CPU cores. + +The bridge is particularly useful if you are exporting to an OpenTelemetry Collector, since the OTLP receiver is much more efficient than the Prometheus receiver. For the same 30k timeseries, the Prometheus receiver uses 3x the amount of memory, and 20x the amount of CPU. In concrete numbers, this is an additional 228 MB of memory, and 0.57 CPU cores. + +For an application using the Prometheus client library, and exporting to an OpenTelemetry collector, the total CPU usage is 55% lower and total memory usage is 45% lower when using the bridge and the OTLP receiver compared to using a Prometheus endpoint and the collector's Prometheus receiver. + +## Methods and Results + +The sample application uses the Prometheus client library, and defines one histogram with the default 12 buckets, one counter, and one gauge. Each metric has a single label with 10k values, which are observed every second. 
See the [sample application's source](https://github.com/dashpole/client_golang/pull/1). + +The memory usage of the sample application is measured using the `/memory/classes/total:bytes` metric from the go runtime. The CPU usage of the application is measured using `top`. The CPU and memory usage of the collector are measured using `docker stats`. It was built using v0.50.0 of the bridge, v1.25.0 of the OpenTelemetry API and SDK, and v1.19.0 of the Prometheus client. + +The OpenTelemetry Collector is configured with only the OTLP or Prometheus receiver, and the debug (logging) exporter with only the basic output. The benchmark uses the Contrib distribution at v0.97.0. + +| Experiment | Memory Usage (MB) | CPU Usage (millicores) | +|---|---|---| +| App w/ Prometheus Export | 94 | 220 | +| App w/ Bridge + OTLP Export | 140 | 330 | +| Collector w/ Prometheus Receiver | 320 | 600 | +| Collector w/ OTLP Receiver | 92 | 30 | diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/config.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/config.go new file mode 100644 index 00000000..5b8a16c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/config.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheus // import "go.opentelemetry.io/contrib/bridges/prometheus" + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// config contains options for the producer. +type config struct { + gatherers []prometheus.Gatherer +} + +// newConfig creates a validated config configured with options. +func newConfig(opts ...Option) config { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + if len(cfg.gatherers) == 0 { + cfg.gatherers = []prometheus.Gatherer{prometheus.DefaultGatherer} + } + + return cfg +} + +// Option sets producer option values. 
+type Option interface { + apply(config) config +} + +type optionFunc func(config) config + +func (fn optionFunc) apply(cfg config) config { + return fn(cfg) +} + +// WithGatherer configures which prometheus Gatherer the Bridge will gather +// from. If no Gatherer is provided, the prometheus DefaultGatherer is used. +func WithGatherer(gatherer prometheus.Gatherer) Option { + return optionFunc(func(cfg config) config { + cfg.gatherers = append(cfg.gatherers, gatherer) + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/doc.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/doc.go new file mode 100644 index 00000000..44145a10 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/doc.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package prometheus provides a bridge from Prometheus to OpenTelemetry. +// +// The Prometheus Bridge allows using the [Prometheus Golang client library] +// with the OpenTelemetry SDK. This enables prometheus instrumentation libraries +// to be used with OpenTelemetry exporters, including OTLP. +// +// Prometheus histograms are translated to OpenTelemetry exponential histograms +// when native histograms are enabled in the Prometheus client. To enable +// Prometheus native histograms, set the (currently experimental) NativeHistogram... +// options of the prometheus [HistogramOpts] when creating prometheus histograms. +// +// While the Prometheus Bridge has some overhead, it can significantly reduce the +// combined overall CPU and memory footprint when sending to an OpenTelemetry +// Collector. See the [benchmarks] for more details. +// +// [Prometheus Golang client library]: https://github.com/prometheus/client_golang +// [HistogramOpts]: https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#HistogramOpts +// [benchmarks]: https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/bridges/prometheus/BENCHMARKS.md +package prometheus // import "go.opentelemetry.io/contrib/bridges/prometheus" diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go new file mode 100644 index 00000000..5626bdc5 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go @@ -0,0 +1,403 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheus // import "go.opentelemetry.io/contrib/bridges/prometheus" + +import ( + "context" + "errors" + "fmt" + "math" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +const ( + scopeName = "go.opentelemetry.io/contrib/bridges/prometheus" + traceIDLabel = "trace_id" + spanIDLabel = "span_id" +) + +var ( + errUnsupportedType = errors.New("unsupported metric type") + processStartTime = time.Now() +) + +type producer struct { + gatherers prometheus.Gatherers +} + +// NewMetricProducer returns a metric.Producer that fetches metrics from +// Prometheus. This can be used to allow Prometheus instrumentation to be +// added to an OpenTelemetry export pipeline. +func NewMetricProducer(opts ...Option) metric.Producer { + cfg := newConfig(opts...) 
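+	// A usage sketch (assumed wiring, for illustration only): attach the
+	// returned producer to a periodic reader so that bridged Prometheus
+	// metrics flow through an OTLP exporter:
+	//
+	//	exp, _ := otlpmetricgrpc.New(ctx)
+	//	reader := metric.NewPeriodicReader(exp,
+	//		metric.WithProducer(prometheus.NewMetricProducer()))
+	//	provider := metric.NewMeterProvider(metric.WithReader(reader))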
+ return &producer{ + gatherers: cfg.gatherers, + } +} + +func (p *producer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) { + now := time.Now() + var errs multierr + otelMetrics := make([]metricdata.Metrics, 0) + for _, gatherer := range p.gatherers { + promMetrics, err := gatherer.Gather() + if err != nil { + errs = append(errs, err) + continue + } + m, err := convertPrometheusMetricsInto(promMetrics, now) + otelMetrics = append(otelMetrics, m...) + if err != nil { + errs = append(errs, err) + } + } + if errs.errOrNil() != nil { + otel.Handle(errs.errOrNil()) + } + if len(otelMetrics) == 0 { + return nil, nil + } + return []metricdata.ScopeMetrics{{ + Scope: instrumentation.Scope{ + Name: scopeName, + }, + Metrics: otelMetrics, + }}, nil +} + +func convertPrometheusMetricsInto(promMetrics []*dto.MetricFamily, now time.Time) ([]metricdata.Metrics, error) { + var errs multierr + otelMetrics := make([]metricdata.Metrics, 0) + for _, pm := range promMetrics { + if len(pm.GetMetric()) == 0 { + // This shouldn't ever happen + continue + } + newMetric := metricdata.Metrics{ + Name: pm.GetName(), + Description: pm.GetHelp(), + } + switch pm.GetType() { + case dto.MetricType_GAUGE: + newMetric.Data = convertGauge(pm.GetMetric(), now) + case dto.MetricType_COUNTER: + newMetric.Data = convertCounter(pm.GetMetric(), now) + case dto.MetricType_SUMMARY: + newMetric.Data = convertSummary(pm.GetMetric(), now) + case dto.MetricType_HISTOGRAM: + if isExponentialHistogram(pm.GetMetric()[0].GetHistogram()) { + newMetric.Data = convertExponentialHistogram(pm.GetMetric(), now) + } else { + newMetric.Data = convertHistogram(pm.GetMetric(), now) + } + default: + // MetricType_GAUGE_HISTOGRAM, MetricType_UNTYPED + errs = append(errs, fmt.Errorf("%w: %v for metric %v", errUnsupportedType, pm.GetType(), pm.GetName())) + continue + } + otelMetrics = append(otelMetrics, newMetric) + } + return otelMetrics, errs.errOrNil() +} + +func isExponentialHistogram(hist *dto.Histogram) bool { + // The prometheus go client ensures at least one of these is non-zero + // so it can be distinguished from a fixed-bucket histogram. 
+ // https://github.com/prometheus/client_golang/blob/7ac90362b02729a65109b33d172bafb65d7dab50/prometheus/histogram.go#L818 + return hist.GetZeroThreshold() > 0 || + hist.GetZeroCount() > 0 || + len(hist.GetPositiveSpan()) > 0 || + len(hist.GetNegativeSpan()) > 0 +} + +func convertGauge(metrics []*dto.Metric, now time.Time) metricdata.Gauge[float64] { + otelGauge := metricdata.Gauge[float64]{ + DataPoints: make([]metricdata.DataPoint[float64], len(metrics)), + } + for i, m := range metrics { + dp := metricdata.DataPoint[float64]{ + Attributes: convertLabels(m.GetLabel()), + Time: now, + Value: m.GetGauge().GetValue(), + } + if m.GetTimestampMs() != 0 { + dp.Time = time.UnixMilli(m.GetTimestampMs()) + } + otelGauge.DataPoints[i] = dp + } + return otelGauge +} + +func convertCounter(metrics []*dto.Metric, now time.Time) metricdata.Sum[float64] { + otelCounter := metricdata.Sum[float64]{ + DataPoints: make([]metricdata.DataPoint[float64], len(metrics)), + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + } + for i, m := range metrics { + dp := metricdata.DataPoint[float64]{ + Attributes: convertLabels(m.GetLabel()), + StartTime: processStartTime, + Time: now, + Value: m.GetCounter().GetValue(), + } + if ex := m.GetCounter().GetExemplar(); ex != nil { + dp.Exemplars = []metricdata.Exemplar[float64]{convertExemplar(ex)} + } + createdTs := m.GetCounter().GetCreatedTimestamp() + if createdTs.IsValid() { + dp.StartTime = createdTs.AsTime() + } + if m.GetTimestampMs() != 0 { + dp.Time = time.UnixMilli(m.GetTimestampMs()) + } + otelCounter.DataPoints[i] = dp + } + return otelCounter +} + +func convertExponentialHistogram(metrics []*dto.Metric, now time.Time) metricdata.ExponentialHistogram[float64] { + otelExpHistogram := metricdata.ExponentialHistogram[float64]{ + DataPoints: make([]metricdata.ExponentialHistogramDataPoint[float64], len(metrics)), + Temporality: metricdata.CumulativeTemporality, + } + for i, m := range metrics { + dp := metricdata.ExponentialHistogramDataPoint[float64]{ + Attributes: convertLabels(m.GetLabel()), + StartTime: processStartTime, + Time: now, + Count: m.GetHistogram().GetSampleCount(), + Sum: m.GetHistogram().GetSampleSum(), + Scale: m.GetHistogram().GetSchema(), + ZeroCount: m.GetHistogram().GetZeroCount(), + ZeroThreshold: m.GetHistogram().GetZeroThreshold(), + PositiveBucket: convertExponentialBuckets( + m.GetHistogram().GetPositiveSpan(), + m.GetHistogram().GetPositiveDelta(), + ), + NegativeBucket: convertExponentialBuckets( + m.GetHistogram().GetNegativeSpan(), + m.GetHistogram().GetNegativeDelta(), + ), + // TODO: Support exemplars + } + createdTs := m.GetHistogram().GetCreatedTimestamp() + if createdTs.IsValid() { + dp.StartTime = createdTs.AsTime() + } + if t := m.GetTimestampMs(); t != 0 { + dp.Time = time.UnixMilli(t) + } + otelExpHistogram.DataPoints[i] = dp + } + return otelExpHistogram +} + +func convertExponentialBuckets(bucketSpans []*dto.BucketSpan, deltas []int64) metricdata.ExponentialBucket { + if len(bucketSpans) == 0 { + return metricdata.ExponentialBucket{} + } + // Prometheus Native Histograms buckets are indexed by upper boundary + // while Exponential Histograms are indexed by lower boundary, the result + // being that the Offset fields are different-by-one. + initialOffset := bucketSpans[0].GetOffset() - 1 + // We will have one bucket count for each delta, and zeros for the offsets + // after the initial offset. 
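+	// For example (hypothetical values): spans [{offset: 2, length: 2},
+	// {offset: 1, length: 1}] with deltas [3, -1, 2] yield initialOffset 1
+	// and counts [3, 2, 0, 4]: cumulative 3 and 3-1=2 for the first span,
+	// one zero for the gap, then 2+2=4 for the second span.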
+ lenCounts := int32(len(deltas)) + for i, bs := range bucketSpans { + if i != 0 { + lenCounts += bs.GetOffset() + } + } + counts := make([]uint64, lenCounts) + deltaIndex := 0 + countIndex := int32(0) + count := int64(0) + for i, bs := range bucketSpans { + // Do not insert zeroes if this is the first bucketSpan, since those + // zeroes are accounted for in the Offset field. + if i != 0 { + // Increase the count index by the Offset to insert Offset zeroes + countIndex += bs.GetOffset() + } + for j := uint32(0); j < bs.GetLength(); j++ { + // Convert deltas to the cumulative number of observations + count += deltas[deltaIndex] + deltaIndex++ + // count should always be positive after accounting for deltas + if count > 0 { + counts[countIndex] = uint64(count) + } + countIndex++ + } + } + return metricdata.ExponentialBucket{ + Offset: initialOffset, + Counts: counts, + } +} + +func convertHistogram(metrics []*dto.Metric, now time.Time) metricdata.Histogram[float64] { + otelHistogram := metricdata.Histogram[float64]{ + DataPoints: make([]metricdata.HistogramDataPoint[float64], len(metrics)), + Temporality: metricdata.CumulativeTemporality, + } + for i, m := range metrics { + bounds, bucketCounts, exemplars := convertBuckets(m.GetHistogram().GetBucket(), m.GetHistogram().GetSampleCount()) + dp := metricdata.HistogramDataPoint[float64]{ + Attributes: convertLabels(m.GetLabel()), + StartTime: processStartTime, + Time: now, + Count: m.GetHistogram().GetSampleCount(), + Sum: m.GetHistogram().GetSampleSum(), + Bounds: bounds, + BucketCounts: bucketCounts, + Exemplars: exemplars, + } + createdTs := m.GetHistogram().GetCreatedTimestamp() + if createdTs.IsValid() { + dp.StartTime = createdTs.AsTime() + } + if m.GetTimestampMs() != 0 { + dp.Time = time.UnixMilli(m.GetTimestampMs()) + } + otelHistogram.DataPoints[i] = dp + } + return otelHistogram +} + +func convertBuckets(buckets []*dto.Bucket, sampleCount uint64) ([]float64, []uint64, []metricdata.Exemplar[float64]) { + if len(buckets) == 0 { + // This should never happen + return nil, nil, nil + } + // buckets will only include the +Inf bucket if there is an exemplar for it + // https://github.com/prometheus/client_golang/blob/d038ab96c0c7b9cd217a39072febd610bcdf1fd8/prometheus/metric.go#L189 + // we need to handle the case where it is present, or where it is missing. + hasInf := math.IsInf(buckets[len(buckets)-1].GetUpperBound(), +1) + var bounds []float64 + var bucketCounts []uint64 + if hasInf { + bounds = make([]float64, len(buckets)-1) + bucketCounts = make([]uint64, len(buckets)) + } else { + bounds = make([]float64, len(buckets)) + bucketCounts = make([]uint64, len(buckets)+1) + } + exemplars := make([]metricdata.Exemplar[float64], 0) + for i, bucket := range buckets { + // The last bound may be the +Inf bucket, which is implied in OTel, but + // is explicit in Prometheus. Skip the last boundary if it is the +Inf + // bound. 
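+		// For example (hypothetical values): upper bounds [0.5, 1, +Inf] with
+		// cumulative counts [1, 4, 5] become Bounds [0.5, 1] and BucketCounts
+		// [1, 4, 5]; when the +Inf bucket is absent, the final count is later
+		// filled in from sampleCount.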
+ if bound := bucket.GetUpperBound(); !math.IsInf(bound, +1) { + bounds[i] = bound + } + bucketCounts[i] = bucket.GetCumulativeCount() + if ex := bucket.GetExemplar(); ex != nil { + exemplars = append(exemplars, convertExemplar(ex)) + } + } + if !hasInf { + // The Inf bucket was missing, so set the last bucket counts to the + // overall count + bucketCounts[len(bucketCounts)-1] = sampleCount + } + return bounds, bucketCounts, exemplars +} + +func convertSummary(metrics []*dto.Metric, now time.Time) metricdata.Summary { + otelSummary := metricdata.Summary{ + DataPoints: make([]metricdata.SummaryDataPoint, len(metrics)), + } + for i, m := range metrics { + dp := metricdata.SummaryDataPoint{ + Attributes: convertLabels(m.GetLabel()), + StartTime: processStartTime, + Time: now, + Count: m.GetSummary().GetSampleCount(), + Sum: m.GetSummary().GetSampleSum(), + QuantileValues: convertQuantiles(m.GetSummary().GetQuantile()), + } + createdTs := m.GetSummary().GetCreatedTimestamp() + if createdTs.IsValid() { + dp.StartTime = createdTs.AsTime() + } + if t := m.GetTimestampMs(); t != 0 { + dp.Time = time.UnixMilli(t) + } + otelSummary.DataPoints[i] = dp + } + return otelSummary +} + +func convertQuantiles(quantiles []*dto.Quantile) []metricdata.QuantileValue { + otelQuantiles := make([]metricdata.QuantileValue, len(quantiles)) + for i, quantile := range quantiles { + dp := metricdata.QuantileValue{ + Quantile: quantile.GetQuantile(), + Value: quantile.GetValue(), + } + otelQuantiles[i] = dp + } + return otelQuantiles +} + +func convertLabels(labels []*dto.LabelPair) attribute.Set { + kvs := make([]attribute.KeyValue, len(labels)) + for i, l := range labels { + kvs[i] = attribute.String(l.GetName(), l.GetValue()) + } + return attribute.NewSet(kvs...) +} + +func convertExemplar(exemplar *dto.Exemplar) metricdata.Exemplar[float64] { + attrs := make([]attribute.KeyValue, 0) + var traceID, spanID []byte + // find the trace ID and span ID in attributes, if it exists + for _, label := range exemplar.GetLabel() { + if label.GetName() == traceIDLabel { + traceID = []byte(label.GetValue()) + } else if label.GetName() == spanIDLabel { + spanID = []byte(label.GetValue()) + } else { + attrs = append(attrs, attribute.String(label.GetName(), label.GetValue())) + } + } + return metricdata.Exemplar[float64]{ + Value: exemplar.GetValue(), + Time: exemplar.GetTimestamp().AsTime(), + TraceID: traceID, + SpanID: spanID, + FilteredAttributes: attrs, + } +} + +type multierr []error + +func (e multierr) errOrNil() error { + if len(e) == 0 { + return nil + } else if len(e) == 1 { + return e[0] + } + return e +} + +func (e multierr) Error() string { + es := make([]string, len(e)) + for i, err := range e { + es[i] = fmt.Sprintf("* %s", err) + } + return strings.Join(es, "\n\t") +} diff --git a/vendor/go.opentelemetry.io/contrib/detectors/aws/lambda/detector.go b/vendor/go.opentelemetry.io/contrib/detectors/aws/lambda/detector.go index e3daaa4d..420d876b 100644 --- a/vendor/go.opentelemetry.io/contrib/detectors/aws/lambda/detector.go +++ b/vendor/go.opentelemetry.io/contrib/detectors/aws/lambda/detector.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) // For a complete list of reserved environment variables in Lambda, see: diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go 
b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go index ddcea0cb..0dcb6c0c 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/metrics.go @@ -10,11 +10,13 @@ import ( "net" "net/http" "os" + "strings" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + prometheusbridge "go.opentelemetry.io/contrib/bridges/prometheus" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" @@ -52,9 +54,13 @@ func WithFallbackMetricReader(metricReaderFactory func(ctx context.Context) (met // OTEL_EXPORTER_PROMETHEUS_PORT (defaulting to 9464) define the host and port for the // Prometheus exporter's HTTP server. // +// Experimental: OTEL_METRICS_PRODUCERS can be used to configure metric producers. +// Supported values: prometheus, none. Multiple values can be specified separated by commas. +// // An error is returned if an environment value is set to an unhandled value. // // Use [RegisterMetricReader] to handle more values of OTEL_METRICS_EXPORTER. +// Use [RegisterMetricProducer] to handle more values of OTEL_METRICS_PRODUCERS. // // Use [WithFallbackMetricReader] option to change the returned exporter // when OTEL_METRICS_EXPORTER is unset or empty. @@ -71,10 +77,35 @@ func RegisterMetricReader(name string, factory func(context.Context) (metric.Rea must(metricsSignal.registry.store(name, factory)) } -var metricsSignal = newSignal[metric.Reader]("OTEL_METRICS_EXPORTER") +// RegisterMetricProducer sets the metric.Producer factory to be used when the +// OTEL_METRICS_PRODUCERS environment variable contains the producer name. This +// will panic if name has already been registered. +func RegisterMetricProducer(name string, factory func(context.Context) (metric.Producer, error)) { + must(metricsProducers.registry.store(name, factory)) +} + +// WithFallbackMetricProducer sets the fallback producer to use when no producer +// is configured through the OTEL_METRICS_PRODUCERS environment variable. 
+func WithFallbackMetricProducer(producerFactory func(ctx context.Context) (metric.Producer, error)) { + metricsProducers.fallbackProducer = producerFactory +} + +var ( + metricsSignal = newSignal[metric.Reader]("OTEL_METRICS_EXPORTER") + metricsProducers = newProducerRegistry("OTEL_METRICS_PRODUCERS") +) func init() { RegisterMetricReader("otlp", func(ctx context.Context) (metric.Reader, error) { + producers, err := metricsProducers.create(ctx) + if err != nil { + return nil, err + } + readerOpts := []metric.PeriodicReaderOption{} + for _, producer := range producers { + readerOpts = append(readerOpts, metric.WithProducer(producer)) + } + proto := os.Getenv(otelExporterOTLPProtoEnvKey) if proto == "" { proto = "http/protobuf" @@ -86,33 +117,54 @@ func init() { if err != nil { return nil, err } - return metric.NewPeriodicReader(r), nil + return metric.NewPeriodicReader(r, readerOpts...), nil case "http/protobuf": r, err := otlpmetrichttp.New(ctx) if err != nil { return nil, err } - return metric.NewPeriodicReader(r), nil + return metric.NewPeriodicReader(r, readerOpts...), nil default: return nil, errInvalidOTLPProtocol } }) RegisterMetricReader("console", func(ctx context.Context) (metric.Reader, error) { + producers, err := metricsProducers.create(ctx) + if err != nil { + return nil, err + } + readerOpts := []metric.PeriodicReaderOption{} + for _, producer := range producers { + readerOpts = append(readerOpts, metric.WithProducer(producer)) + } + r, err := stdoutmetric.New() if err != nil { return nil, err } - return metric.NewPeriodicReader(r), nil + return metric.NewPeriodicReader(r, readerOpts...), nil }) RegisterMetricReader("none", func(ctx context.Context) (metric.Reader, error) { return newNoopMetricReader(), nil }) RegisterMetricReader("prometheus", func(ctx context.Context) (metric.Reader, error) { // create an isolated registry instead of using the global registry -- - the user might not want to mix OTel with non-OTel metrics + the user might not want to mix OTel with non-OTel metrics. + // Those that want to commingle metrics from the global registry can use + // OTEL_METRICS_PRODUCERS=prometheus reg := prometheus.NewRegistry() - reader, err := promexporter.New(promexporter.WithRegisterer(reg)) + exporterOpts := []promexporter.Option{promexporter.WithRegisterer(reg)} + + producers, err := metricsProducers.create(ctx) + if err != nil { + return nil, err + } + for _, producer := range producers { + exporterOpts = append(exporterOpts, promexporter.WithProducer(producer)) + } + + reader, err := promexporter.New(exporterOpts...) 
if err != nil { return nil, err } @@ -148,6 +200,13 @@ func init() { return readerWithServer{lis.Addr(), reader, &server}, nil }) + + RegisterMetricProducer("prometheus", func(ctx context.Context) (metric.Producer, error) { + return prometheusbridge.NewMetricProducer(), nil + }) + RegisterMetricProducer("none", func(ctx context.Context) (metric.Producer, error) { + return newNoopMetricProducer(), nil + }) } type readerWithServer struct { @@ -170,3 +229,61 @@ func getenv(key, fallback string) string { } return result } + +type producerRegistry struct { + envKey string + fallbackProducer func(context.Context) (metric.Producer, error) + registry *registry[metric.Producer] +} + +func newProducerRegistry(envKey string) producerRegistry { + return producerRegistry{ + envKey: envKey, + registry: ®istry[metric.Producer]{ + names: make(map[string]func(context.Context) (metric.Producer, error)), + }, + } +} + +func (pr producerRegistry) create(ctx context.Context) ([]metric.Producer, error) { + expType := os.Getenv(pr.envKey) + if expType == "" { + if pr.fallbackProducer != nil { + producer, err := pr.fallbackProducer(ctx) + if err != nil { + return nil, err + } + + return []metric.Producer{producer}, nil + } + + return nil, nil + } + + producers := dedupedMetricProducers(expType) + metricProducers := make([]metric.Producer, 0, len(producers)) + for _, producer := range producers { + producer, err := pr.registry.load(ctx, producer) + if err != nil { + return nil, err + } + + metricProducers = append(metricProducers, producer) + } + + return metricProducers, nil +} + +func dedupedMetricProducers(envValue string) []string { + producers := make(map[string]struct{}) + for _, producer := range strings.Split(envValue, ",") { + producers[producer] = struct{}{} + } + + result := make([]string, 0, len(producers)) + for producer := range producers { + result = append(result, producer) + } + + return result +} diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go index c0921941..7ea4bd69 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/noop.go @@ -7,6 +7,7 @@ import ( "context" "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/trace" ) @@ -46,3 +47,13 @@ func IsNoneMetricReader(e metric.Reader) bool { _, ok := e.(noopMetricReader) return ok } + +type noopMetricProducer struct{} + +func (e noopMetricProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) { + return nil, nil +} + +func newNoopMetricProducer() noopMetricProducer { + return noopMetricProducer{} +} diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/registry.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/registry.go index a7032e10..3d9abcaf 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/registry.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/registry.go @@ -21,9 +21,9 @@ type registry[T any] struct { } var ( - // errUnknownExporter is returned when an unknown exporter name is used in - // the OTEL_*_EXPORTER environment variables. - errUnknownExporter = errors.New("unknown exporter") + // errUnknownExporterProducer is returned when an unknown exporter name is used in + // the OTEL_*_EXPORTER or OTEL_METRICS_PRODUCERS environment variables. 
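+	// For example, OTEL_METRICS_PRODUCERS=prometheus loads the registered
+	// Prometheus bridge producer, while an unregistered name (say, a
+	// hypothetical OTEL_METRICS_PRODUCERS=foo) fails with this error.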
+ errUnknownExporterProducer = errors.New("unknown exporter or metrics producer") // errInvalidOTLPProtocol is returned when an invalid protocol is used in // the OTEL_EXPORTER_OTLP_PROTOCOL environment variable. @@ -35,7 +35,7 @@ var ( // load returns tries to find the exporter factory with the key and // then execute the factory, returning the created SpanExporter. -// errUnknownExporter is returned if the registration is missing and the error from +// errUnknownExporterProducer is returned if the registration is missing and the error from // executing the factory if not nil. func (r *registry[T]) load(ctx context.Context, key string) (T, error) { r.mu.Lock() @@ -43,7 +43,7 @@ func (r *registry[T]) load(ctx context.Context, key string) (T, error) { factory, ok := r.names[key] if !ok { var zero T - return zero, errUnknownExporter + return zero, errUnknownExporterProducer } return factory(ctx) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go index 50a56fe4..1767c1cc 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws/version.go @@ -5,7 +5,7 @@ package otelaws // import "go.opentelemetry.io/contrib/instrumentation/github.co // Version is the current release version of the AWS SDKv2 instrumentation. func Version() string { - return "0.51.0" + return "0.52.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 0fcc279d..22e485dd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.51.0" + return "0.52.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go index b5a3533d..18932c84 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go @@ -5,7 +5,7 @@ package b3 // import "go.opentelemetry.io/contrib/propagators/b3" // Version is the current release version of the B3 propagator. func Version() string { - return "1.26.0" + return "1.27.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index cb28b36b..e5946bfb 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,41 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.27.0/0.49.0/0.3.0] 2024-05-21 + +### Added + +- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242) +- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. 
(#5258) +- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263) +- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276) +- Add metrics in the `otel-collector` example. (#5283) +- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304) + - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`. + - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument. +- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349) + +### Changed + +- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189) +- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189) +- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) +- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241) +- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) +- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) +- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374) + +### Fixed + +- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306) +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) +- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365) +- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371) +- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375) + ## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 ### Added @@ -33,6 +68,11 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177) - Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214) +- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244) + +### Fixed + +- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159) ## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05 @@ -2921,7 +2961,8 @@ It contains api and sdk for trace and meter. 
- CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.26.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.27.0...HEAD +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 [1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 [1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 31d336d9..88f4c7d0 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7847b459..2176ce52 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -570,6 +570,9 @@ functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + ### Testing The tests should never leak goroutines. @@ -629,7 +632,6 @@ should be canceled. - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers @@ -643,6 +645,7 @@ should be canceled. 
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index ca2f0ad0..a9845a88 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default -ci: generate dependabot-check license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default +ci: generate license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage # Tools @@ -39,9 +39,6 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -70,7 +67,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -252,15 +249,6 @@ license-check: exit 1; \ fi -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) - -.PHONY: dependabot-generate -dependabot-generate: $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 47f9a41f..5a890931 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -15,7 +15,7 @@ It provides a set of APIs to directly measure performance and behavior of your s |---------|--------------------| | Traces | Stable | | Metrics | Stable | -| Logs | In development[^1] | +| Logs | Beta[^1] | Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) @@ -97,12 +97,12 @@ export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). 
-| Exporter | Metrics | Traces | -|---------------------------------------|:-------:|:------:| -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | +| Exporter | Logs | Metrics | Traces | +|---------------------------------------|:----:|:-------:|:------:| +| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ | +| [Prometheus](./exporters/prometheus/) | | ✓ | | +| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ | +| [Zipkin](./exporters/zipkin/) | | | ✓ | ## Contributing diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index d2691d0b..940f57f3 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index b3203141..9ea0ecbb 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -231,15 +231,27 @@ func (v Value) Emit() string { case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 75773bc1..f98c54a3 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -335,9 +335,9 @@ func (m Member) String() string { // A key is just an ASCII string. A value is restricted to be // US-ASCII characters excluding CTLs, whitespace, // DQUOTE, comma, semicolon, and backslash. 
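The change that follows drops `fmt.Sprintf` from the baggage hot path: `Member.String` runs for every member when baggage is serialized, and plain concatenation avoids the format-string parsing and `interface{}` boxing that `Sprintf` incurs. A minimal benchmark sketch of the difference (package, file name, and key/value pairs are illustrative, not from the diff; save as `bench_test.go` and run `go test -bench=.`):

```go
package bench

import (
	"fmt"
	"testing"
)

// sink keeps the compiler from eliminating the benchmarked work.
var sink string

// key and value stand in for Member.key and the escaped Member.value.
var key, value = "userId", "alice"

func BenchmarkSprintf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = fmt.Sprintf("%s%s%s", key, "=", value)
	}
}

func BenchmarkConcat(b *testing.B) {
	for i := 0; i < b.N; i++ {
		sink = key + "=" + value
	}
}
```

Concatenation typically wins by skipping reflection-driven formatting entirely; both forms produce the same string.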
- s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + s += propertyDelimiter + m.properties.String() } return s } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index 4a86c02c..99a5e9f0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "1.26.0" + return "1.27.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index c69f4111..60551370 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. func Version() string { - return "1.26.0" + return "1.27.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index e3f7f431..8f84a799 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -256,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { // Generic Options +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint @@ -263,6 +266,8 @@ func WithEndpoint(endpoint string) GenericOption { }) } +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". func WithEndpointURL(v string) GenericOption { return newGenericOption(func(cfg Config) Config { u, err := url.Parse(v) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index a9e7f933..bbad0e6d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -53,9 +53,11 @@ func WithInsecure() Option { return wrappedOption{otlpconfig.WithInsecure()} } -// WithEndpoint sets the target endpoint the Exporter will connect to. +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. 
The provided endpoint should resemble "example.com:4317" (no +// scheme or path). // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // will take precedence. @@ -71,9 +73,11 @@ func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } -// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) +// the Exporter will connect to. The provided endpoint URL should resemble +// "https://example.com:4318/v1/traces". // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // will take precedence. @@ -84,7 +88,7 @@ func WithEndpoint(endpoint string) Option { // If an invalid URL is provided, the default value will be kept. // // By default, if an environment variable is not set, and this option is not -// passed, "localhost:4317" will be used. +// passed, "https://localhost:4317/v1/traces" will be used. // // This option has no effect if WithGRPCConn is used. func WithEndpointURL(u string) Option { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index 45fefc4d..2ebbc752 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -256,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { // Generic Options +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint @@ -263,6 +266,8 @@ func WithEndpoint(endpoint string) GenericOption { }) } +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". func WithEndpointURL(v string) GenericOption { return newGenericOption(func(cfg Config) Config { u, err := url.Parse(v) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go index 4b2758e4..6497f3cc 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -56,12 +56,15 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config return w.ApplyHTTPOption(cfg) } -// WithEndpoint sets the target endpoint the Exporter will connect to. +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4318" (no +// scheme or path). 
// -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// will take precedence. Note, both environment variables include the full +// scheme and path, while WithEndpoint sets only the host and port. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -74,9 +77,10 @@ func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } -// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) the +// Exporter will connect to. // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // will take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index f07a809f..fc7190d9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.26.0" + return "1.27.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go index 96006ab2..fc155d79 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric/exporter.go @@ -51,12 +51,8 @@ func (e *exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation { } func (e *exporter) Export(ctx context.Context, data *metricdata.ResourceMetrics) error { - select { - case <-ctx.Done(): - // Don't do anything if the context has already timed out. - return ctx.Err() - default: - // Context is still valid, continue. + if err := ctx.Err(); err != nil { + return err } if e.redactTimestamps { redactTimestamps(data) @@ -67,18 +63,18 @@ func (e *exporter) Export(ctx context.Context, data *metricdata.ResourceMetrics) return e.encVal.Load().(encoderHolder).Encode(data) } -func (e *exporter) ForceFlush(ctx context.Context) error { +func (e *exporter) ForceFlush(context.Context) error { // exporter holds no state, nothing to flush. 
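In the stdout exporters, the `select`-on-`ctx.Done()` guard in `Export` is replaced by a direct `ctx.Err()` check, and methods that do no context-bound work (`ForceFlush`, `Shutdown` below) now return `nil` instead of surfacing a stale `ctx.Err()`. The two guard forms are equivalent: `ctx.Err()` is non-nil exactly when `ctx.Done()` is closed. A runnable sketch of both (function names are illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// exportSelect bails out via the older select-based idiom.
func exportSelect(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // already canceled or past its deadline
	default:
	}
	// ... actual export work would go here ...
	return nil
}

// exportErr bails out via the terser ctx.Err() check used in this diff.
func exportErr(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	// ... actual export work would go here ...
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done() // wait for the deadline to pass

	fmt.Println(errors.Is(exportSelect(ctx), context.DeadlineExceeded)) // true
	fmt.Println(errors.Is(exportErr(ctx), context.DeadlineExceeded))    // true
}
```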
- return ctx.Err() + return nil } -func (e *exporter) Shutdown(ctx context.Context) error { +func (e *exporter) Shutdown(context.Context) error { e.shutdownOnce.Do(func() { e.encVal.Store(encoderHolder{ encoder: shutdownEncoder{}, }) }) - return ctx.Err() + return nil } func (e *exporter) MarshalLog() interface{} { diff --git a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go index dcbd04dd..b19eb0df 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go +++ b/vendor/go.opentelemetry.io/otel/exporters/stdout/stdouttrace/trace.go @@ -47,6 +47,9 @@ type Exporter struct { // ExportSpans writes spans in json format to stdout. func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + if err := ctx.Err(); err != nil { + return err + } e.stoppedMu.RLock() stopped := e.stopped e.stoppedMu.RUnlock() @@ -88,11 +91,6 @@ func (e *Exporter) Shutdown(ctx context.Context) error { e.stopped = true e.stoppedMu.Unlock() - select { - case <-ctx.Done(): - return ctx.Err() - default: - } return nil } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 0c8ed20a..3a0cc42f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -281,6 +281,32 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco } } +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + type siCounter struct { embedded.Int64Counter @@ -358,3 +384,29 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index f2189859..590fa738 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -164,6 +164,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, nil } +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) @@ -230,6 +241,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, nil } +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index c7234f4b..cf23db77 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -39,7 +39,7 @@ type Float64ObservableCounter interface { } // Float64ObservableCounterConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string @@ -97,7 +97,7 @@ type Float64ObservableUpDownCounter interface { } // Float64ObservableUpDownCounterConfig contains options for asynchronous -// counter instruments that record int64 values. +// counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string @@ -154,7 +154,7 @@ type Float64ObservableGauge interface { } // Float64ObservableGaugeConfig contains options for asynchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go index 15bebae0..1a9dc680 100644 --- a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -102,6 +102,16 @@ type Float64Counter interface{ float64Counter() } // the API package). type Float64Histogram interface{ float64Histogram() } +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // @@ -174,6 +184,16 @@ type Int64Counter interface{ int64Counter() } // the API package). 
type Int64Histogram interface{ int64Histogram() } +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. // diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index 45141319..ea52e402 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -16,6 +16,7 @@ type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption + Int64GaugeOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption @@ -23,6 +24,7 @@ type InstrumentOption interface { Float64CounterOption Float64UpDownCounterOption Float64HistogramOption + Float64GaugeOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption @@ -51,6 +53,11 @@ func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c @@ -81,6 +88,11 @@ func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c @@ -116,6 +128,11 @@ func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64Histogra return c } +func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c @@ -146,6 +163,11 @@ func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfi return c } +func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.unit = string(o) + return c +} + func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 7aa82e0c..460b3f9b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -58,6 +58,10 @@ type Meter interface { // synchronously record the distribution of int64 measurements during a // computational operation. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and + // configured with options. 
The instrument is used to synchronously record + // instantaneous int64 measurements during a computational operation. + Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -104,6 +108,10 @@ type Meter interface { // synchronously record the distribution of float64 measurements during a // computational operation. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and + // configured with options. The instrument is used to synchronously record + // instantaneous float64 measurements during a computational operation. + Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index 4524a57d..ca6fcbdc 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -32,6 +32,8 @@ var ( _ metric.Float64UpDownCounter = Float64UpDownCounter{} _ metric.Int64Histogram = Int64Histogram{} _ metric.Float64Histogram = Float64Histogram{} + _ metric.Int64Gauge = Int64Gauge{} + _ metric.Float64Gauge = Float64Gauge{} _ metric.Int64ObservableCounter = Int64ObservableCounter{} _ metric.Float64ObservableCounter = Float64ObservableCounter{} _ metric.Int64ObservableGauge = Int64ObservableGauge{} @@ -76,6 +78,12 @@ func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int6 return Int64Histogram{}, nil } +// Int64Gauge returns a Gauge used to record int64 measurements that +// produces no telemetry. +func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + return Int64Gauge{}, nil +} + // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { @@ -112,6 +120,12 @@ func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric. return Float64Histogram{}, nil } +// Float64Gauge returns a Gauge used to record float64 measurements that +// produces no telemetry. +func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + return Float64Gauge{}, nil +} + // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { @@ -197,6 +211,20 @@ type Float64Histogram struct{ embedded.Float64Histogram } // Record performs no operation. func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. 
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + // Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record // int64 measurements. It produces no telemetry. type Int64ObservableCounter struct { diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go index 5420d546..8403a4ba 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -28,7 +28,7 @@ type Float64Counter interface { } // Float64CounterConfig contains options for synchronous counter instruments that -// record int64 values. +// record float64 values. type Float64CounterConfig struct { description string unit string @@ -81,7 +81,7 @@ type Float64UpDownCounter interface { } // Float64UpDownCounterConfig contains options for synchronous counter -// instruments that record int64 values. +// instruments that record float64 values. type Float64UpDownCounterConfig struct { description string unit string @@ -133,8 +133,8 @@ type Float64Histogram interface { Record(ctx context.Context, incr float64, options ...RecordOption) } -// Float64HistogramConfig contains options for synchronous counter instruments -// that record int64 values. +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. type Float64HistogramConfig struct { description string unit string @@ -172,3 +172,55 @@ func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value float64, options ...RecordOption) +} + +// Float64GaugeConfig contains options for synchronous gauge instruments that +// record float64 values. +type Float64GaugeConfig struct { + description string + unit string +} + +// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts +// applied. +func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig { + var config Float64GaugeConfig + for _, o := range opts { + config = o.applyFloat64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64GaugeConfig) Unit() string { + return c.unit +} + +// Float64GaugeOption applies options to a [Float64GaugeConfig]. 
See +// [InstrumentOption] for other options that can be used as a +// Float64GaugeOption. +type Float64GaugeOption interface { + applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go index 0dcbf06d..783fdfba 100644 --- a/vendor/go.opentelemetry.io/otel/metric/syncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go @@ -133,7 +133,7 @@ type Int64Histogram interface { Record(ctx context.Context, incr int64, options ...RecordOption) } -// Int64HistogramConfig contains options for synchronous counter instruments +// Int64HistogramConfig contains options for synchronous histogram instruments // that record int64 values. type Int64HistogramConfig struct { description string @@ -172,3 +172,55 @@ func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. 
+type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 00000000..8c5ac55c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 7eaa0769..07923ed8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -22,7 +22,7 @@ const ( BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. // 512). Note: it must be less than or equal to - // EnvBatchSpanProcessorMaxQueueSize. + // BatchSpanProcessorMaxQueueSize. BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" // AttributeValueLengthKey is the maximum allowed attribute value size. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 9155c242..c774a468 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -19,21 +19,21 @@ import ( // Note: This will only return non-nil values when the experimental exemplar // feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable // is not set to always_off. -func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir[N] { +func reservoirFunc(agg Aggregation) func() exemplar.Reservoir { if !x.Exemplars.Enabled() { return nil } // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults - resF := func() func() exemplar.Reservoir[N] { + resF := func() func() exemplar.Reservoir { // Explicit bucket histogram aggregation with more than 1 bucket will // use AlignedHistogramBucketExemplarReservoir. 
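This selection implements the SDK specification's exemplar defaults: the feature as a whole stays gated behind the experimental `x.Exemplars` flag (enabled via the `OTEL_GO_X_EXEMPLAR` environment variable in this SDK), and `OTEL_METRICS_EXEMPLAR_FILTER` then picks the behavior, with unrecognized values falling back to `trace_based`. A toy mirror of that filter selection (function name hypothetical, not part of the diff):

```go
package main

import (
	"fmt"
	"os"
)

// filterMode mirrors the switch in reservoirFunc: "always_on" keeps the
// aggregation's default reservoir unfiltered, "always_off" drops every
// exemplar, and anything else (including unset) means "trace_based", i.e.
// only measurements made under a sampled span are offered.
func filterMode() string {
	switch v := os.Getenv("OTEL_METRICS_EXEMPLAR_FILTER"); v {
	case "always_on", "always_off":
		return v
	default:
		return "trace_based"
	}
}

func main() { fmt.Println(filterMode()) }
```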
a, ok := agg.(AggregationExplicitBucketHistogram) if ok && len(a.Boundaries) > 0 { cp := slices.Clone(a.Boundaries) - return func() exemplar.Reservoir[N] { + return func() exemplar.Reservoir { bounds := cp - return exemplar.Histogram[N](bounds) + return exemplar.Histogram(bounds) } } @@ -61,8 +61,8 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir } } - return func() exemplar.Reservoir[N] { - return exemplar.FixedSize[N](n) + return func() exemplar.Reservoir { + return exemplar.FixedSize(n) } } @@ -73,12 +73,12 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.Reservoir case "always_on": return resF() case "always_off": - return exemplar.Drop[N] + return exemplar.Drop case "trace_based": fallthrough default: newR := resF() - return func() exemplar.Reservoir[N] { + return func() exemplar.Reservoir { return exemplar.SampledFilter(newR()) } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index fa8ce053..f9768fd1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -30,28 +30,32 @@ type InstrumentKind uint8 const ( // instrumentKindUndefined is an undefined instrument kind, it should not // be used by any initialized type. - instrumentKindUndefined InstrumentKind = iota // nolint:deadcode,varcheck,unused + instrumentKindUndefined InstrumentKind = 0 // nolint:deadcode,varcheck,unused // InstrumentKindCounter identifies a group of instruments that record // increasing values synchronously with the code path they are measuring. - InstrumentKindCounter + InstrumentKindCounter InstrumentKind = 1 // InstrumentKindUpDownCounter identifies a group of instruments that // record increasing and decreasing values synchronously with the code path // they are measuring. - InstrumentKindUpDownCounter + InstrumentKindUpDownCounter InstrumentKind = 2 // InstrumentKindHistogram identifies a group of instruments that record a // distribution of values synchronously with the code path they are // measuring. - InstrumentKindHistogram + InstrumentKindHistogram InstrumentKind = 3 // InstrumentKindObservableCounter identifies a group of instruments that // record increasing values in an asynchronous callback. - InstrumentKindObservableCounter + InstrumentKindObservableCounter InstrumentKind = 4 // InstrumentKindObservableUpDownCounter identifies a group of instruments // that record increasing and decreasing values in an asynchronous // callback. - InstrumentKindObservableUpDownCounter + InstrumentKindObservableUpDownCounter InstrumentKind = 5 // InstrumentKindObservableGauge identifies a group of instruments that // record current values in an asynchronous callback. - InstrumentKindObservableGauge + InstrumentKindObservableGauge InstrumentKind = 6 + // InstrumentKindGauge identifies a group of instruments that record + // instantaneous values synchronously with the code path they are + // measuring. + InstrumentKindGauge InstrumentKind = 7 ) type nonComparable [0]func() // nolint: unused // This is indeed used. 
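The new `InstrumentKindGauge` (with the existing kinds pinned to explicit values, presumably to keep them stable as the list grows) corresponds to this release's headline API addition: synchronous gauges. Unlike observable gauges, values are recorded inline at the call site, with no callback registration. A minimal usage sketch, assuming the global provider is configured elsewhere; the meter and instrument names are illustrative:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")

	// Float64Gauge is new in v1.27.0; WithDescription and WithUnit are
	// plain InstrumentOptions, which now also satisfy Float64GaugeOption.
	gauge, err := meter.Float64Gauge("queue.utilization",
		metric.WithDescription("Current queue utilization."),
		metric.WithUnit("1"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Record the instantaneous value right where it is measured.
	gauge.Record(context.Background(), 0.75)
}
```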
@@ -175,12 +179,14 @@ type int64Inst struct { embedded.Int64Counter embedded.Int64UpDownCounter embedded.Int64Histogram + embedded.Int64Gauge } var ( _ metric.Int64Counter = (*int64Inst)(nil) _ metric.Int64UpDownCounter = (*int64Inst)(nil) _ metric.Int64Histogram = (*int64Inst)(nil) + _ metric.Int64Gauge = (*int64Inst)(nil) ) func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { @@ -205,12 +211,14 @@ type float64Inst struct { embedded.Float64Counter embedded.Float64UpDownCounter embedded.Float64Histogram + embedded.Float64Gauge } var ( _ metric.Float64Counter = (*float64Inst)(nil) _ metric.Float64UpDownCounter = (*float64Inst)(nil) _ metric.Float64Histogram = (*float64Inst)(nil) + _ metric.Float64Gauge = (*float64Inst)(nil) ) func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go index d5f9e982..25ea6244 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrumentkind_string.go @@ -15,11 +15,12 @@ func _() { _ = x[InstrumentKindObservableCounter-4] _ = x[InstrumentKindObservableUpDownCounter-5] _ = x[InstrumentKindObservableGauge-6] + _ = x[InstrumentKindGauge-7] } -const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGauge" +const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGaugeGauge" -var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107} +var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107, 112} func (i InstrumentKind) String() string { if i >= InstrumentKind(len(_InstrumentKind_index)-1) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index aa5229b0..c9976de6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -39,7 +39,7 @@ type Builder[N int64 | float64] struct { // // If this is not provided a default factory function that returns an // exemplar.Drop reservoir will be used. - ReservoirFunc func() exemplar.Reservoir[N] + ReservoirFunc func() exemplar.Reservoir // AggregationLimit is the cardinality limit of measurement attributes. Any // measurement for new attributes once the limit has been reached will be // aggregated into a single aggregate for the "otel.metric.overflow" @@ -50,12 +50,12 @@ type Builder[N int64 | float64] struct { AggregationLimit int } -func (b Builder[N]) resFunc() func() exemplar.Reservoir[N] { +func (b Builder[N]) resFunc() func() exemplar.Reservoir { if b.ReservoirFunc != nil { return b.ReservoirFunc } - return exemplar.Drop[N] + return exemplar.Drop } type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) @@ -74,21 +74,26 @@ func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { } // LastValue returns a last-value aggregate function input and output. -// -// The Builder.Temporality is ignored and delta is use always. 
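The hunk below makes last-value aggregation temporality-aware instead of always behaving as delta: delta collections clear recorded points and reset the start time, cumulative collections keep reporting them, and the new `PrecomputedLastValue` variant only ever returns values from the previous collection cycle. Temporality is chosen per reader; a sketch of forcing delta for every instrument kind (selector body illustrative):

```go
package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	// Every instrument collected through this reader, gauges included,
	// will now produce delta-temporality aggregates.
	reader := sdkmetric.NewManualReader(
		sdkmetric.WithTemporalitySelector(func(sdkmetric.InstrumentKind) metricdata.Temporality {
			return metricdata.DeltaTemporality
		}),
	)
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(context.Background()) }()

	fmt.Println("reader configured for delta temporality")
}
```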
func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { - // Delta temporality is the only temporality that makes semantic sense for - // a last-value aggregate. lv := newLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} - return b.filter(lv.measure), func(dest *metricdata.Aggregation) int { - // Ignore if dest is not a metricdata.Gauge. The chance for memory - // reuse of the DataPoints is missed (better luck next time). - gData, _ := (*dest).(metricdata.Gauge[N]) - lv.computeAggregation(&gData.DataPoints) - *dest = gData - - return len(gData.DataPoints) +// PrecomputedLastValue returns a last-value aggregate function input and +// output. The aggregation returned from the returned ComputeAggregation +// function will always only return values from the previous collection cycle. +func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) { + lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative } } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go new file mode 100644 index 00000000..170ae8e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "sync" + + "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var exemplarPool = sync.Pool{ + New: func() any { return new([]exemplar.Exemplar) }, +} + +func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { + dest := exemplarPool.Get().(*[]exemplar.Exemplar) + defer func() { + *dest = (*dest)[:0] + exemplarPool.Put(dest) + }() + + *dest = reset(*dest, len(*out), cap(*out)) + + f(dest) + + *out = reset(*out, len(*dest), cap(*dest)) + for i, e := range *dest { + (*out)[i].FilteredAttributes = e.FilteredAttributes + (*out)[i].Time = e.Time + (*out)[i].SpanID = e.SpanID + (*out)[i].TraceID = e.TraceID + + switch e.Value.Type() { + case exemplar.Int64ValueType: + (*out)[i].Value = N(e.Value.Int64()) + case exemplar.Float64ValueType: + (*out)[i].Value = N(e.Value.Float64()) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index a6629ee3..902074b5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -31,7 +31,7 @@ const ( // expoHistogramDataPoint is a single data point in an exponential histogram. type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set - res exemplar.Reservoir[N] + res exemplar.Reservoir count uint64 min N @@ -282,7 +282,7 @@ func (b *expoBuckets) downscale(delta int) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. 
Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. -func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, @@ -305,7 +305,7 @@ type expoHistogram[N int64 | float64] struct { maxSize int maxScale int - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.Reservoir limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -333,7 +333,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib e.values[attr.Equivalent()] = v } v.record(value) - v.res.Offer(ctx, t, value, droppedAttr) + v.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) } func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { @@ -376,7 +376,7 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ } @@ -429,7 +429,7 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ // TODO (#3006): This will use an unbounded amount of memory if there diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index 911d7c18..213baf50 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -17,7 +17,7 @@ import ( type buckets[N int64 | float64] struct { attrs attribute.Set - res exemplar.Reservoir[N] + res exemplar.Reservoir counts []uint64 count uint64 @@ -48,13 +48,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.Reservoir limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.Reservoir) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have @@ -106,12 +106,12 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute if !s.noSum { b.sum(value) } - b.res.Offer(ctx, t, value, droppedAttr) + b.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) } // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. 
-func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir[N]) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.Reservoir) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, @@ -163,7 +163,7 @@ func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ } @@ -219,7 +219,7 @@ func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].Max = metricdata.NewExtrema(val.max) } - val.res.Collect(&hDPts[i].Exemplars) + collectExemplars(&hDPts[i].Exemplars, val.res.Collect) i++ // TODO (#3006): This will use an unbounded amount of memory if there diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index 73cf98c7..8f406dd2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -18,14 +18,15 @@ type datapoint[N int64 | float64] struct { attrs attribute.Set timestamp time.Time value N - res exemplar.Reservoir[N] + res exemplar.Reservoir } -func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *lastValue[N] { +func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *lastValue[N] { return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), values: make(map[attribute.Distinct]datapoint[N]), + start: now(), } } @@ -33,9 +34,10 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) type lastValue[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.Reservoir limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] + start time.Time } func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { @@ -53,28 +55,108 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. d.attrs = attr d.timestamp = t d.value = value - d.res.Offer(ctx, t, value, droppedAttr) + d.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) s.values[attr.Equivalent()] = d } -func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) { +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = now() + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + s.Lock() defer s.Unlock() + n := s.copyDpts(&gData.DataPoints) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. 
Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N]) int { n := len(s.values) *dest = reset(*dest, n, n) var i int for _, v := range s.values { (*dest)[i].Attributes = v.attrs - // The event time is the only meaningful timestamp, StartTime is - // ignored. + (*dest)[i].StartTime = s.start (*dest)[i].Time = v.timestamp (*dest)[i].Value = v.value - v.res.Collect(&(*dest)[i].Exemplars) + collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) i++ } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints) // Do not report stale values. clear(s.values) + // Update start time for delta temporality. + s.start = now() + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints) + // Do not report stale values. + clear(s.values) + *dest = gData + + return n } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 7514b95e..babe76ab 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -15,19 +15,19 @@ import ( type sumValue[N int64 | float64] struct { n N - res exemplar.Reservoir[N] + res exemplar.Reservoir attrs attribute.Set } // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.Reservoir[N] + newRes func() exemplar.Reservoir limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir[N]) *valueMap[N] { +func newValueMap[N int64 | float64](limit int, r func() exemplar.Reservoir) *valueMap[N] { return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -49,7 +49,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S v.attrs = attr v.n += value - v.res.Offer(ctx, t, value, droppedAttr) + v.res.Offer(ctx, t, exemplar.NewValue(value), droppedAttr) s.values[attr.Equivalent()] = v } @@ -57,7 +57,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. 
Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *sum[N] { +func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir) *sum[N] { return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -94,7 +94,7 @@ func (s *sum[N]) delta(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = val.n - val.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) i++ } // Do not report stale values. @@ -129,7 +129,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = value.n - value.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, value.res.Collect) // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not @@ -146,7 +146,7 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { // newPrecomputedSum returns an aggregator that summarizes a set of // observatrions as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. -func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir[N]) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.Reservoir) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -188,7 +188,7 @@ func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta - value.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, value.res.Collect) newReported[key] = value.n i++ @@ -226,7 +226,7 @@ func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = val.n - val.res.Collect(&dPts[i].Exemplars) + collectExemplars(&dPts[i].Exemplars, val.res.Collect) i++ } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go index 729c2793..bf21e45d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go @@ -8,18 +8,17 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // Drop returns a [Reservoir] that drops all measurements it is offered. -func Drop[N int64 | float64]() Reservoir[N] { return &dropRes[N]{} } +func Drop() Reservoir { return &dropRes{} } -type dropRes[N int64 | float64] struct{} +type dropRes struct{} // Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, time.Time, N, []attribute.KeyValue) {} +func (r *dropRes) Offer(context.Context, time.Time, Value, []attribute.KeyValue) {} // Collect resets dest. No exemplars will ever be returned. 
-func (r *dropRes[N]) Collect(dest *[]metricdata.Exemplar[N]) { +func (r *dropRes) Collect(dest *[]Exemplar) { *dest = (*dest)[:0] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go new file mode 100644 index 00000000..fcaa6a46 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Exemplar is a measurement sampled from a timeseries providing a typical +// example. +type Exemplar struct { + // FilteredAttributes are the attributes recorded with the measurement but + // filtered out of the timeseries' aggregated data. + FilteredAttributes []attribute.KeyValue + // Time is the time when the measurement was recorded. + Time time.Time + // Value is the measured value. + Value Value + // SpanID is the ID of the span that was active during the measurement. If + // no span was active or the span was not sampled this will be empty. + SpanID []byte `json:",omitempty"` + // TraceID is the ID of the trace the active span belonged to during the + // measurement. If no span was active or the span was not sampled this will + // be empty. + TraceID []byte `json:",omitempty"` +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go index 53c86d5c..d96aacc2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go @@ -14,15 +14,15 @@ import ( // SampledFilter returns a [Reservoir] wrapping r that will only offer measurements // to r if the passed context associated with the measurement contains a sampled // [go.opentelemetry.io/otel/trace.SpanContext]. -func SampledFilter[N int64 | float64](r Reservoir[N]) Reservoir[N] { - return filtered[N]{Reservoir: r} +func SampledFilter(r Reservoir) Reservoir { + return filtered{Reservoir: r} } -type filtered[N int64 | float64] struct { - Reservoir[N] +type filtered struct { + Reservoir } -func (f filtered[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { +func (f filtered) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { if trace.SpanContextFromContext(ctx).IsSampled() { f.Reservoir.Offer(ctx, t, n, a) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go index 463c8a7d..a6ff86d0 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go @@ -17,21 +17,30 @@ import ( // by bounds. // // The passed bounds will be sorted by this function. -func Histogram[N int64 | float64](bounds []float64) Reservoir[N] { +func Histogram(bounds []float64) Reservoir { slices.Sort(bounds) - return &histRes[N]{ + return &histRes{ bounds: bounds, - storage: newStorage[N](len(bounds) + 1), + storage: newStorage(len(bounds) + 1), } } -type histRes[N int64 | float64] struct { - *storage[N] +type histRes struct { + *storage // bounds are bucket bounds in ascending order. 
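
The SampledFilter decorator above gates exemplar sampling on trace context. A small runnable sketch of the same decision, using the real go.opentelemetry.io/otel/trace helper but an illustrative offerFunc stand-in for Reservoir.Offer:

    package main

    import (
    	"context"
    	"fmt"

    	"go.opentelemetry.io/otel/trace"
    )

    // offerFunc is an illustrative stand-in for Reservoir.Offer.
    type offerFunc func(ctx context.Context, v float64)

    // sampledOnly mirrors the SampledFilter wrapper: the inner reservoir
    // is only offered a measurement when the span recorded in ctx is
    // sampled.
    func sampledOnly(next offerFunc) offerFunc {
    	return func(ctx context.Context, v float64) {
    		if trace.SpanContextFromContext(ctx).IsSampled() {
    			next(ctx, v)
    		}
    	}
    }

    func main() {
    	dropped := true
    	offer := sampledOnly(func(_ context.Context, _ float64) {
    		dropped = false
    	})
    	// Background context carries no sampled span, so the measurement
    	// never reaches the wrapped reservoir.
    	offer(context.Background(), 1.0)
    	fmt.Println("offered to reservoir:", !dropped)
    }
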
bounds []float64 } -func (r *histRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { - r.store[sort.SearchFloat64s(r.bounds, float64(n))] = newMeasurement(ctx, t, n, a) +func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go index 923953cb..6753e116 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go @@ -10,7 +10,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // rng is used to make sampling decisions. @@ -50,14 +49,14 @@ func random() float64 { // are k or less measurements made, the Reservoir will sample each one. If // there are more than k, the Reservoir will then randomly sample all // additional measurement with a decreasing probability. -func FixedSize[N int64 | float64](k int) Reservoir[N] { - r := &randRes[N]{storage: newStorage[N](k)} +func FixedSize(k int) Reservoir { + r := &randRes{storage: newStorage(k)} r.reset() return r } -type randRes[N int64 | float64] struct { - *storage[N] +type randRes struct { + *storage // count is the number of measurement seen. count int64 @@ -69,7 +68,7 @@ type randRes[N int64 | float64] struct { w float64 } -func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute.KeyValue) { +func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December // 1994). "Reservoir-Sampling Algorithms of Time Complexity // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): @@ -125,7 +124,7 @@ func (r *randRes[N]) Offer(ctx context.Context, t time.Time, n N, a []attribute. } // reset resets r to the initial state. -func (r *randRes[N]) reset() { +func (r *randRes) reset() { // This resets the number of exemplars known. r.count = 0 // Random index inserts should only happen after the storage is full. @@ -147,7 +146,7 @@ func (r *randRes[N]) reset() { // advance updates the count at which the offered measurement will overwrite an // existing exemplar. -func (r *randRes[N]) advance() { +func (r *randRes) advance() { // Calculate the next value in the random number series. // // The current value of r.w is based on the max of a distribution of random @@ -174,7 +173,7 @@ func (r *randRes[N]) advance() { r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1 } -func (r *randRes[N]) Collect(dest *[]metricdata.Exemplar[N]) { +func (r *randRes) Collect(dest *[]Exemplar) { r.storage.Collect(dest) // Call reset here even though it will reset r.count and restart the random // number series. 
This will persist any old exemplars as long as no new diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go index a663aa22..80fa5955 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go @@ -8,11 +8,10 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // Reservoir holds the sampled exemplar of measurements made. -type Reservoir[N int64 | float64] interface { +type Reservoir interface { // Offer accepts the parameters associated with a measurement. The // parameters will be stored as an exemplar if the Reservoir decides to // sample the measurement. @@ -24,10 +23,10 @@ type Reservoir[N int64 | float64] interface { // The time t is the time when the measurement was made. The val and attr // parameters are the value and dropped (filtered) attributes of the // measurement respectively. - Offer(ctx context.Context, t time.Time, val N, attr []attribute.KeyValue) + Offer(ctx context.Context, t time.Time, val Value, attr []attribute.KeyValue) // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. - Collect(dest *[]metricdata.Exemplar[N]) + Collect(dest *[]Exemplar) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go index 994ab107..10b2976f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go @@ -8,27 +8,26 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/trace" ) // storage is an exemplar storage for [Reservoir] implementations. -type storage[N int64 | float64] struct { +type storage struct { // store are the measurements sampled. // // This does not use []metricdata.Exemplar because it potentially would // require an allocation for trace and span IDs in the hot path of Offer. - store []measurement[N] + store []measurement } -func newStorage[N int64 | float64](n int) *storage[N] { - return &storage[N]{store: make([]measurement[N], n)} +func newStorage(n int) *storage { + return &storage{store: make([]measurement, n)} } // Collect returns all the held exemplars. // // The Reservoir state is preserved after this call. -func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) { +func (r *storage) Collect(dest *[]Exemplar) { *dest = reset(*dest, len(r.store), len(r.store)) var n int for _, m := range r.store { @@ -43,13 +42,13 @@ func (r *storage[N]) Collect(dest *[]metricdata.Exemplar[N]) { } // measurement is a measurement made by a telemetry system. -type measurement[N int64 | float64] struct { +type measurement struct { // FilteredAttributes are the attributes dropped during the measurement. FilteredAttributes []attribute.KeyValue // Time is the time when the measurement was made. Time time.Time // Value is the value of the measurement. - Value N + Value Value // SpanContext is the SpanContext active when a measurement was made. SpanContext trace.SpanContext @@ -57,8 +56,8 @@ type measurement[N int64 | float64] struct { } // newMeasurement returns a new non-empty Measurement. 
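
As the storage.go comment above notes, measurements keep the full trace.SpanContext precisely so Offer stays allocation-free; byte-slice trace and span IDs are only materialized when converting to an Exemplar at collection time. A sketch of that deferred copy, assuming an illustrative exportIDs helper:

    package main

    import (
    	"fmt"

    	"go.opentelemetry.io/otel/trace"
    )

    // exportIDs shows why the copy is deferred: slicing the fixed-size
    // [16]byte / [8]byte ID arrays into []byte happens here, at export
    // time, rather than in the Offer hot path.
    func exportIDs(sc trace.SpanContext) (traceID, spanID []byte) {
    	if sc.HasTraceID() {
    		tid := sc.TraceID()
    		traceID = tid[:]
    	}
    	if sc.HasSpanID() {
    		sid := sc.SpanID()
    		spanID = sid[:]
    	}
    	return traceID, spanID
    }

    func main() {
    	var sc trace.SpanContext // zero value: no IDs recorded
    	t, s := exportIDs(sc)
    	fmt.Println(len(t), len(s)) // both nil slices: 0 0
    }
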
-func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, droppedAttr []attribute.KeyValue) measurement[N] { - return measurement[N]{ +func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []attribute.KeyValue) measurement { + return measurement{ FilteredAttributes: droppedAttr, Time: ts, Value: v, @@ -67,8 +66,8 @@ func newMeasurement[N int64 | float64](ctx context.Context, ts time.Time, v N, d } } -// Exemplar returns m as a [metricdata.Exemplar]. -func (m measurement[N]) Exemplar(dest *metricdata.Exemplar[N]) { +// Exemplar returns m as an [Exemplar]. +func (m measurement) Exemplar(dest *Exemplar) { dest.FilteredAttributes = m.FilteredAttributes dest.Time = m.Time dest.Value = m.Value diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go new file mode 100644 index 00000000..9daf27dc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + +import "math" + +// ValueType identifies the type of value used in exemplar data. +type ValueType uint8 + +const ( + // UnknownValueType should not be used. It represents a misconfigured + // Value. + UnknownValueType ValueType = 0 + // Int64ValueType represents a Value with int64 data. + Int64ValueType ValueType = 1 + // Float64ValueType represents a Value with float64 data. + Float64ValueType ValueType = 2 +) + +// Value is the value of data held by an exemplar. +type Value struct { + t ValueType + val uint64 +} + +// NewValue returns a new [Value] for the provided value. +func NewValue[N int64 | float64](value N) Value { + switch v := any(value).(type) { + case int64: + return Value{t: Int64ValueType, val: uint64(v)} + case float64: + return Value{t: Float64ValueType, val: math.Float64bits(v)} + } + return Value{} +} + +// Type returns the [ValueType] of data held by v. +func (v Value) Type() ValueType { return v.t } + +// Int64 returns the value of v as an int64. If the ValueType of v is not an +// Int64ValueType, 0 is returned. +func (v Value) Int64() int64 { + if v.t == Int64ValueType { + return int64(v.val) + } + return 0 +} + +// Float64 returns the value of v as an float64. If the ValueType of v is not +// an Float64ValueType, 0 is returned. +func (v Value) Float64() float64 { + if v.t == Float64ValueType { + return math.Float64frombits(v.val) + } + return 0 +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index 7840c486..479b7610 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -108,6 +108,21 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return i, validateInstrumentName(name) } +// Int64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of int64 measurements during a computational operation. +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + cfg := metric.NewInt64GaugeConfig(options...) 
+ const kind = InstrumentKindGauge + p := int64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + // int64ObservableInstrument returns a new observable identified by the Instrument. // It registers callbacks for each reader's pipeline. func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int64Callback) (int64Observable, error) { @@ -242,6 +257,21 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return i, validateInstrumentName(name) } +// Float64Gauge returns a new instrument identified by name and configured +// with options. The instrument is used to synchronously record the +// distribution of float64 measurements during a computational operation. +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + cfg := metric.NewFloat64GaugeConfig(options...) + const kind = InstrumentKindGauge + p := float64InstProvider{m} + i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) + if err != nil { + return i, err + } + + return i, validateInstrumentName(name) +} + // float64ObservableInstrument returns a new observable identified by the Instrument. // It registers callbacks for each reader's pipeline. func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Float64Callback) (float64Observable, error) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index c3e2d9cc..c6f95971 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -349,7 +349,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), - ReservoirFunc: reservoirFunc[N](stream.Aggregation), + ReservoirFunc: reservoirFunc(stream.Aggregation), } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -447,7 +447,12 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationDrop: // Return nil in and out to signify the drop aggregator. 
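
The two meter.go hunks (Int64Gauge above, Float64Gauge just below) wire the new synchronous gauge instruments into the SDK. A usage sketch against the module versions pinned by this diff; the instrument name and unit are arbitrary examples:

    package main

    import (
    	"context"
    	"log"

    	"go.opentelemetry.io/otel/metric"
    	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    func main() {
    	reader := sdkmetric.NewManualReader()
    	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
    	meter := provider.Meter("example")

    	// A synchronous gauge records the current value at call time,
    	// rather than via an observable callback.
    	temp, err := meter.Int64Gauge("room.temperature",
    		metric.WithUnit("Cel"),
    		metric.WithDescription("Current room temperature"))
    	if err != nil {
    		log.Fatal(err)
    	}
    	temp.Record(context.Background(), 23)
    }
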
case AggregationLastValue: - meas, comp = b.LastValue() + switch kind { + case InstrumentKindGauge: + meas, comp = b.LastValue() + case InstrumentKindObservableGauge: + meas, comp = b.PrecomputedLastValue() + } case AggregationSum: switch kind { case InstrumentKindObservableCounter: @@ -464,7 +469,7 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationExplicitBucketHistogram: var noSum bool switch kind { - case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: // The sum should not be collected for any instrument that can make // negative measurements: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations @@ -474,7 +479,7 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationBase2ExponentialHistogram: var noSum bool switch kind { - case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: + case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, InstrumentKindGauge: // The sum should not be collected for any instrument that can make // negative measurements: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations @@ -497,6 +502,7 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin // | Counter | ✓ | | ✓ | ✓ | ✓ | // | UpDownCounter | ✓ | | ✓ | ✓ | ✓ | // | Histogram | ✓ | | ✓ | ✓ | ✓ | +// | Gauge | ✓ | ✓ | | ✓ | ✓ | // | Observable Counter | ✓ | | ✓ | ✓ | ✓ | // | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | // | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |. @@ -509,6 +515,7 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindHistogram, + InstrumentKindGauge, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: @@ -526,7 +533,8 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { return errIncompatibleAggregation } case AggregationLastValue: - if kind == InstrumentKindObservableGauge { + switch kind { + case InstrumentKindObservableGauge, InstrumentKindGauge: return nil } // TODO: review need for aggregation check after diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index 9f900130..a55f9a53 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -148,7 +148,7 @@ func DefaultAggregationSelector(ik InstrumentKind) Aggregation { switch ik { case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter: return AggregationSum{} - case InstrumentKindObservableGauge: + case InstrumentKindObservableGauge, InstrumentKindGauge: return AggregationLastValue{} case InstrumentKindHistogram: return AggregationExplicitBucketHistogram{ @@ -166,7 +166,7 @@ type ReaderOption interface { ManualReaderOption } -// WithProducers registers producers as an external Producer of metric data +// WithProducer registers producers as an external Producer of metric data // for this Reader. 
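
With the pipeline.go and reader.go hunks above, InstrumentKindGauge joins the observable gauge in defaulting to last-value aggregation, and isAggregatorCompatible accepts the pairing. A sketch of a custom selector that mirrors that default, assuming the exported sdkmetric.WithAggregationSelector reader option:

    package main

    import (
    	"fmt"

    	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
    )

    // selector mirrors the updated DefaultAggregationSelector: the new
    // synchronous Gauge kind shares the last-value default with
    // observable gauges, and everything else falls through.
    func selector(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
    	switch ik {
    	case sdkmetric.InstrumentKindGauge, sdkmetric.InstrumentKindObservableGauge:
    		return sdkmetric.AggregationLastValue{}
    	default:
    		return sdkmetric.DefaultAggregationSelector(ik)
    	}
    }

    func main() {
    	reader := sdkmetric.NewManualReader(
    		sdkmetric.WithAggregationSelector(selector))
    	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
    	fmt.Printf("%T\n", selector(sdkmetric.InstrumentKindGauge))
    }
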
func WithProducer(p Producer) ReaderOption { return producerOption{p: p} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index e413f910..43f85cfb 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.26.0" + return "1.27.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 488cabc4..50d2df5e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go index f3eeb45a..7525ee75 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -11,7 +11,7 @@ import ( "os" "regexp" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) type containerIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 7b221c70..0d5a355a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) const ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go index 5acbec23..3c1aa628 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -8,7 +8,7 @@ import ( "errors" "strings" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) type hostIDProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index cf0165a6..ff78020f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 8ba4e9a4..e4e1df8c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -11,7 +11,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) type ( diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 
c44f6b92..f0221eaa 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.25.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -630,7 +630,11 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() || !link.SpanContext.IsValid() { + if !s.IsRecording() { + return + } + if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && + link.SpanContext.TraceState().Len() == 0 { return } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index ec425157..f0d8fc51 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.26.0" + return "1.27.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md deleted file mode 100644 index 0b6cbe96..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.24.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go deleted file mode 100644 index d66bbe9c..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ /dev/null @@ -1,2545 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. 
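
One behavioral note from the span.go hunk above: AddLink previously dropped any link without a valid SpanContext; it now records links that still carry attributes or TraceState, ignoring only fully empty ones. A usage sketch (tracer name and attribute are arbitrary):

    package main

    import (
    	"context"

    	"go.opentelemetry.io/otel/attribute"
    	sdktrace "go.opentelemetry.io/otel/sdk/trace"
    	"go.opentelemetry.io/otel/trace"
    )

    func main() {
    	tp := sdktrace.NewTracerProvider()
    	defer tp.Shutdown(context.Background())

    	_, span := tp.Tracer("example").Start(context.Background(), "op")
    	defer span.End()

    	// The link's SpanContext is the zero value (invalid), but under
    	// the change above it is kept because it carries attributes.
    	span.AddLink(trace.Link{
    		Attributes: []attribute.KeyValue{
    			attribute.String("link.reason", "batch-member"),
    		},
    	})
    }
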
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. - CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. 
- // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - 
CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. 
Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assinged by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. 
It represents the container name used by container - // runtime. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val int) attribute.KeyValue { - return HostCPUSteppingKey.Int(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. 
-func HostCPUVendorID(val string) attribute.KeyValue {
-	return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
-	return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
-	return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
-	return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
-	return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
-	return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
-	return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
-	return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
-	return HostTypeKey.String(val)
-}
-
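For reference, a minimal sketch of how the host helpers removed above attach to an SDK resource. The semconv import path assumes this file corresponds to the v1.24.0 package, and the `host.id` value reuses the documented example:

```go
package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	hostname, _ := os.Hostname()
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HostName(hostname),
		semconv.HostArchAMD64, // one of the HostArch* enum values above
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"), // example value from the docs
	)
	fmt.Println(res)
}
```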
-// Kubernetes resource attributes.
-const (
-	// K8SClusterNameKey is the attribute Key conforming to the
-	// "k8s.cluster.name" semantic conventions. It represents the name of the
-	// cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-cluster'
-	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
-	// K8SClusterUIDKey is the attribute Key conforming to the
-	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
-	// the cluster, set to the UID of the `kube-system` namespace.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
-	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
-	// ever
-	// added, we will recommend collecting the `k8s.cluster.uid` through the
-	// official APIs. In the meantime, we are able to use the `uid` of the
-	// `kube-system` namespace as a proxy for cluster ID. Read on for the
-	// rationale.
-	//
-	// Every object created in a K8S cluster is assigned a distinct UID. The
-	// `kube-system` namespace is used by Kubernetes itself and will exist
-	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
-	// namespace is a reasonable proxy for the K8S ClusterID as it will only
-	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
-	// UUIDs as standardized by
-	// [ISO/IEC 9834-8 and ITU-T
-	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
-	// Which states:
-	//
-	// > If generated according to one of the mechanisms defined in Rec.
-	// ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
-	// different from all other UUIDs generated before 3603 A.D., or is
-	// extremely likely to be different (depending on the mechanism chosen).
-	//
-	// Therefore, UIDs between clusters should be extremely unlikely to
-	// conflict.
-	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
-	// K8SContainerNameKey is the attribute Key conforming to the
-	// "k8s.container.name" semantic conventions. It represents the name of the
-	// Container from Pod specification, must be unique within a Pod. Container
-	// runtime usually uses different globally unique name (`container.name`).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'redis'
-	K8SContainerNameKey = attribute.Key("k8s.container.name")
-
-	// K8SContainerRestartCountKey is the attribute Key conforming to the
-	// "k8s.container.restart_count" semantic conventions. It represents the
-	// number of times the container was restarted. This attribute can be used
-	// to identify a particular container (running or stopped) within a
-	// container spec.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 2
-	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
-	// K8SCronJobNameKey is the attribute Key conforming to the
-	// "k8s.cronjob.name" semantic conventions. It represents the name of the
-	// CronJob.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
-	// K8SCronJobUIDKey is the attribute Key conforming to the
-	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-	// CronJob.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
-	// K8SDaemonSetNameKey is the attribute Key conforming to the
-	// "k8s.daemonset.name" semantic conventions. It represents the name of the
-	// DaemonSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
-	// K8SDaemonSetUIDKey is the attribute Key conforming to the
-	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-	// DaemonSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
-	// K8SDeploymentNameKey is the attribute Key conforming to the
-	// "k8s.deployment.name" semantic conventions. It represents the name of
-	// the Deployment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
-	// K8SDeploymentUIDKey is the attribute Key conforming to the
-	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-	// Deployment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
-	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
-	// semantic conventions. It represents the name of the Job.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SJobNameKey = attribute.Key("k8s.job.name")
-
-	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
-	// semantic conventions. It represents the UID of the Job.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
-	// K8SNamespaceNameKey is the attribute Key conforming to the
-	// "k8s.namespace.name" semantic conventions. It represents the name of the
-	// namespace that the pod is running in.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'default'
-	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
-	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
-	// semantic conventions. It represents the name of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'node-1'
-	K8SNodeNameKey = attribute.Key("k8s.node.name")
-
-	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
-	// semantic conventions. It represents the UID of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
-	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
-	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
-	// semantic conventions. It represents the name of the Pod.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-pod-autoconf'
-	K8SPodNameKey = attribute.Key("k8s.pod.name")
-
-	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
-	// semantic conventions. It represents the UID of the Pod.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
-	// K8SReplicaSetNameKey is the attribute Key conforming to the
-	// "k8s.replicaset.name" semantic conventions. It represents the name of
-	// the ReplicaSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
-	// K8SReplicaSetUIDKey is the attribute Key conforming to the
-	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-	// ReplicaSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
-	// K8SStatefulSetNameKey is the attribute Key conforming to the
-	// "k8s.statefulset.name" semantic conventions. It represents the name of
-	// the StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
-	// K8SStatefulSetUIDKey is the attribute Key conforming to the
-	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-	// StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
-	return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
-	return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
-	return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
-	return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
-	return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
-	return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
-	return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
-	return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
-	return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
-	return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
-	return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
-	return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
-	return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
-	return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
-	return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
-	return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
-	return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
-	return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
-	return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
-	return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
-	return K8SStatefulSetUIDKey.String(val)
-}
-
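The K8S helpers above are plain constructors, so the usual pattern is to feed them from environment variables injected through the Kubernetes downward API. A hedged sketch, where POD_NAMESPACE, POD_NAME, and POD_UID are hypothetical variable names chosen for illustration (they must be declared in the pod spec):

```go
package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// POD_NAMESPACE, POD_NAME, and POD_UID are hypothetical names, assumed
	// to be populated via the Kubernetes downward API (fieldRef entries).
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
		semconv.K8SPodName(os.Getenv("POD_NAME")),
		semconv.K8SPodUID(os.Getenv("POD_UID")),
	)
	fmt.Println(res)
}
```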
-// An OCI image manifest.
-const (
-	// OciManifestDigestKey is the attribute Key conforming to the
-	// "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically is the digest
-	// by which the container image is known.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
-	// Note: Follows [OCI Image Manifest
-	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
-	// and specifically the [Digest
-	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
-	// An example can be found in [Example Image
-	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
-	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically is the digest by which
-// the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
-	return OciManifestDigestKey.String(val)
-}
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
-	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
-	// semantic conventions. It represents the unique identifier for a
-	// particular build or compilation of the operating system.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
-	OSBuildIDKey = attribute.Key("os.build_id")
-
-	// OSDescriptionKey is the attribute Key conforming to the "os.description"
-	// semantic conventions. It represents the human readable (not intended to
-	// be parsed) OS version information, like e.g. reported by `ver` or
-	// `lsb_release -a` commands.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
-	// LTS'
-	OSDescriptionKey = attribute.Key("os.description")
-
-	// OSNameKey is the attribute Key conforming to the "os.name" semantic
-	// conventions. It represents the human readable operating system name.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'iOS', 'Android', 'Ubuntu'
-	OSNameKey = attribute.Key("os.name")
-
-	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
-	// conventions. It represents the operating system type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	OSTypeKey = attribute.Key("os.type")
-
-	// OSVersionKey is the attribute Key conforming to the "os.version"
-	// semantic conventions. It represents the version string of the operating
-	// system as defined in [Version
-	// Attributes](/docs/resource/README.md#version-attributes).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '14.2.1', '18.04.1'
-	OSVersionKey = attribute.Key("os.version")
-)
-
-var (
-	// Microsoft Windows
-	OSTypeWindows = OSTypeKey.String("windows")
-	// Linux
-	OSTypeLinux = OSTypeKey.String("linux")
-	// Apple Darwin
-	OSTypeDarwin = OSTypeKey.String("darwin")
-	// FreeBSD
-	OSTypeFreeBSD = OSTypeKey.String("freebsd")
-	// NetBSD
-	OSTypeNetBSD = OSTypeKey.String("netbsd")
-	// OpenBSD
-	OSTypeOpenBSD = OSTypeKey.String("openbsd")
-	// DragonFly BSD
-	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
-	// HP-UX (Hewlett Packard Unix)
-	OSTypeHPUX = OSTypeKey.String("hpux")
-	// AIX (Advanced Interactive eXecutive)
-	OSTypeAIX = OSTypeKey.String("aix")
-	// SunOS, Oracle Solaris
-	OSTypeSolaris = OSTypeKey.String("solaris")
-	// IBM z/OS
-	OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
-	return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, like e.g. reported by `ver`
-// or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
-	return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
-	return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
-	return OSVersionKey.String(val)
-}
-
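These OS values rarely need to be set by hand; a minimal sketch using the SDK's resource.WithOS() option, which bundles the built-in detectors that populate the os.type and os.description attributes defined above:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// WithOS enables the SDK's os.type and os.description detectors.
	res, err := resource.New(context.Background(), resource.WithOS())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res)
}
```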
-// An operating system process.
-const (
-	// ProcessCommandKey is the attribute Key conforming to the
-	// "process.command" semantic conventions. It represents the command used
-	// to launch the process (i.e. the command name). On Linux based systems,
-	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
-	// be set to the first parameter extracted from `GetCommandLineW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol'
-	ProcessCommandKey = attribute.Key("process.command")
-
-	// ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents the all the
-	// command arguments (including the command/executable itself) as received
-	// by the process. On Linux-based systems (and some other Unixoid systems
-	// supporting procfs), can be set according to the list of null-delimited
-	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-	// this would be the full argv vector passed to `main`.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otecol', '--config=config.yaml'
-	ProcessCommandArgsKey = attribute.Key("process.command_args")
-
-	// ProcessCommandLineKey is the attribute Key conforming to the
-	// "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process as a single string representing the
-	// full command. On Windows, can be set to the result of `GetCommandLineW`.
-	// Do not set this if you have to assemble it just for monitoring; use
-	// `process.command_args` instead.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
-	ProcessCommandLineKey = attribute.Key("process.command_line")
-
-	// ProcessExecutableNameKey is the attribute Key conforming to the
-	// "process.executable.name" semantic conventions. It represents the name
-	// of the process executable. On Linux based systems, can be set to the
-	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
-	// of `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'otelcol'
-	ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
-	// ProcessExecutablePathKey is the attribute Key conforming to the
-	// "process.executable.path" semantic conventions. It represents the full
-	// path to the process executable. On Linux based systems, can be set to
-	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
-	// `GetProcessImageFileNameW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/usr/bin/cmd/otelcol'
-	ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
-	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
-	// semantic conventions. It represents the username of the user that owns
-	// the process.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessOwnerKey = attribute.Key("process.owner")
-
-	// ProcessParentPIDKey is the attribute Key conforming to the
-	// "process.parent_pid" semantic conventions. It represents the parent
-	// Process identifier (PPID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 111
-	ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
-	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
-	// semantic conventions. It represents the process identifier (PID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1234
-	ProcessPIDKey = attribute.Key("process.pid")
-
-	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
-	// "process.runtime.description" semantic conventions. It represents an
-	// additional description about the runtime of the process, for example a
-	// specific vendor customization of the runtime environment.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
-	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
-	// ProcessRuntimeNameKey is the attribute Key conforming to the
-	// "process.runtime.name" semantic conventions. It represents the name of
-	// the runtime of this process. For compiled native binaries, this SHOULD
-	// be the name of the compiler.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'OpenJDK Runtime Environment'
-	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
-	// ProcessRuntimeVersionKey is the attribute Key conforming to the
-	// "process.runtime.version" semantic conventions. It represents the
-	// version of the runtime of this process, as returned by the runtime
-	// without modification.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '14.0.2'
-	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
-	return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
-	return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
-	return ProcessCommandLineKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
-	return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
-	return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
-	return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
-	return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
-	return ProcessPIDKey.Int(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
-	return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
-	return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
-	return ProcessRuntimeVersionKey.String(val)
-}
-
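A sketch of filling the process attributes from the standard library; runtime.Compiler ("gc" for the standard toolchain) stands in for the compiler name that the note above says SHOULD be used for compiled native binaries:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessExecutableName(filepath.Base(os.Args[0])),
		semconv.ProcessRuntimeName(runtime.Compiler), // compiler name, per the note above
		semconv.ProcessRuntimeVersion(runtime.Version()),
	)
	fmt.Println(res)
}
```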
-// The Android platform on which the Android application is running.
-const (
-	// AndroidOSAPILevelKey is the attribute Key conforming to the
-	// "android.os.api_level" semantic conventions. It represents the uniquely
-	// identifies the framework API revision offered by a version
-	// (`os.version`) of the android operating system. More information can be
-	// found
-	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '33', '32'
-	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
-	return AndroidOSAPILevelKey.String(val)
-}
-
-// The web browser in which the application represented by the resource is
-// running. The `browser.*` attributes MUST be used only for resources that
-// represent applications running in a web browser (regardless of whether
-// running on a mobile or desktop device).
-const (
-	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
-	// semantic conventions. It represents the array of brand name and version
-	// separated by a space
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
-	// Note: This value is intended to be taken from the [UA client hints
-	// API](https://wicg.github.io/ua-client-hints/#interface)
-	// (`navigator.userAgentData.brands`).
-	BrowserBrandsKey = attribute.Key("browser.brands")
-
-	// BrowserLanguageKey is the attribute Key conforming to the
-	// "browser.language" semantic conventions. It represents the preferred
-	// language of the user using the browser
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
-	// Note: This value is intended to be taken from the Navigator API
-	// `navigator.language`.
-	BrowserLanguageKey = attribute.Key("browser.language")
-
-	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
-	// semantic conventions. It represents a boolean that is true if the
-	// browser is running on a mobile device
-	//
-	// Type: boolean
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Note: This value is intended to be taken from the [UA client hints
-	// API](https://wicg.github.io/ua-client-hints/#interface)
-	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
-	// SHOULD be left unset.
-	BrowserMobileKey = attribute.Key("browser.mobile")
-
-	// BrowserPlatformKey is the attribute Key conforming to the
-	// "browser.platform" semantic conventions. It represents the platform on
-	// which the browser is running
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Windows', 'macOS', 'Android'
-	// Note: This value is intended to be taken from the [UA client hints
-	// API](https://wicg.github.io/ua-client-hints/#interface)
-	// (`navigator.userAgentData.platform`). If unavailable, the legacy
-	// `navigator.platform` API SHOULD NOT be used instead and this attribute
-	// SHOULD be left unset in order for the values to be consistent.
-	// The list of possible values is defined in the [W3C User-Agent Client
-	// Hints
-	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
-	// Note that some (but not all) of these values can overlap with values in
-	// the [`os.type` and `os.name` attributes](./os.md). However, for
-	// consistency, the values in the `browser.platform` attribute should
-	// capture the exact value that the user agent provides.
-	BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
-	return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
-	return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
-	return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
-	return BrowserPlatformKey.String(val)
-}
-
-// Resources used by AWS Elastic Container Service (ECS).
-const (
-	// AWSECSClusterARNKey is the attribute Key conforming to the
-	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
-	// [ECS
-	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
-	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
-	// AWSECSContainerARNKey is the attribute Key conforming to the
-	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-	// Resource Name (ARN) of an [ECS container
-	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
-	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
-	// AWSECSLaunchtypeKey is the attribute Key conforming to the
-	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
-	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
-	// for an ECS task.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
-	// AWSECSTaskARNKey is the attribute Key conforming to the
-	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
-	// [ECS task
-	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
-	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
-	// AWSECSTaskFamilyKey is the attribute Key conforming to the
-	// "aws.ecs.task.family" semantic conventions. It represents the task
-	// definition family this task definition is a member of.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-family'
-	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
-	// AWSECSTaskRevisionKey is the attribute Key conforming to the
-	// "aws.ecs.task.revision" semantic conventions. It represents the revision
-	// for this task definition.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '8', '26'
-	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
-	// ec2
-	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
-	// fargate
-	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
-	return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
-	return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
-// task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
-func AWSECSTaskARN(val string) attribute.KeyValue {
-	return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the task
-// definition family this task definition is a member of.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
-	return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// this task definition.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
-	return AWSECSTaskRevisionKey.String(val)
-}
-
-// Resources used by AWS Elastic Kubernetes Service (EKS).
-const (
-	// AWSEKSClusterARNKey is the attribute Key conforming to the
-	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
-	// EKS cluster.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
-	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
-	return AWSEKSClusterARNKey.String(val)
-}
-
-// Resources specific to Amazon Web Services.
-const (
-	// AWSLogGroupARNsKey is the attribute Key conforming to the
-	// "aws.log.group.arns" semantic conventions. It represents the Amazon
-	// Resource Name(s) (ARN) of the AWS log group(s).
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
-	// Note: See the [log group ARN format
-	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
-	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
-	// AWSLogGroupNamesKey is the attribute Key conforming to the
-	// "aws.log.group.names" semantic conventions. It represents the name(s) of
-	// the AWS log group(s) an application is writing to.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
-	// Note: Multiple log groups must be supported for cases like
-	// multi-container applications, where a single application has sidecar
-	// containers, and each write to their own log group.
-	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
-	// AWSLogStreamARNsKey is the attribute Key conforming to the
-	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
-	// the AWS log stream(s).
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
-	// Note: See the [log stream ARN format
-	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
-	// One log group can contain several log streams, so these ARNs necessarily
-	// identify both a log group and a log stream.
-	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
-	// AWSLogStreamNamesKey is the attribute Key conforming to the
-	// "aws.log.stream.names" semantic conventions. It represents the name(s)
-	// of the AWS log stream(s) an application is writing to.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
-	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
-	return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
-	return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
-	return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
-	return AWSLogStreamNamesKey.StringSlice(val)
-}
-
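Because the log group and stream helpers take variadic strings, several groups or streams can be recorded at once (the sidecar case the note above mentions). A sketch reusing the documented example values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.AWSLogGroupNames("/aws/lambda/my-function", "opentelemetry-service"),
		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
	)
	fmt.Println(res)
}
```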
-// Resource used by Google Cloud Run.
-const (
-	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
-	// "gcp.cloud_run.job.execution" semantic conventions. It represents the
-	// name of the Cloud Run
-	// [execution](https://cloud.google.com/run/docs/managing/job-executions)
-	// being run for the Job, as set by the
-	// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-	// environment variable.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'job-name-xxxx', 'sample-job-mdw84'
-	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
-	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
-	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the
-	// index for a task within an execution as provided by the
-	// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-	// environment variable.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 1
-	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
-	return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
-	return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
-// Resources used by Google Compute Engine (GCE).
-const (
-	// GCPGceInstanceHostnameKey is the attribute Key conforming to the
-	// "gcp.gce.instance.hostname" semantic conventions. It represents the
-	// hostname of a GCE instance. This is the full value of the default or
-	// [custom
-	// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'my-host1234.example.com',
-	// 'sample-vm.us-west1-b.c.my-project.internal'
-	GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
-	// GCPGceInstanceNameKey is the attribute Key conforming to the
-	// "gcp.gce.instance.name" semantic conventions. It represents the instance
-	// name of a GCE instance. This is the value provided by `host.name`, the
-	// visible name of the instance in the Cloud Console UI, and the prefix for
-	// the default hostname of the instance as defined by the [default internal
-	// DNS
-	// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'instance-1', 'my-vm-name'
-	GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
-	return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
-	return GCPGceInstanceNameKey.String(val)
-}
-
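Both Cloud Run job attributes above are defined in terms of environment variables from the Cloud Run container contract (CLOUD_RUN_EXECUTION, CLOUD_RUN_TASK_INDEX), so they can be filled in directly. A minimal sketch:

```go
package main

import (
	"fmt"
	"os"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.GCPCloudRunJobExecution(os.Getenv("CLOUD_RUN_EXECUTION")),
	}
	// The task index is only added when the variable parses as an integer.
	if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
		attrs = append(attrs, semconv.GCPCloudRunJobTaskIndex(idx))
	}
	fmt.Println(resource.NewWithAttributes(semconv.SchemaURL, attrs...))
}
```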
-// Heroku dyno metadata
-const (
-	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
-	// semantic conventions. It represents the unique identifier for the
-	// application
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
-	HerokuAppIDKey = attribute.Key("heroku.app.id")
-
-	// HerokuReleaseCommitKey is the attribute Key conforming to the
-	// "heroku.release.commit" semantic conventions. It represents the commit
-	// hash for the current release
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
-	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
-	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
-	// "heroku.release.creation_timestamp" semantic conventions. It represents
-	// the time and date the release was created
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2022-10-23T18:00:42Z'
-	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
-	return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
-	return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
-	return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// The software deployment.
-const (
-	// DeploymentEnvironmentKey is the attribute Key conforming to the
-	// "deployment.environment" semantic conventions. It represents the name of
-	// the [deployment
-	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
-	// deployment tier).
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'staging', 'production'
-	// Note: `deployment.environment` does not affect the uniqueness
-	// constraints defined through
-	// the `service.namespace`, `service.name` and `service.instance.id`
-	// resource attributes.
-	// This implies that resources carrying the following attribute
-	// combinations MUST be
-	// considered to be identifying the same service:
-	//
-	// * `service.name=frontend`, `deployment.environment=production`
-	// * `service.name=frontend`, `deployment.environment=staging`.
-	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
-	return DeploymentEnvironmentKey.String(val)
-}
-
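A sketch of tagging the deployment tier; DEPLOY_ENV is a hypothetical variable name, and the fallback value mirrors the documented examples:

```go
package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// DEPLOY_ENV is a hypothetical name; "staging"/"production" echo the
	// examples in the convention above.
	env := os.Getenv("DEPLOY_ENV")
	if env == "" {
		env = "staging"
	}
	fmt.Println(resource.NewWithAttributes(semconv.SchemaURL, semconv.DeploymentEnvironment(env)))
}
```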
-// A serverless instance.
-const (
-	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
-	// semantic conventions. It represents the execution environment ID as a
-	// string, that will be potentially reused for other invocations to the
-	// same function/function version.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
-	// Note: * **AWS Lambda:** Use the (full) log stream name.
-	FaaSInstanceKey = attribute.Key("faas.instance")
-
-	// FaaSMaxMemoryKey is the attribute Key conforming to the
-	// "faas.max_memory" semantic conventions. It represents the amount of
-	// memory available to the serverless function converted to Bytes.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 134217728
-	// Note: It's recommended to set this attribute since e.g. too little
-	// memory can easily stop a Java AWS Lambda function from working
-	// correctly. On AWS Lambda, the environment variable
-	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
-	// be multiplied by 1,048,576).
-	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
-	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
-	// conventions. It represents the name of the single function that this
-	// runtime instance executes.
-	//
-	// Type: string
-	// RequirementLevel: Required
-	// Stability: experimental
-	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
-	// Note: This is the name of the function as configured/deployed on the
-	// FaaS
-	// platform and is usually different from the name of the callback
-	// function (which may be stored in the
-	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
-	// span attributes).
-	//
-	// For some cloud providers, the above definition is ambiguous. The
-	// following
-	// definition of function name MUST be used for this attribute
-	// (and consequently the span name) for the listed cloud
-	// providers/products:
-	//
-	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
-	// followed by a forward slash followed by the function name (this form
-	// can also be seen in the resource JSON for the function).
-	// This means that a span attribute MUST be used, as an Azure function
-	// app can host multiple functions that would usually share
-	// a TracerProvider (see also the `cloud.resource_id` attribute).
-	FaaSNameKey = attribute.Key("faas.name")
-
-	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
-	// semantic conventions. It represents the immutable version of the
-	// function being executed.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '26', 'pinkfroid-00002'
-	// Note: Depending on the cloud provider and platform, use:
-	//
-	// * **AWS Lambda:** The [function
-	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
-	// (an integer represented as a decimal string).
-	// * **Google Cloud Run (Services):** The
-	// [revision](https://cloud.google.com/run/docs/managing/revisions)
-	// (i.e., the function name plus the revision suffix).
-	// * **Google Cloud Functions:** The value of the
-	// [`K_REVISION` environment
-	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
-	// * **Azure Functions:** Not applicable. Do not set this attribute.
-	FaaSVersionKey = attribute.Key("faas.version")
-)
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
-	return FaaSInstanceKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
-	return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
-	return FaaSNameKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
-	return FaaSVersionKey.String(val)
-}
-
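On AWS Lambda the documented runtime environment variables map directly onto these helpers; a sketch that converts AWS_LAMBDA_FUNCTION_MEMORY_SIZE from MiB to bytes as the faas.max_memory note prescribes (in practice a resource detector such as go.opentelemetry.io/contrib/detectors/aws/lambda can derive these values automatically):

```go
package main

import (
	"fmt"
	"os"
	"strconv"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// AWS_LAMBDA_FUNCTION_* are standard Lambda runtime variables; memory
	// size is reported in MiB and multiplied by 1,048,576 per the note above.
	memMiB, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
		semconv.FaaSVersion(os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")),
		semconv.FaaSMaxMemory(memMiB*1048576),
	)
	fmt.Println(res)
}
```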
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. 
It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. 
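Reviewer note: a companion sketch (illustrative, not part of this change) combining the service and deployment attributes defined earlier in this file into one resource; the `telemetry.sdk.*` attributes are normally populated by the SDK itself rather than by application code.

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// serviceResource identifies one instance of a horizontally scaled service.
func serviceResource(instanceID string) *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("frontend"),          // required; SDKs otherwise fall back to unknown_service:<exe>
		semconv.ServiceNamespace("Shop"),         // service.name must be unique within this namespace
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID(instanceID),    // e.g. a random RFC 4122 UUID
		semconv.DeploymentEnvironment("staging"), // does not affect the service identity constraints
	)
}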
-func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index c1718234..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. 
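Reviewer note: unlike the resource attributes above, the `code.*` conventions annotate individual spans. A minimal sketch assuming the global tracer provider; the handler name and values are illustrative.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// serveRequest records where in the source the traced operation lives.
func serveRequest(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "serveRequest")
	defer span.End()

	span.SetAttributes(
		semconv.CodeNamespace("com.example.MyHTTPService"),
		semconv.CodeFunction("serveRequest"),
		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		semconv.CodeLineNumber(42),
	)
}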
-func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` response, as applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` response, as applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used.
-const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), - // which uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), - // which identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), - // which contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), -// which uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), -// which identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions.
It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), -// which contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set.
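Reviewer note: a short sketch of the CloudEvents span attributes in use; the event values mirror the examples in the deleted comments and are illustrative only.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateCloudEvent tags a span that processes one CloudEvent.
func annotateCloudEvent(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"), // required
		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),       // required
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
		semconv.CloudeventsEventSubject("mynewfile.jpg"),
	)
}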
-func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions.
It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). 
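Reviewer note: a sketch combining the datasource-trigger and cold-start attributes above on an incoming FaaS span; the scenario (a function fired by an object upload) and all values are illustrative.

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateStorageTrigger tags a span for a function invoked by an object write.
func annotateStorageTrigger(span trace.Span, coldStart bool) {
	span.SetAttributes(
		semconv.FaaSDocumentCollection("myBucketName"), // required: the triggering source
		semconv.FaaSDocumentOperationInsert,            // enum member: a new object was created
		semconv.FaaSDocumentName("myFile.txt"),
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
		semconv.FaaSColdstart(coldStart),
	)
}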
-func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. 
It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. 
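Reviewer note: the DynamoDB helpers map SDK request parameters onto span attributes one-for-one; an illustrative sketch for a single Query call (not part of this diff).

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateQuery mirrors a DynamoDB Query request onto its client span.
func annotateQuery(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBProjection("Title, Price, Color"),
	)
}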
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter.
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
-	return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
-	return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// DynamoDB.UpdateTable
-const (
-	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
-	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
-	// represents the JSON-serialized value of each item in the
-	// `AttributeDefinitions` request field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
-	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
-	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
-	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-	// conventions. It represents the JSON-serialized value of each item in
-	// the `GlobalSecondaryIndexUpdates` request field.
-	//
-	// Type: string[]
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
-	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
-	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
-	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
-	// "WriteCapacityUnits": number } }'
-	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
-	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
-	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// Attributes that exist for S3 request types.
-const (
-	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
-	// semantic conventions. It represents the S3 bucket name the request
-	// refers to. Corresponds to the `--bucket` parameter of the [S3
-	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-	// operations.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'some-bucket-name'
-	// Note: The `bucket` attribute is applicable to all S3 operations that
-	// reference a bucket, i.e. that require the bucket name as a mandatory
-	// parameter.
-	// This applies to almost all S3 operations except `list-buckets`.
-	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
-	// AWSS3CopySourceKey is the attribute Key conforming to the
-	// "aws.s3.copy_source" semantic conventions. It represents the source
-	// object (in the form `bucket`/`key`) for the copy operation.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md new file mode 100644 index 00000000..980fcc7d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.25.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.25.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.25.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go similarity index 50% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go index 6e688345..30a51fa7 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/attribute_group.go @@ -3,103 +3,10 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" import "go.opentelemetry.io/otel/attribute" -// Describes FaaS attributes. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. 
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - // Attributes for Events represented using Log Records. const ( // EventNameKey is the attribute Key conforming to the "event.name" @@ -111,7 +18,7 @@ const ( // Stability: experimental // Examples: 'browser.mouse.click', 'device.app.lifecycle' // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.31.0/specification/common/attribute-naming.md). 
// Notably, event names are namespaced to avoid collisions and provide a // clean separation of semantics for events in separate domains like // browser, mobile, and kubernetes. @@ -245,9 +152,9 @@ const ( // PoolNameKey is the attribute Key conforming to the "pool.name" semantic // conventions. It represents the name of the connection pool; unique // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used + // implementation doesn't provide a name, instrumentation should use a + // combination of `server.address` and `server.port` attributes formatted + // as `server.address:server.port`. // // Type: string // RequirementLevel: Required @@ -275,15 +182,26 @@ var ( // PoolName returns an attribute KeyValue conforming to the "pool.name" // semantic conventions. It represents the name of the connection pool; unique // within the instrumented application. In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used +// implementation doesn't provide a name, instrumentation should use a +// combination of `server.address` and `server.port` attributes formatted as +// `server.address:server.port`. func PoolName(val string) attribute.KeyValue { return PoolNameKey.String(val) } // ASP.NET Core attributes const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, shows whether the lease was acquired or + // contains a rejection reason + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to // the "aspnetcore.diagnostics.handler.type" semantic conventions. It // represents the full type name of the @@ -293,7 +211,7 @@ const ( // Type: string // RequirementLevel: ConditionallyRequired (if and only if the exception // was handled by this handler.) - // Stability: experimental + // Stability: stable // Examples: 'Contoso.MyHandler' AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") @@ -304,21 +222,10 @@ const ( // Type: string // RequirementLevel: ConditionallyRequired (if the matched endpoint for the // request had a rate-limiting policy.) - // Stability: experimental + // Stability: stable // Examples: 'fixed', 'sliding', 'token' AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the // "aspnetcore.request.is_unhandled" semantic conventions. It represents // the flag indicating if request was handled by the application pipeline. 
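// A minimal sketch of the renamed fallback described in the pool.name hunk
// above — not part of the generated file; the host and port values are
// assumed. When the pool implementation provides no name, the
// `server.address:server.port` form can be built with the standard library
// (net.JoinHostPort and strconv.Itoa) and passed to the PoolName helper
// defined in this file:
//
//	name := net.JoinHostPort("db.example.com", strconv.Itoa(5432))
//	attr := PoolName(name) // pool.name = "db.example.com:5432"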
@@ -326,7 +233,7 @@ const ( // Type: boolean // RequirementLevel: ConditionallyRequired (if and only if the request was // not handled.) - // Stability: experimental + // Stability: stable // Examples: True AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") @@ -337,7 +244,7 @@ const ( // Type: boolean // RequirementLevel: ConditionallyRequired (If and only if a route was // successfully matched.) - // Stability: experimental + // Stability: stable // Examples: True AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") ) @@ -391,7 +298,7 @@ const ( // // Type: Enum // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: 'app_shutdown', 'timeout' SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") @@ -402,7 +309,7 @@ const ( // // Type: Enum // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: 'web_sockets', 'long_polling' SignalrTransportKey = attribute.Key("signalr.transport") ) @@ -486,6 +393,29 @@ func JvmMemoryPoolName(val string) attribute.KeyValue { return JvmMemoryPoolNameKey.String(val) } +// Attributes for process CPU metrics. +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state + // for this data point. A process SHOULD be characterized _either_ by data + // points with no `state` labels, _or only_ data points with `state` + // labels. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + // Describes System metric attributes const ( // SystemDeviceKey is the attribute Key conforming to the "system.device" @@ -517,8 +447,10 @@ const ( SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU + // "system.cpu.state" semantic conventions. It represents the CPU state for + // this data point. A system's CPU SHOULD be characterized *either* by data + // points with no `state` labels, *or only* data points with `state` + // labels. // // Type: Enum // RequirementLevel: Optional @@ -754,522 +686,1691 @@ var ( // Describes System Process metric attributes const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. It represents the - // process state, e.g., [Linux Process State + // SystemProcessStatusKey is the attribute Key conforming to the + // "system.process.status" semantic conventions. 
It represents the process + // state, e.g., [Linux Process State // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) // // Type: Enum // RequirementLevel: Optional // Stability: experimental // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") + SystemProcessStatusKey = attribute.Key("system.process.status") ) var ( // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + SystemProcessStatusRunning = SystemProcessStatusKey.String("running") // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping") // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped") // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") + SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct") ) -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +// The Android platform on which the Android application is running. const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It represents the uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). // // Type: string // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") ) -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. 
It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It represents the uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) } -// The attributes used to describe telemetry in the context of databases. +// Attributes for AWS DynamoDB. const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. // - // Type: Enum + // Type: string[] // RequirementLevel: Optional // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. // - // Type: string + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. // - // Type: string + // Type: boolean // RequirementLevel: Optional // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. 
It represents the - // whether or not the query is idempotent. + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. // - // Type: boolean + // Type: string[] // RequirementLevel: Optional // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. how many rows will be returned at once. + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. // - // Type: int + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. // - // Type: string + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. 
If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field // - // Type: string + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. // - // Type: Enum + // Type: string // RequirementLevel: Optional // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. 
It represents the cosmos - // DB container name. + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. // - // Type: Enum + // Type: double // RequirementLevel: Optional // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the rU - // consumed for that operation + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. 
// - // Type: int + // Type: boolean // RequirementLevel: Optional // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. // - // Type: string + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. 
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. // - // Type: string + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. // - // Type: string + // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. 
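// A hypothetical ListTables instrumentation sketch — an aside, not part of
// the v1.25.0 file. It pairs the request-side and response-side helpers
// defined here on one span; the span and the example values are assumed:
//
//	span.SetAttributes(
//		AWSDynamoDBExclusiveStartTable("Users"),
//		AWSDynamoDBTableCount(20),
//	)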
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. 
It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space // - // Type: string + // Type: string[] // RequirementLevel: Optional // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). 
- DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device // - // Type: string + // Type: boolean // RequirementLevel: Optional // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. 
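(A minimal usage sketch, not part of the vendored diff: it builds the same `browser.*` pairs that the helper functions defined further down produce, using only the `go.opentelemetry.io/otel/attribute` package; the literal values are illustrative.)

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Equivalent to what BrowserLanguage("en-US") and BrowserMobile(false)
	// below return.
	lang := attribute.Key("browser.language").String("en-US")
	mobile := attribute.Key("browser.mobile").Bool(false)
	fmt.Println(lang.Key, "=", lang.Value.AsString())   // browser.language = en-US
	fmt.Println(mobile.Key, "=", mobile.Value.AsBool()) // browser.mobile = false
}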
+ BrowserPlatformKey = attribute.Key("browser.platform") +) - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ClientAddressKey is the attribute Key conforming to the "client.address" + // semantic conventions. It represents the client address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.address` SHOULD represent the client address + // behind any intermediaries, for example proxies, if it's available. + ClientAddressKey = attribute.Key("client.address") + + // ClientPortKey is the attribute Key conforming to the "client.port" + // semantic conventions. It represents the client port number. // // Type: int // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through + // an intermediary, `client.port` SHOULD represent the client port behind + // any intermediaries, for example proxies, if it's available. + ClientPortKey = attribute.Key("client.port") +) - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. 
It represents the name of the primary table that - the operation is acting upon, including the database name (if - applicable). +// ClientAddress returns an attribute KeyValue conforming to the +// "client.address" semantic conventions. It represents the client address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func ClientAddress(val string) attribute.KeyValue { + return ClientAddressKey.String(val) +} + +// ClientPort returns an attribute KeyValue conforming to the "client.port" +// semantic conventions. It represents the client port number. +func ClientPort(val int) attribute.KeyValue { + return ClientPortKey.Int(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the + // availability zone where the resource is running. Cloud regions often + // have multiple, isolated locations known as zones to increase + // availability. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: experimental - DBSystemKey = attribute.Key("db.system") + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider.
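As a sketch of how server-side instrumentation might populate these two attributes (illustrative only; per the notes above, production code should prefer the client address behind intermediaries, e.g. from a `Forwarded` header, when available; `clientAttrs` is a hypothetical helper):

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
)

// clientAttrs derives client.address and client.port from the peer socket
// address of an incoming request.
func clientAttrs(r *http.Request) []attribute.KeyValue {
	host, portStr, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		return nil
	}
	port, _ := strconv.Atoi(portStr)
	return []attribute.KeyValue{
		attribute.Key("client.address").String(host),
		attribute.Key("client.port").Int(port),
	}
}

func main() {
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "10.1.2.80:65123"
	fmt.Println(clientAttrs(req)) // prints the two derived attributes
}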
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running in. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider.
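The AWS Lambda rule quoted above can be sketched as follows (illustrative; `lambdaResourceID` and its assumption that an alias- or version-qualified function ARN has eight colon-separated parts are not part of this package):

package main

import (
	"fmt"
	"strings"
)

// lambdaResourceID replaces an alias suffix on the invoked ARN with the
// resolved function version, per the note above.
func lambdaResourceID(invokedARN, resolvedVersion string) string {
	parts := strings.Split(invokedARN, ":")
	if len(parts) == 8 { // arn:aws:lambda:region:account:function:name:alias
		parts[7] = resolvedVersion
		return strings.Join(parts, ":")
	}
	return invokedARN + ":" + resolvedVersion
}

func main() {
	fmt.Println(lambdaResourceID(
		"arn:aws:lambda:us-east-1:123456789012:function:my-function:PROD", "42"))
	// arn:aws:lambda:us-east-1:123456789012:function:my-function:42
}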
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") ) var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + 
CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the +// availability zone where the resource is running. Cloud regions often have +// multiple, isolated locations known as zones to increase availability.
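Resource-level attributes like these are normally attached once, when the SDK resource is constructed. A minimal sketch (it uses `go.opentelemetry.io/otel/sdk/resource`, which this module already depends on; the empty schema URL and the literal values are simplifications):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res := resource.NewWithAttributes(
		"", // schema URL omitted in this sketch
		attribute.Key("cloud.provider").String("aws"),
		attribute.Key("cloud.region").String("us-east-1"),
		attribute.Key("cloud.availability_zone").String("us-east-1c"),
		attribute.Key("cloud.account.id").String("111111111111"),
	)
	fmt.Println(res)
}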
+func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running in. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on +// Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// Attributes for CloudEvents. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), + // which uniquely identifies the event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), + // which identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), + // which contains a value describing the type of event related to the + // originating occurrence.
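One plausible way to map a received event envelope onto the `cloudevents.*` keys above (the `event` struct is a hypothetical stand-in, not a real CloudEvents SDK type):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// event is an illustrative envelope; real code would read these fields from
// a CloudEvents SDK object.
type event struct {
	ID, Source, SpecVersion, Type string
}

func cloudEventAttrs(e event) []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.Key("cloudevents.event_id").String(e.ID),
		attribute.Key("cloudevents.event_source").String(e.Source),
		attribute.Key("cloudevents.event_spec_version").String(e.SpecVersion),
		attribute.Key("cloudevents.event_type").String(e.Type),
	}
}

func main() {
	fmt.Println(cloudEventAttrs(event{
		ID:          "123e4567-e89b-12d3-a456-426614174000",
		Source:      "https://github.com/cloudevents",
		SpecVersion: "1.0",
		Type:        "com.github.pull_request.opened",
	}))
}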
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), +// which uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), +// which identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), +// which contains a value describing the type of event related to the +// originating occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// These attributes allow reporting this unit of code and therefore provide +// more context about the span. +const ( + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. 
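One way to fill the `code.*` attributes at runtime is `runtime.Caller`, splitting the fully qualified name into namespace and function as described above. A minimal sketch (the last-dot split is a heuristic and does not account for every receiver syntax):

package main

import (
	"fmt"
	"runtime"
	"strings"

	"go.opentelemetry.io/otel/attribute"
)

// codeAttrs captures code.namespace, code.function, code.filepath and
// code.lineno for its caller.
func codeAttrs() []attribute.KeyValue {
	pc, file, line, ok := runtime.Caller(1)
	if !ok {
		return nil
	}
	fn := runtime.FuncForPC(pc)
	if fn == nil {
		return nil
	}
	full := fn.Name() // e.g. "main.main"
	namespace, function := "", full
	if i := strings.LastIndex(full, "."); i >= 0 {
		namespace, function = full[:i], full[i+1:]
	}
	return []attribute.KeyValue{
		attribute.Key("code.namespace").String(namespace),
		attribute.Key("code.function").String(function),
		attribute.Key("code.filepath").String(file),
		attribute.Key("code.lineno").Int(line),
	}
}

func main() {
	fmt.Println(codeAttrs())
}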
+func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeStacktrace returns an attribute KeyValue conforming to the +// "code.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func CodeStacktrace(val string) attribute.KeyValue { + return CodeStacktraceKey.String(val) +} + +// A container instance. +const ( + // ContainerCommandKey is the attribute Key conforming to the + // "container.command" semantic conventions. It represents the command used + // to run the container (i.e. the command name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended + // to remove them to prevent potential leakage. + ContainerCommandKey = attribute.Key("container.command") + + // ContainerCommandArgsKey is the attribute Key conforming to the + // "container.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) run by the + // container. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + ContainerCommandArgsKey = attribute.Key("container.command_args") + + // ContainerCommandLineKey is the attribute Key conforming to the + // "container.command_line" semantic conventions. It represents the full + // command run by the container as a single string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + ContainerCommandLineKey = attribute.Key("container.command_line") + + // ContainerCPUStateKey is the attribute Key conforming to the + // "container.cpu.state" semantic conventions. It represents the CPU state + // for this data point. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + ContainerCPUStateKey = attribute.Key("container.cpu.state") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerImageIDKey is the attribute Key conforming to the + // "container.image.id" semantic conventions. It represents the runtime + // specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; `container.image.id` + // corresponds to the `Image` field from the Docker container inspect + // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) + // endpoint.
+ // K8S defines a link to the container registry repository with digest + // `"imageID": "registry.azurecr.io + // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using `oci.manifest.digest` if it is important to + // identify the same image in different environments/runtimes. + ContainerImageIDKey = attribute.Key("container.image.id") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageRepoDigestsKey is the attribute Key conforming to the + // "container.image.repo_digests" semantic conventions. It represents the + // repo digests of the container image as provided by the container + // runtime. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', + // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: + // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) + // and + // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) + // report those under the `RepoDigests` field. + ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") + + // ContainerImageTagsKey is the attribute Key conforming to the + // "container.image.tags" semantic conventions. It represents the container + // image tags. An example can be found in [Docker Image + // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). + // Should be only the `<tag>` section of the full name for example from + // `registry.example.com/my-org/my-image:<tag>`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + ContainerImageTagsKey = attribute.Key("container.image.tags") + + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") ) var ( - // Some other SQL database. Fallback only.
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + ContainerCPUStateUser = ContainerCPUStateKey.String("user") + // When CPU is used by the system (host OS) + ContainerCPUStateSystem = ContainerCPUStateKey.String("system") + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel") +) + +// ContainerCommand returns an attribute KeyValue conforming to the +// "container.command" semantic conventions. It represents the command used to +// run the container (i.e. the command name). +func ContainerCommand(val string) attribute.KeyValue { + return ContainerCommandKey.String(val) +} + +// ContainerCommandArgs returns an attribute KeyValue conforming to the +// "container.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) run by the +// container. +func ContainerCommandArgs(val ...string) attribute.KeyValue { + return ContainerCommandArgsKey.StringSlice(val) +} + +// ContainerCommandLine returns an attribute KeyValue conforming to the +// "container.command_line" semantic conventions. It represents the full +// command run by the container as a single string. +func ContainerCommandLine(val string) attribute.KeyValue { + return ContainerCommandLineKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerImageID returns an attribute KeyValue conforming to the +// "container.image.id" semantic conventions.
It represents the runtime +// specific image identifier. Usually a hash algorithm followed by a UUID. +func ContainerImageID(val string) attribute.KeyValue { + return ContainerImageIDKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageRepoDigests returns an attribute KeyValue conforming to the +// "container.image.repo_digests" semantic conventions. It represents the repo +// digests of the container image as provided by the container runtime. +func ContainerImageRepoDigests(val ...string) attribute.KeyValue { + return ContainerImageRepoDigestsKey.StringSlice(val) +} + +// ContainerImageTags returns an attribute KeyValue conforming to the +// "container.image.tags" semantic conventions. It represents the container +// image tags. An example can be found in [Docker Image +// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). +// Should be only the `<tag>` section of the full name for example from +// `registry.example.com/my-org/my-image:<tag>`. +func ContainerImageTags(val ...string) attribute.KeyValue { + return ContainerImageTagsKey.StringSlice(val) +} + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// The attributes used to describe telemetry in the context of databases. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary Cassandra table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU + // consumed for that operation. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents the + // identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") + + // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" + // semantic conventions. It represents an identifier (address, unique name, + // or any other identifier) of the database instance that is executing + // queries or mutations on the current connection. This is useful in cases + // where the database is running in a clustered environment and the + // instrumentation is able to record the node executing the query. The + // client may obtain this value in databases like MySQL using queries like + // `select @@hostname`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + DBInstanceIDKey = attribute.Key("db.instance.id") + + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the MongoDB + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") + + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // being connected to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer + // required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the name of the database being accessed. For + // commands that switch the database, this should be set to the target + // database (even if the command fails). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") + + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") + + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + DBStatementKey = attribute.Key("db.statement") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used.
See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBSystemKey = attribute.Key("db.system") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) + +var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB @@ -1332,864 +2433,2001 @@ var ( DBSystemTrino = DBSystemKey.String("trino") ) -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. 
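
// A minimal sketch (not part of the generated file) of how the Cassandra
// helpers and enum values above combine on a database client span. The semconv
// import path is assumed from the module versions in this PR, and
// DBSystemCassandra is assumed to live in the elided middle of the db.system
// value list; everything else is defined in this hunk.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func queryOrders(ctx context.Context) {
	_, span := otel.Tracer("example/db").Start(ctx, "SELECT mykeyspace.orders",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemCassandra, // assumed: defined in the elided db.system values
			semconv.DBName("mykeyspace"),
			semconv.DBOperation("SELECT"),
			semconv.DBCassandraTable("orders"),
			semconv.DBCassandraPageSize(5000),
			semconv.DBCassandraIdempotence(true),
			semconv.DBCassandraConsistencyLevelLocalQuorum,
		))
	defer span.End()
	// ... execute the query here ...
}
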
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary Cassandra table that the operation is acting upon, including the +// keyspace name (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} + +// DBCosmosDBContainer returns an attribute KeyValue conforming to the +// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB +// container name. +func DBCosmosDBContainer(val string) attribute.KeyValue { + return DBCosmosDBContainerKey.String(val) +} + +// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the rU +// consumed for that operation +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} + +// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} + +// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} + +// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos +// DB sub status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +} + +// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// represents the identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} + +// DBInstanceID returns an attribute KeyValue conforming to the +// "db.instance.id" semantic conventions. It represents an identifier (address, +// unique name, or any other identifier) of the database instance that is +// executing queries or mutations on the current connection. This is useful in +// cases where the database is running in a clustered environment and the +// instrumentation is able to record the node executing the query. The client +// may obtain this value in databases like MySQL using queries like `select +// @@hostname`. +func DBInstanceID(val string) attribute.KeyValue { + return DBInstanceIDKey.String(val) +} + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the MongoDB +// collection being accessed within the database stated in `db.name`. 
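
// A companion sketch combining the Cosmos DB enum constants above with the
// typed helpers defined nearby: the attribute set for one Query operation.
// DBSystemCosmosDB is assumed from the elided portion of the db.system list;
// imports are as in the earlier sketch plus go.opentelemetry.io/otel/attribute.
func cosmosQueryAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.DBSystemCosmosDB, // assumed: defined in the elided db.system values
		semconv.DBCosmosDBConnectionModeGateway,
		semconv.DBCosmosDBOperationTypeQuery,
		semconv.DBCosmosDBContainer("orders"),
		semconv.DBCosmosDBStatusCode(200),
		semconv.DBCosmosDBRequestCharge(2.33),
	}
}
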
+func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. 
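
// Sketch: deployment.environment is a resource attribute, so it is normally set
// once on the SDK resource next to service.name — the exact pairing whose
// uniqueness rules the note above spells out. ServiceName and SchemaURL are
// assumed from elsewhere in this package; resource and sdktrace are
// go.opentelemetry.io/otel/sdk/resource and go.opentelemetry.io/otel/sdk/trace.
func newTracerProvider() *sdktrace.TracerProvider {
	res := resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ServiceName("frontend"),
		semconv.DeploymentEnvironment("production"),
	)
	return sdktrace.NewTracerProvider(sdktrace.WithResource(res))
}
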
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// "Describes deprecated db attributes." +const ( + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // deprecated, use `server.address`, `server.port` attributes instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // deprecated, use `db.instance.id` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // removed, no replacement at this time. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") +) + +// DBConnectionString returns an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the deprecated, +// use `server.address`, `server.port` attributes instead. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// deprecated, use `db.instance.id` instead. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the removed, +// no replacement at this time. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// Describes deprecated HTTP attributes. +const ( + // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" + // semantic conventions. It represents the deprecated, use + // `network.protocol.name` instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HTTPFlavorKey = attribute.Key("http.flavor") + + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the deprecated, use + // `http.request.method` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. 
It represents the + // deprecated, use `http.request.header.content-length` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // deprecated, use `http.response.header.content-length` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the deprecated, use `url.scheme` + // instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the deprecated, + // use `http.response.status_code` instead. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + + // HTTPTargetKey is the attribute Key conforming to the "http.target" + // semantic conventions. It represents the deprecated, use `url.path` and + // `url.query` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/search?q=OpenTelemetry#SemConv' + HTTPTargetKey = attribute.Key("http.target") + + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the deprecated, use `url.full` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + HTTPURLKey = attribute.Key("http.url") + + // HTTPUserAgentKey is the attribute Key conforming to the + // "http.user_agent" semantic conventions. It represents the deprecated, + // use `user_agent.original` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + HTTPUserAgentKey = attribute.Key("http.user_agent") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the deprecated, use +// `http.request.method` instead. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the +// deprecated, use `http.request.header.content-length` instead. 
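
// Sketch of the one-for-one migration these deprecation notes describe. The
// replacement attribute names are taken verbatim from the comments; raw string
// keys are used for them because their typed helpers sit outside this excerpt.
// Requires go.opentelemetry.io/otel/attribute.
func migratedHTTPAttrs() (deprecated, replacements []attribute.KeyValue) {
	deprecated = []attribute.KeyValue{
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(200),
		semconv.HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
	}
	replacements = []attribute.KeyValue{
		attribute.String("http.request.method", "GET"),
		attribute.Int("http.response.status_code", 200),
		attribute.String("url.full", "https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
	}
	return deprecated, replacements
}
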
+func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the +// deprecated, use `http.response.header.content-length` instead. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the deprecated, use `url.scheme` +// instead. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the deprecated, use +// `http.response.status_code` instead. +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTPTarget returns an attribute KeyValue conforming to the "http.target" +// semantic conventions. It represents the deprecated, use `url.path` and +// `url.query` instead. +func HTTPTarget(val string) attribute.KeyValue { + return HTTPTargetKey.String(val) +} + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the deprecated, use `url.full` instead. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPUserAgent returns an attribute KeyValue conforming to the +// "http.user_agent" semantic conventions. It represents the deprecated, use +// `user_agent.original` instead. +func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// Describes deprecated messaging attributes. +const ( + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the "Deprecated, use `messaging.destination.partition.id` + // instead." + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") +) + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the "Deprecated, use +// `messaging.destination.partition.id` instead." +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the deprecated, use + // `server.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the deprecated, use `server.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the deprecated, use `server.address` + // on client spans and `client.address` on server spans. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com' + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the deprecated, use `server.port` on + // client spans and `client.port` on server spans. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 8080 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. It represents the deprecated, + // use `network.protocol.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'amqp', 'http', 'mqtt' + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. It represents the + // deprecated, use `network.protocol.version`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3.1.1' + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the deprecated, + // use `network.transport` and `network.type`. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the deprecated, + // use `network.local.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/my.sock' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the deprecated, + // use `network.local.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 8080 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the deprecated, + // use `network.peer.address`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.0.1' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the deprecated, + // no replacement at this time. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/my.sock' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the deprecated, + // use `network.peer.port`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 65531 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the deprecated, use + // `network.transport`. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + NetTransportKey = attribute.Key("net.transport") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the deprecated, use +// `server.address`. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) } -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the deprecated, use +// `server.port`. +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) } -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the deprecated, use +// `server.address` on client spans and `client.address` on server spans. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) } -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary Cassandra table that the operation is acting upon, including the -// keyspace name (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the deprecated, use +// `server.port` on client spans and `client.port` on server spans. 
+func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) } -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. -func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the deprecated, use +// `network.protocol.name`. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) } -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the deprecated, +// use `network.protocol.version`. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) } -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the deprecated, use +// `network.local.address`. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) } -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the deprecated, use +// `network.local.port`. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) } -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the deprecated, use +// `network.peer.address`. +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) } -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the deprecated, no +// replacement at this time. 
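
// The net.* family follows the same pattern: each deprecated key's comment
// names its replacement, so a client span can be dual-emitted during a
// migration window. Replacement names are copied from the comments above;
// imports as in the earlier sketches.
func migratedNetAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		// deprecated helpers defined in this block:
		semconv.NetPeerName("example.com"),
		semconv.NetPeerPort(8080),
		// replacements named in the deprecation comments:
		attribute.String("server.address", "example.com"),
		attribute.Int("server.port", 8080),
	}
}
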
+func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) } -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the deprecated, use +// `network.peer.port`. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) } -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} +// Deprecated system attributes. +const ( + // SystemProcessesStatusKey is the attribute Key conforming to the + // "system.processes.status" semantic conventions. It represents the + // deprecated, use `system.process.status` instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'running' + SystemProcessesStatusKey = attribute.Key("system.processes.status") +) -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} +var ( + // running + SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") + // sleeping + SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") + // stopped + SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") + // defunct + SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") +) -// DBInstanceID returns an attribute KeyValue conforming to the -// "db.instance.id" semantic conventions. It represents an identifier (address, -// unique name, or any other identifier) of the database instance that is -// executing queries or mutations on the current connection. This is useful in -// cases where the database is running in a clustered environment and the -// instrumentation is able to record the node executing the query. The client -// may obtain this value in databases like MySQL using queries like `select -// @@hostname`. -func DBInstanceID(val string) attribute.KeyValue { - return DBInstanceIDKey.String(val) -} +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. 
It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) } -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the MongoDB -// collection being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) } -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). 
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. 
It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) } -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) } -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) } -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) } -// Describes deprecated HTTP attributes. +// These attributes may be used for any disk related operation. const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. // // Type: Enum // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorKey = attribute.Key("http.flavor") + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - // Deprecated: use `http.request.method` instead. - HTTPMethodKey = attribute.Key("http.method") +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. // - // Type: int + // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.request.header.content-length` instead. 
- HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.response.header.content-length` instead. - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - // Deprecated: use `url.scheme` instead. - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - // Deprecated: use `http.response.status_code` instead. - HTTPStatusCodeKey = attribute.Key("http.status_code") + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.path` and `url.query` instead. - HTTPTargetKey = attribute.Key("http.target") + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. 
The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.full` instead. - HTTPURLKey = attribute.Key("http.url") + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It represents the describes a class of error the + // operation ended with. // - // Type: string + // Type: Enum // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - // Deprecated: use `user_agent.original` instead. - HTTPUserAgentKey = attribute.Key("http.user_agent") + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable and SHOULD have low + // cardinality. + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. 
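
// Sketch of the contract the error.type note above describes: the attribute is
// set only on failure, with ErrorTypeOther (defined just below) as the
// documented fallback when no better identifier exists. Imports as in the
// earlier sketches, plus fmt and go.opentelemetry.io/otel/codes.
func finishSpan(span trace.Span, err error) {
	defer span.End()
	if err == nil {
		return // success: per the note, error.type SHOULD NOT be set
	}
	// Use the Go error type as a low-cardinality identifier, or
	// semconv.ErrorTypeOther when the type carries no useful signal.
	span.SetAttributes(semconv.ErrorTypeKey.String(fmt.Sprintf("%T", err)))
	span.RecordError(err)
	span.SetStatus(codes.Error, err.Error())
}
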
+ // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") ) var ( - // HTTP/1.0 + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](#recording-an-exception). // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. // - // Deprecated: use `network.protocol.name` instead. 
- HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") ) -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. -// -// Deprecated: use `http.request.method` instead. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. -// -// Deprecated: use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) } -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. -// -// Deprecated: use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) } -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. -// -// Deprecated: use `url.scheme` instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) } -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. -// -// Deprecated: use `http.response.status_code` instead. 
-func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) } -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. -// -// Deprecated: use `url.path` and `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} +// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. -// -// Deprecated: use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. -// -// Deprecated: use `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address`. - NetHostNameKey = attribute.Key("net.host.name") + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. 
+ // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It + // describes the type of the operation that was performed on the data. // - // Type: int + // Type: Enum // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port`. - NetHostPortKey = attribute.Key("net.host.port") + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address` on client spans and `client.address` on - // server spans. - NetPeerNameKey = attribute.Key("net.peer.name") + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. // - // Type: int + // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port` on client spans and `client.port` on - // server spans. - NetPeerPortKey = attribute.Key("net.peer.port") + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. + // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - // Deprecated: use `network.protocol.name`. - NetProtocolNameKey = attribute.Key("net.protocol.name") + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - // Deprecated: use `network.protocol.version`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. // // Type: Enum // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyKey = attribute.Key("net.sock.family") + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: use `network.local.address`. - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. // // Type: int // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `network.local.port`. - NetSockHostPortKey = attribute.Key("net.sock.host.port") + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - // Deprecated: use `network.peer.address`. - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. 
The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: no replacement at this time. - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. // - // Type: int + // Type: Enum // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - // Deprecated: use `network.peer.port`. - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. // - // Type: Enum + // Type: string // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport`. - NetTransportKey = attribute.Key("net.transport") + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") ) var ( - // IPv4 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - // - // Deprecated: use `network.transport` and `network.type`.
- NetSockFamilyUnix = NetSockFamilyKey.String("unix") + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) var ( - // ip_tcp - // - // Deprecated: use `network.transport`. - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - // - // Deprecated: use `network.transport`. - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
+func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. // - // Deprecated: use `network.transport`. - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. // - // Deprecated: use `network.transport`. 
- NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It SHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. // - // Deprecated: use `network.transport`. - NetTransportOther = NetTransportKey.String("other") + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ) -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. -// -// Deprecated: use `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) } -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. -// -// Deprecated: use `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) } -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. -// -// Deprecated: use `server.address` on client spans and `client.address` on -// server spans. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It SHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) } -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. -// -// Deprecated: use `server.port` on client spans and `client.port` on server -// spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -}
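For the feature-flag attributes introduced above, a minimal sketch of attaching evaluation metadata to an active span; the literal values mirror the documented examples, and the import version is an assumption:

    package example

    import (
        semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // import path assumed
        "go.opentelemetry.io/otel/trace"
    )

    // recordFlagEvaluation annotates span with the outcome of a flag lookup.
    func recordFlagEvaluation(span trace.Span) {
        span.SetAttributes(
            semconv.FeatureFlagKey("logo-color"),
            semconv.FeatureFlagProviderName("Flag Manager"),
            // Prefer a semantic variant such as "red" over the raw value
            // it stands for (e.g. "#c05543").
            semconv.FeatureFlagVariant("red"),
        )
    }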
+// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. -// -// Deprecated: use `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} + // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. -// -// Deprecated: use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} + // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. -// -// Deprecated: use `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) + // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") + + // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) } -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. -// -// Deprecated: use `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) } -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. -// -// Deprecated: use `network.peer.address`.
-func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) } -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. -// -// Deprecated: no replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) } -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. -// -// Deprecated: use `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) } -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. +// Attributes for Google Cloud Run. const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. // // Type: int // RequirementLevel: Optional // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") ) -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) } -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) } -// These attributes may be used for any disk related operation. +// Attributes for Google Compute Engine (GCE). const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). // - // Type: Enum + // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. 
This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") ) -// The shared attributes used to report an error. +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches, or disk arrays. const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. // // Type: Enum // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable and SHOULD have low - // cardinality. - // Instrumentations SHOULD document the list of errors they report. + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`.
+ // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. // - // Type: boolean + // Type: string // RequirementLevel: Optional // Stability: experimental - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. 
See the table below for the + // sources to use to determine the `machine-id` based on operating system. // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](#recording-an-exception). + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. + // For Cloud, this value is from the provider. // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions.
It represents the available IP addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. // // Type: string // RequirementLevel: Optional // Stability: experimental - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") ) -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) } -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. 
It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) } -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) } -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. +func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +}
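The host.* keys and helpers above are resource attributes; a hedged sketch of describing the local host on an SDK resource (placeholder values; the sdk/resource package usage and the semconv import version are assumptions):

    package example

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // import path assumed
    )

    // hostResource builds a Resource describing the local host with
    // placeholder values drawn from the examples documented above.
    func hostResource() *resource.Resource {
        return resource.NewWithAttributes(
            semconv.SchemaURL, // schema URL exported by the semconv package
            semconv.HostName("opentelemetry-test"),
            semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
            semconv.HostArchAMD64,
            semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
        )
    }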
+ +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) } // Semantic convention attributes in the HTTP namespace. const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of + // the HTTP connection in the HTTP connection pool. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'active', 'idle' + HTTPConnectionStateKey = attribute.Key("http.connection.state") + // HTTPRequestBodySizeKey is the attribute Key conforming to the // "http.request.body.size" semantic conventions. It represents the size of // the request payload body in bytes. This is the number of bytes @@ -2265,6 +4503,18 @@ const ( // network issues, or any other). HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + // HTTPResponseBodySizeKey is the attribute Key conforming to the // "http.response.body.size" semantic conventions. It represents the size // of the response payload body in bytes. This is the number of bytes @@ -2280,6 +4530,18 @@ const ( // Examples: 3495 HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions.
It represents the // [HTTP response status @@ -2307,6 +4569,13 @@ const ( HTTPRouteKey = attribute.Key("http.route") ) +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + var ( // CONNECT method HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") @@ -2330,54 +4599,449 @@ var ( HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") ) -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. It represents the total size of +// the response in bytes. 
This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. 
It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). 
+func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
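(Taken together, the k8s.* attributes pin telemetry to one container in one pod. A hedged sketch building such a resource from the constructors above, reusing the example values from the doc comments.)

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// k8sResource sketches how the constructors above combine into a resource
// identifying one container within a pod. All values are illustrative.
func k8sResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
		semconv.K8SContainerName("redis"),
		semconv.K8SContainerRestartCount(0), // disambiguates restarted containers
	)
}
```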
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) } -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) } -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) } -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) } -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) } -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) } // Attributes describing telemetry around messaging systems and messaging @@ -2433,6 +5097,17 @@ const ( // uniquely identify the broker. 
MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + // MessagingDestinationTemplateKey is the attribute Key conforming to the // "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name @@ -2483,6 +5158,28 @@ const ( // SHOULD uniquely identify the broker. MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. // It represents the ordering key for a given message. If the attribute is @@ -2505,16 +5202,6 @@ const ( // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure @@ -2618,6 +5305,16 @@ const ( // Examples: 'myKey' MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the // "messaging.rocketmq.client_group" semantic conventions. 
It represents // the name of the RocketMQ producer/consumer group that is handling the @@ -2713,6 +5410,49 @@ const ( // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key + // conforming to the "messaging.servicebus.destination.subscription_name" + // semantic conventions. It represents the name of the subscription in the + // topic messages are received from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mySubscription' + MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") + + // MessagingServicebusDispositionStatusKey is the attribute Key conforming + // to the "messaging.servicebus.disposition_status" semantic conventions. + // It represents the describes the [settlement + // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServicebusMessageDeliveryCountKey is the attribute Key + // conforming to the "messaging.servicebus.message.delivery_count" semantic + // conventions. It represents the number of deliveries that have been + // attempted for this message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key + // conforming to the "messaging.servicebus.message.enqueued_time" semantic + // conventions. It represents the UTC epoch seconds at which the message + // has been accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") + // MessagingSystemKey is the attribute Key conforming to the // "messaging.system" semantic conventions. It represents an identifier for // the messaging system being used. See below for a list of well-known @@ -2731,8 +5471,10 @@ var ( MessagingOperationCreate = MessagingOperationKey.String("create") // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs - MessagingOperationDeliver = MessagingOperationKey.String("deliver") + // One or more messages are delivered to or processed by a consumer + MessagingOperationDeliver = MessagingOperationKey.String("process") + // One or more messages are settled + MessagingOperationSettle = MessagingOperationKey.String("settle") ) var ( @@ -2753,17 +5495,28 @@ var ( MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) +var ( + // Message is completed + MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") + // Message is abandoned + MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") + // Message is deferred + MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") +) + var ( // Apache ActiveMQ MessagingSystemActivemq = MessagingSystemKey.String("activemq") // Amazon Simple Queue Service (SQS) MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") // Azure Event Grid - MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") // Azure Event Hubs - MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") // Azure Service Bus - MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") // Google Cloud Pub/Sub MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") // Java Message Service @@ -2806,6 +5559,14 @@ func MessagingDestinationName(val string) attribute.KeyValue { return MessagingDestinationNameKey.String(val) } +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + // MessagingDestinationTemplate returns an attribute KeyValue conforming to // the "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name @@ -2836,6 +5597,22 @@ func MessagingDestinationPublishName(val string) attribute.KeyValue { return MessagingDestinationPublishNameKey.String(val) } +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. 
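(Two behavioural changes worth flagging in this hunk: MessagingOperationDeliver now carries the string value "process", and a new "settle" operation appears alongside the Azure-specific attributes. A sketch of a consumer-side settlement span using the new symbols; the broker, queue name, and counts are invented, v1.24.0 import assumed.)

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// settleMessage sketches annotating a Service Bus settlement with the
// attributes and enum members added in this diff.
func settleMessage(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "orders settle")
	defer span.End()

	span.SetAttributes(
		semconv.MessagingSystemServicebus, // renamed from azure_servicebus
		semconv.MessagingOperationSettle,  // new operation value
		semconv.MessagingDestinationName("orders"),
		// Replaces the Kafka-only messaging.kafka.destination.partition:
		semconv.MessagingDestinationPartitionID("1"),
		semconv.MessagingServicebusDispositionStatusComplete,
		semconv.MessagingServicebusMessageDeliveryCount(2),
	)
}
```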
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + // MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue // conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic // conventions. It represents the ordering key for a given message. If the @@ -2852,13 +5629,6 @@ func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { return MessagingKafkaConsumerGroupKey.String(val) } -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - // MessagingKafkaMessageKey returns an attribute KeyValue conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure they're @@ -2920,6 +5690,13 @@ func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { return MessagingRabbitmqDestinationRoutingKeyKey.String(val) } +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the @@ -2975,6 +5752,31 @@ func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } +// MessagingServicebusDestinationSubscriptionName returns an attribute +// KeyValue conforming to the +// "messaging.servicebus.destination.subscription_name" semantic conventions. +// It represents the name of the subscription in the topic messages are +// received from. +func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingServicebusDestinationSubscriptionNameKey.String(val) +} + +// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message. +func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + // These attributes may be used for any network related operation. const ( // NetworkCarrierIccKey is the attribute Key conforming to the @@ -3106,16 +5908,16 @@ const ( // NetworkProtocolVersionKey is the attribute Key conforming to the // "network.protocol.version" semantic conventions. 
It represents the - // version of the protocol specified in `network.protocol.name`. + // actual version of the protocol used for network communication. // // Type: string // RequirementLevel: Optional // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. NetworkProtocolVersionKey = attribute.Key("network.protocol.version") // NetworkTransportKey is the attribute Key conforming to the @@ -3273,33 +6075,390 @@ func NetworkLocalPort(val int) attribute.KeyValue { return NetworkLocalPortKey.Int(val) } -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). 
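(The reworded note makes network.protocol.version the version actually negotiated on the wire, not the client library's own version. For a TLS server that means deriving it from ALPN, roughly as below; the h2 / http/1.1 mapping is the conventional ALPN one, not something this diff defines, and the version is simply omitted when unknown, per the SHOULD NOT.)

```go
package example

import (
	"crypto/tls"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// protocolAttrs records the negotiated protocol version, omitting it
// when ALPN did not settle on a known value.
func protocolAttrs(state tls.ConnectionState) []attribute.KeyValue {
	attrs := []attribute.KeyValue{semconv.NetworkProtocolName("http")}
	switch state.NegotiatedProtocol {
	case "h2":
		attrs = append(attrs, semconv.NetworkProtocolVersion("2"))
	case "http/1.1":
		attrs = append(attrs, semconv.NetworkProtocolVersion("1.1"))
	}
	return attrs
}
```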
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
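(The os.type enum plus the string attributes above usually come from a resource detector. A simplified sketch mapping GOOS to the enum values; the name, version, and build strings are illustrative, not detected.)

```go
package example

import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// osAttrs sketches the os.* attributes; a real detector would read these
// from the platform rather than hard-code them.
func osAttrs() []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.OSName("Ubuntu"),     // illustrative
		semconv.OSVersion("18.04.1"), // illustrative
		semconv.OSBuildID("22621"),   // illustrative
	}
	switch runtime.GOOS {
	case "linux":
		attrs = append(attrs, semconv.OSTypeLinux)
	case "windows":
		attrs = append(attrs, semconv.OSTypeWindows)
	case "darwin":
		attrs = append(attrs, semconv.OSTypeDarwin)
	}
	return attrs
}
```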
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. 
On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. 
For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) } -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) } -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) } -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. 
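(A Go process can fill most of the process.* attributes above straight from the standard library; per the process.runtime.name note, a compiled native binary reports its compiler as the runtime name. A minimal sketch:)

```go
package example

import (
	"os"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// processAttrs self-describes the current process using stdlib values.
func processAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessCommandArgs(os.Args...), // full argv, command included
		semconv.ProcessRuntimeName("go"),       // the compiler, for native binaries
		semconv.ProcessRuntimeVersion(runtime.Version()),
	}
}
```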
It represents the version -// of the protocol specified in `network.protocol.name`. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) } // Attributes for remote procedure calls. @@ -3591,6 +6750,180 @@ func ServerPort(val int) attribute.KeyValue { return ServerPortKey.Int(val) } +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace should be treated as confidential, + // being the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended each division (e.g. a worker + // thread in unicorn) to have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating an UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. 
This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. 
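(The service.instance.id note above boils down to: keep the namespace/name/instance triplet globally unique, and prefer an opaque random UUID over anything that leaks pod names or similar. A sketch using github.com/google/uuid as one common generator; that dependency is illustrative, not something this diff adds.)

```go
package example

import (
	"github.com/google/uuid"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// serviceResource builds the service identity triplet described above,
// with a random v4 UUID as the instance ID per the note's recommendation.
func serviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNamespace("Shop"),        // example value from the comments
		semconv.ServiceName("shoppingcart"),     // same for all horizontal instances
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID(uuid.NewString()),
	)
}
```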
+// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + // These attributes may be used to describe the sender of a network // exchange/packet. These should be used when there is no client/server // relationship between the two sides, or when that relationship is unknown. @@ -3639,6 +6972,165 @@ func SourcePort(val int) attribute.KeyValue { return SourcePortKey.Int(val) } +// Attributes for telemetry SDK. +const ( + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute + // to `opentelemetry`. + // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. 
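(Stepping back to the session attributes introduced just above: when a session rolls over after inactivity or timeout, the new session's telemetry can carry the previous ID so backends can stitch the two together. A hedged sketch; the IDs are placeholders supplied by the caller.)

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// startSessionSpan starts a span linked to both the current session and,
// when known, the session that preceded it.
func startSessionSpan(ctx context.Context, sessionID, previousID string) trace.Span {
	_, span := otel.Tracer("example").Start(ctx, "user-action",
		trace.WithAttributes(
			semconv.SessionID(sessionID),
			semconv.SessionPreviousID(previousID),
		))
	return span
}
```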
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryDistroName returns an attribute KeyValue conforming to the +// "telemetry.distro.name" semantic conventions. It represents the name of the +// auto instrumentation agent or distribution, if used. +func TelemetryDistroName(val string) attribute.KeyValue { + return TelemetryDistroNameKey.String(val) +} + +// TelemetryDistroVersion returns an attribute KeyValue conforming to the +// "telemetry.distro.version" semantic conventions. It represents the version +// string of the auto instrumentation agent or distribution, if used. 
+func TelemetryDistroVersion(val string) attribute.KeyValue { + return TelemetryDistroVersionKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + // Semantic convention attributes in the TLS namespace. const ( // TLSCipherKey is the attribute Key conforming to the "tls.cipher" @@ -4213,6 +7705,37 @@ func TLSServerSubject(val string) attribute.KeyValue { // Attributes describing URL. const ( + // URLDomainKey is the attribute Key conforming to the "url.domain" + // semantic conventions. It represents the domain extracted from the + // `url.full`, such as "opentelemetry.io". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + // URLFragmentKey is the attribute Key conforming to the "url.fragment" // semantic conventions. It represents the [URI // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component @@ -4241,10 +7764,28 @@ const ( // password SHOULD be redacted and attribute's value SHOULD be // `https://REDACTED:REDACTED@www.example.com/`. // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. + // be reconstructed). 
Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. URLFullKey = attribute.Key("url.full") + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. + URLOriginalKey = attribute.Key("url.original") + // URLPathKey is the attribute Key conforming to the "url.path" semantic // conventions. It represents the [URI // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component @@ -4253,8 +7794,19 @@ const ( // RequirementLevel: Optional // Stability: stable // Examples: '/search' + // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when + // instrumentations can identify it. URLPathKey = attribute.Key("url.path") + // URLPortKey is the attribute Key conforming to the "url.port" semantic + // conventions. It represents the port extracted from the `url.full` + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 443 + URLPortKey = attribute.Key("url.port") + // URLQueryKey is the attribute Key conforming to the "url.query" semantic // conventions. It represents the [URI // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component @@ -4263,10 +7815,24 @@ const ( // RequirementLevel: Optional // Stability: stable // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when + // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when // instrumentations can identify it. URLQueryKey = attribute.Key("url.query") + // URLRegisteredDomainKey is the attribute Key conforming to the + // "url.registered_domain" semantic conventions. It represents the highest + // registered url domain, stripped of the subdomain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). For example, the registered domain for + // `foo.example.com` is `example.com`. Trying to approximate this by simply + // taking the last two labels will not work well for TLDs such as `co.uk`. + URLRegisteredDomainKey = attribute.Key("url.registered_domain") + // URLSchemeKey is the attribute Key conforming to the "url.scheme" // semantic conventions. It represents the [URI // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component @@ -4277,8 +7843,53 @@ const ( // Stability: stable // Examples: 'https', 'ftp', 'telnet' URLSchemeKey = attribute.Key("url.scheme") + + // URLSubdomainKey is the attribute Key conforming to the "url.subdomain" + // semantic conventions. 
It represents the subdomain portion of a fully + qualified domain name, which includes all of the names except the host + name under the registered_domain. In a partially qualified domain, or if the + qualification level of the full name cannot be determined, subdomain + contains all of the names below the registered domain. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If + // the domain has multiple levels of subdomain, such as + // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`, + // with no trailing period. + URLSubdomainKey = attribute.Key("url.subdomain") + + // URLTopLevelDomainKey is the attribute Key conforming to the + // "url.top_level_domain" semantic conventions. It represents the effective + // top level domain (eTLD), also known as the domain suffix, which is the + // last part of the domain name. For example, the top level domain for + // example.com is `com`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the [public suffix + // list](http://publicsuffix.org). + URLTopLevelDomainKey = attribute.Key("url.top_level_domain") ) +// URLDomain returns an attribute KeyValue conforming to the "url.domain" +// semantic conventions. It represents the domain extracted from the +// `url.full`, such as "opentelemetry.io". +func URLDomain(val string) attribute.KeyValue { + return URLDomainKey.String(val) +} + +// URLExtension returns an attribute KeyValue conforming to the +// "url.extension" semantic conventions. It represents the file extension +// extracted from the `url.full`, excluding the leading dot. +func URLExtension(val string) attribute.KeyValue { + return URLExtensionKey.String(val) +} + // URLFragment returns an attribute KeyValue conforming to the // "url.fragment" semantic conventions. It represents the [URI // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component @@ -4293,6 +7904,13 @@ func URLFull(val string) attribute.KeyValue { return URLFullKey.String(val) } +// URLOriginal returns an attribute KeyValue conforming to the +// "url.original" semantic conventions. It represents the unmodified original +// URL as seen in the event source. +func URLOriginal(val string) attribute.KeyValue { + return URLOriginalKey.String(val) +} + // URLPath returns an attribute KeyValue conforming to the "url.path" // semantic conventions. It represents the [URI // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component @@ -4300,6 +7918,12 @@ func URLPath(val string) attribute.KeyValue { return URLPathKey.String(val) } +// URLPort returns an attribute KeyValue conforming to the "url.port" +// semantic conventions. It represents the port extracted from the `url.full`. +func URLPort(val int) attribute.KeyValue { + return URLPortKey.Int(val) +} + // URLQuery returns an attribute KeyValue conforming to the "url.query" // semantic conventions. It represents the [URI // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component @@ -4307,6 +7931,13 @@ func URLQuery(val string) attribute.KeyValue { return URLQueryKey.String(val) } +// URLRegisteredDomain returns an attribute KeyValue conforming to the +// "url.registered_domain" semantic conventions. It represents the highest +// registered url domain, stripped of the subdomain. +func URLRegisteredDomain(val string) attribute.KeyValue { + return URLRegisteredDomainKey.String(val) +} + // URLScheme returns an attribute KeyValue conforming to the "url.scheme" // semantic conventions. It represents the [URI // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component @@ -4315,8 +7946,42 @@ func URLScheme(val string) attribute.KeyValue { return URLSchemeKey.String(val) } +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name, which includes all of the names except the +// host name under the registered_domain. In a partially qualified domain, or +// if the qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, which is the last part +// of the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + // Describes user-agent attributes. const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version`. + UserAgentNameKey = attribute.Key("user_agent.name") + // UserAgentOriginalKey is the attribute Key conforming to the // "user_agent.original" semantic conventions. It represents the value of // the [HTTP @@ -4328,10 +7993,34 @@ const ( // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected.
In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") ) +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + // UserAgentOriginal returns an attribute KeyValue conforming to the // "user_agent.original" semantic conventions. It represents the value of the // [HTTP @@ -4341,47 +8030,9 @@ func UserAgentOriginal(val string) attribute.KeyValue { return UserAgentOriginalKey.String(val) } -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/doc.go similarity index 96% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.25.0/doc.go index d27e8a8f..facbdf5f 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. 
// // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.24.0 +// patterns for OpenTelemetry things. This package represents the v1.25.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/event.go similarity index 67% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.25.0/event.go index 6c019aaf..4f0ccb44 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/event.go @@ -3,7 +3,7 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" import "go.opentelemetry.io/otel/attribute" @@ -62,72 +62,6 @@ var ( AndroidStateForeground = AndroidStateKey.String("foreground") ) -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. 
-func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - // RPC received/sent message. const ( // MessageCompressedSizeKey is the attribute Key conforming to the diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/exception.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.25.0/exception.go index 7235bb51..e36ab1e3 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/metric.go similarity index 86% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.25.0/metric.go index a6b953f6..1c8b5bde 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/metric.go @@ -3,10 +3,47 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" const ( + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." 
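The generated Name/Unit/Description triples above are intended to seed instrument definitions. A minimal sketch of wiring the new container CPU constants into a counter, assuming an already configured global MeterProvider (the scope name "example" and the helper name are illustrative, not part of this package):

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
)

// newContainerCPUTimeCounter builds a counter whose name, unit, and
// description all come from the generated semconv constants.
func newContainerCPUTimeCounter() (metric.Float64Counter, error) {
	meter := otel.Meter("example") // illustrative instrumentation scope name
	return meter.Float64Counter(
		semconv.ContainerCPUTimeName,
		metric.WithUnit(semconv.ContainerCPUTimeUnit),
		metric.WithDescription(semconv.ContainerCPUTimeDescription),
	)
}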
+ // DBClientConnectionsUsage is the metric conforming to the // "db.client.connections.usage" semantic conventions. It represents the number // of connections that are currently in state described by the `state` @@ -100,12 +137,22 @@ const ( DBClientConnectionsUseTimeUnit = "ms" DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." + // AspnetcoreRoutingMatchAttempts is the metric conforming to the // "aspnetcore.routing.match_attempts" semantic conventions. It represents the // number of requests that were attempted to be matched to an endpoint. // Instrument: counter // Unit: {match_attempt} - // Stability: Experimental + // Stability: Stable AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." @@ -115,7 +162,7 @@ const ( // number of exceptions caught by exception handling middleware. // Instrument: counter // Unit: {exception} - // Stability: Experimental + // Stability: Stable AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" AspnetcoreDiagnosticsExceptionsUnit = "{exception}" AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." @@ -126,7 +173,7 @@ const ( // that hold a rate limiting lease. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." @@ -137,7 +184,7 @@ const ( // server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." @@ -148,7 +195,7 @@ const ( // limiting lease. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." @@ -159,7 +206,7 @@ const ( // acquire a rate limiting lease. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
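For histogram-shaped conventions such as dns.lookup.duration above, the constants pair naturally with a timed measurement. A sketch under the assumption that hist was created from the DNSLookupDuration* constants (recordDNSLookup is a hypothetical helper, not part of this package):

import (
	"context"
	"net"
	"time"

	"go.opentelemetry.io/otel/metric"
)

// recordDNSLookup times a host lookup and records the elapsed seconds,
// matching the "s" unit declared by DNSLookupDurationUnit.
func recordDNSLookup(ctx context.Context, hist metric.Float64Histogram, host string) ([]string, error) {
	start := time.Now()
	addrs, err := net.DefaultResolver.LookupHost(ctx, host)
	hist.Record(ctx, time.Since(start).Seconds())
	return addrs, err
}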
@@ -169,69 +216,17 @@ const ( // number of requests that tried to acquire a rate limiting lease. // Instrument: counter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" AspnetcoreRateLimitingRequestsUnit = "{request}" AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // HTTPClientRequestTimeInQueue is the metric conforming to the - // "http.client.request.time_in_queue" semantic conventions. It represents the - // amount of time requests spent on a queue waiting for an available - // connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" - HTTPClientRequestTimeInQueueUnit = "s" - HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." - // KestrelActiveConnections is the metric conforming to the // "kestrel.active_connections" semantic conventions. It represents the number // of connections that are currently active on the server. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelActiveConnectionsName = "kestrel.active_connections" KestrelActiveConnectionsUnit = "{connection}" KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." @@ -241,7 +236,7 @@ const ( // duration of connections on the server. 
// Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable KestrelConnectionDurationName = "kestrel.connection.duration" KestrelConnectionDurationUnit = "s" KestrelConnectionDurationDescription = "The duration of connections on the server." @@ -251,7 +246,7 @@ const ( // number of connections rejected by the server. // Instrument: counter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelRejectedConnectionsName = "kestrel.rejected_connections" KestrelRejectedConnectionsUnit = "{connection}" KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." @@ -261,7 +256,7 @@ const ( // of connections that are currently queued and are waiting to start. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelQueuedConnectionsName = "kestrel.queued_connections" KestrelQueuedConnectionsUnit = "{connection}" KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." @@ -272,7 +267,7 @@ const ( // currently queued and are waiting to start. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable KestrelQueuedRequestsName = "kestrel.queued_requests" KestrelQueuedRequestsUnit = "{request}" KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." @@ -282,7 +277,7 @@ const ( // number of connections that are currently upgraded (WebSockets). . // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" KestrelUpgradedConnectionsUnit = "{connection}" KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." @@ -292,7 +287,7 @@ const ( // duration of TLS handshakes on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" KestrelTLSHandshakeDurationUnit = "s" KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." @@ -302,7 +297,7 @@ const ( // number of TLS handshakes that are currently in progress on the server. // Instrument: updowncounter // Unit: {handshake} - // Stability: Experimental + // Stability: Stable KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" KestrelActiveTLSHandshakesUnit = "{handshake}" KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." @@ -312,7 +307,7 @@ const ( // duration of connections on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable SignalrServerConnectionDurationName = "signalr.server.connection.duration" SignalrServerConnectionDurationUnit = "s" SignalrServerConnectionDurationDescription = "The duration of connections on the server." @@ -322,7 +317,7 @@ const ( // number of connections that are currently active on the server. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable SignalrServerActiveConnectionsName = "signalr.server.active_connections" SignalrServerActiveConnectionsUnit = "{connection}" SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." 
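Since signalr.server.active_connections above is an updowncounter, the usual pattern is a paired Add(+1)/Add(-1) around a connection's lifetime. These particular conventions describe ASP.NET Core servers, so the sketch below is purely illustrative of the pattern using the exported constants (trackConnection is a hypothetical helper):

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
)

// trackConnection increments the active-connections updowncounter and
// returns a release func that decrements it when the connection closes.
func trackConnection(ctx context.Context, meter metric.Meter) (func(), error) {
	active, err := meter.Int64UpDownCounter(
		semconv.SignalrServerActiveConnectionsName,
		metric.WithUnit(semconv.SignalrServerActiveConnectionsUnit),
		metric.WithDescription(semconv.SignalrServerActiveConnectionsDescription),
	)
	if err != nil {
		return nil, err
	}
	active.Add(ctx, 1)
	return func() { active.Add(ctx, -1) }, nil
}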
@@ -481,6 +476,37 @@ const ( HTTPClientResponseBodySizeUnit = "By" HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic // conventions. It represents the measure of initial memory requested. // Instrument: updowncounter @@ -673,15 +699,15 @@ const ( MessagingReceiveDurationUnit = "s" MessagingReceiveDurationDescription = "Measures the duration of receive operation." - // MessagingDeliverDuration is the metric conforming to the - // "messaging.deliver.duration" semantic conventions. It represents the - // measures the duration of deliver operation. + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // duration of the process operation. // Instrument: histogram // Unit: s // Stability: Experimental - MessagingDeliverDurationName = "messaging.deliver.duration" - MessagingDeliverDurationUnit = "s" - MessagingDeliverDurationDescription = "Measures the duration of deliver operation." + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." // MessagingPublishMessages is the metric conforming to the // "messaging.publish.messages" semantic conventions. It represents the @@ -703,15 +729,112 @@ const ( MessagingReceiveMessagesUnit = "{message}" MessagingReceiveMessagesDescription = "Measures the number of received messages." - // MessagingDeliverMessages is the metric conforming to the - // "messaging.deliver.messages" semantic conventions. It represents the - // measures the number of delivered messages. + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // number of processed messages.
// Instrument: counter // Unit: {message} // Stability: Experimental - MessagingDeliverMessagesName = "messaging.deliver.messages" - MessagingDeliverMessagesUnit = "{message}" - MessagingDeliverMessagesDescription = "Measures the number of delivered messages." + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. 
+ // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." // RPCServerDuration is the metric conforming to the "rpc.server.duration" // semantic conventions. It represents the measures the duration of inbound @@ -1038,25 +1161,25 @@ const ( SystemNetworkConnectionsName = "system.network.connections" SystemNetworkConnectionsUnit = "{connection}" - // SystemProcessesCount is the metric conforming to the - // "system.processes.count" semantic conventions. It represents the total - // number of processes in each state. + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. // Instrument: updowncounter // Unit: {process} // Stability: Experimental - SystemProcessesCountName = "system.processes.count" - SystemProcessesCountUnit = "{process}" - SystemProcessesCountDescription = "Total number of processes in each state" + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" - // SystemProcessesCreated is the metric conforming to the - // "system.processes.created" semantic conventions. It represents the total + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total // number of processes created over uptime of the host. // Instrument: counter // Unit: {process} // Stability: Experimental - SystemProcessesCreatedName = "system.processes.created" - SystemProcessesCreatedUnit = "{process}" - SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" // SystemLinuxMemoryAvailable is the metric conforming to the // "system.linux.memory.available" semantic conventions. 
It represents an diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/resource.go new file mode 100644 index 00000000..13affea0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/resource.go @@ -0,0 +1,429 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0" + +import "go.opentelemetry.io/otel/attribute" + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. 
+func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. 
It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. 
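+	// An editorial aside (not generated): a WildFly deployment might describe
+	// itself with
+	//
+	//	resource.NewWithAttributes(SchemaURL,
+	//		WebEngineName("WildFly"),
+	//		WebEngineVersion("21.0.0"))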
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It represents the name of the
+	// instrumentation library (deprecated in favor of the `otel.scope.name`
+	// attribute).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It represents the version
+	// of the instrumentation library (deprecated in favor of the
+	// `otel.scope.version` attribute).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0.0'
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the name of the
+// instrumentation library (deprecated in favor of the `otel.scope.name`
+// attribute).
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the version of
+// the instrumentation library (deprecated in favor of the
+// `otel.scope.version` attribute).
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/schema.go
similarity index 85%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.25.0/schema.go
index fe80b173..58443115 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/schema.go
@@ -1,9 +1,9 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0"
 
 // SchemaURL is the schema URL that matches the version of the semantic conventions
 // that this package defines. Semconv packages starting from v1.4.0 must declare
 // non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
-const SchemaURL = "https://opentelemetry.io/schemas/1.24.0"
+const SchemaURL = "https://opentelemetry.io/schemas/1.25.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/trace.go
new file mode 100644
index 00000000..5c5ede22
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.25.0/trace.go
@@ -0,0 +1,393 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.25.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` response, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span doesn't depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in
+	// the response headers `x-amz-request-id` or `x-amz-requestid`.
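+	// NOTE (editorial): the otelaws instrumentation bumped elsewhere in this
+	// change records this attribute automatically on AWS SDK v2 client spans;
+	// a manual equivalent (illustrative, reqID being whatever ID the SDK
+	// surfaced) would be
+	//
+	//	span.SetAttributes(AWSRequestID(reqID))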
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. 
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. 
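+	// An editorial sketch (not generated): a GraphQL server integration might
+	// record
+	//
+	//	span.SetAttributes(
+	//		GraphqlOperationName("findBookByID"),
+	//		GraphqlOperationTypeQuery,
+	//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"))
+	//
+	// where span is a trace.Span obtained from the instrumented handler.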
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ef97d30f..102f2f50 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
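 // (Editorial: this value pairs with the stable-v1 entry in versions.yaml,
 // which the same change bumps to v1.27.0 below.)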
func Version() string { - return "1.26.0" + return "1.27.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index ecd2734e..60985f43 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.26.0 + version: v1.27.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,12 +29,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.48.0 + version: v0.49.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.2.0-alpha + version: v0.3.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -46,3 +46,4 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e85..9b4de940 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7..105c3b27 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. 
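 	// (Editorial note: the changes below return the partially decoded frame
 	// mh together with the error, instead of a nil Frame, so that callers
 	// such as the server can see the offending StreamID; compare the new
 	// ReadFrame doc comment above and the server.go hunk further down.)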
-		return nil, ConnectionError(ErrCodeProtocol)
+		return mh, ConnectionError(ErrCodeProtocol)
 	}
 
 	// Also close the connection after any CONTINUATION frame following an
@@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 	}
 	// It would be nice to send a RST_STREAM before sending the GOAWAY,
 	// but the structure of the server's frame writer makes this difficult.
-		return nil, ConnectionError(ErrCodeProtocol)
+		return mh, ConnectionError(ErrCodeProtocol)
 	}
 
 	if _, err := hdec.Write(frag); err != nil {
-		return nil, ConnectionError(ErrCodeCompression)
+		return mh, ConnectionError(ErrCodeCompression)
 	}
 
 	if hc.HeadersEnded() {
@@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 	mh.HeadersFrame.invalidate()
 
 	if err := hdec.Close(); err != nil {
-		return nil, ConnectionError(ErrCodeCompression)
+		return mh, ConnectionError(ErrCodeCompression)
 	}
 	if invalid != nil {
 		fr.errDetail = invalid
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index ce2e8b40..c5d08108 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -732,11 +732,7 @@ func isClosedConnError(err error) bool {
 		return false
 	}
 
-	// TODO: remove this string search and be more like the Windows
-	// case below. That might involve modifying the standard library
-	// to return better error types.
-	str := err.Error()
-	if strings.Contains(str, "use of closed network connection") {
+	if errors.Is(err, net.ErrClosed) {
 		return true
 	}
 
@@ -1482,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
 		sc.goAway(ErrCodeFlowControl)
 		return true
 	case ConnectionError:
+		if res.f != nil {
+			if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
+				sc.maxClientStreamID = id
+			}
+		}
 		sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
 		sc.goAway(ErrCode(ev))
 		return true // goAway will handle shutdown
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index ce375c8c..2fa49490 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -936,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
 	}
 	last := f.LastStreamID
 	for streamID, cs := range cc.streams {
-		if streamID > last {
+		if streamID <= last {
+			// The server's GOAWAY indicates that it received this stream.
+			// It will either finish processing it, or close the connection
+			// without doing so. Either way, leave the stream alone for now.
+			continue
+		}
+		if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
+			// Don't retry the first stream on a connection if we get a non-NO error.
+			// If the server is sending an error on a new connection,
+			// retrying the request on a new one probably isn't going to work.
+			cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
+		} else {
+			// Aborting the stream with errClientConnGotGoAway indicates that
+			// the request should be retried on a new connection.
cs.abortStreamLocked(errClientConnGotGoAway) } } diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 3543268f..e7d3805e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/api/httpbody.proto package httpbody diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 7bd161e4..3e562182 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/error_details.proto package errdetails diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index a6b50818..6ad1b1c1 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v4.24.4 // source: google/rpc/status.proto package status diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 608aa6e1..0854d298 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `./scripts/vet.sh` to catch vet errors - `go test -cpu 1,4 -timeout 7m ./...` to run the tests - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index c6672c0a..6a8a0778 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,6 +9,7 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [atollena](https://github.com/atollena), Datadog, Inc. 
- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index 1f896092..be38384f 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -30,17 +30,20 @@ testdeps: GO111MODULE=on go get -d -v -t google.golang.org/grpc/... vet: vetdeps - ./vet.sh + ./scripts/vet.sh vetdeps: - ./vet.sh -install + ./scripts/vet.sh -install .PHONY: \ all \ build \ clean \ + deps \ proto \ test \ + testsubmodule \ testrace \ + testdeps \ vet \ vetdeps diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 856c75dd..1afb1e84 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc v4.25.2 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index c7f26071..2359f94b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -37,7 +37,6 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" - "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -121,8 +120,9 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns // resolver, a "dns:///" prefix should be applied to the target. // -// The DialOptions returned by WithBlock, WithTimeout, and -// WithReturnConnectionError are ignored by this function. +// The DialOptions returned by WithBlock, WithTimeout, +// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this +// function. func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, @@ -196,6 +196,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } // Dial calls DialContext(context.Background(), target, opts...). +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } @@ -209,6 +211,8 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // "passthrough" for backward compatibility. This distinction should not matter // to most users, but could matter to legacy users that specify a custom dialer // and expect it to receive the target string directly. +// +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. @@ -838,6 +842,9 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Start with our address set to the first address; this may be updated if + // we connect to different addresses. + ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr) channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel created", @@ -929,10 +936,14 @@ func equalAddresses(a, b []resolver.Address) bool { // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - ac.mu.Lock() - channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) - addrs = copyAddressesWithoutBalancerAttributes(addrs) + limit := len(addrs) + if limit > 5 { + limit = 5 + } + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) + + ac.mu.Lock() if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1167,6 +1178,10 @@ type addrConn struct { // is received, transport is closed, ac has been torn down). transport transport.ClientTransport // The current transport. + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. mu sync.Mutex curAddr resolver.Address // The current address. addrs []resolver.Address // All addresses that the resolver resolved to. @@ -1292,6 +1307,7 @@ func (ac *addrConn) resetTransport() { func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) if ctx.Err() != nil { return errConnClosing } @@ -1739,7 +1755,7 @@ func encodeAuthority(authority string) string { return false case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters return false - case ':', '[', ']', '@': // Authority related delimeters + case ':', '[', ']', '@': // Authority related delimiters return false } // Everything else must be escaped. diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100644 index 4cdc6ba7..00000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. 
$proto diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 08476ad1..0b42c302 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -235,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= _maxCode { - return fmt.Errorf("invalid code: %q", ci) + return fmt.Errorf("invalid code: %d", ci) } *c = Code(ci) diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index f6b55c68..665e790b 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -30,7 +30,7 @@ import ( "google.golang.org/grpc/attributes" icredentials "google.golang.org/grpc/internal/credentials" - "google.golang.org/protobuf/protoadapt" + "google.golang.org/protobuf/proto" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { } // CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. -// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method // or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. // // This API is experimental. @@ -287,5 +287,5 @@ type ChannelzSecurityValue interface { type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string - Value protoadapt.MessageV1 + Value proto.Message } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 40249322..00273702 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -300,6 +300,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { // // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithBlock() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -314,10 +317,8 @@ func WithBlock() DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. func WithReturnConnectionError() DialOption { return newFuncDialOption(func(o *dialOptions) { o.block = true @@ -387,8 +388,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext instead of Dial and context.WithTimeout -// instead. Will be supported throughout 1.x. +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. 
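+//
+// An editorial sketch of the replacement pattern with NewClient: put the
+// deadline on each RPC's context instead of on dialing, e.g.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	resp, err := client.SomeRPC(ctx, req) // SomeRPC and req are placeholders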
func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -470,9 +471,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption { // Use of this feature is not recommended. For more information, please see: // https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a // later release. func FailOnNonTempDialError(f bool) DialOption { return newFuncDialOption(func(o *dialOptions) { @@ -601,12 +601,22 @@ func WithDisableRetry() DialOption { }) } +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + // WithMaxHeaderListSize returns a DialOption that specifies the maximum // (uncompressed) size of header list that the client is prepared to accept. func WithMaxHeaderListSize(s uint32) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.copts.MaxHeaderListSize = &s - }) + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } } // WithDisableHealthCheck disables the LB channel health checking for all @@ -648,7 +658,7 @@ func defaultDialOptions() dialOptions { } } -// withGetMinConnectDeadline specifies the function that clientconn uses to +// withMinConnectDeadline specifies the function that clientconn uses to // get minConnectDeadline. This can be used to make connection attempts happen // faster/slower. // diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 5bf880d4..6a93475a 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 +// protoc-gen-go v1.33.0 // protoc v4.25.2 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 4c46c098..8f793e6e 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -81,8 +81,9 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { } func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...) 
if err != nil { return nil, err } @@ -90,11 +91,12 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . } func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{stream} + x := &healthWatchClient{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -198,7 +200,7 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) + return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) } type Health_WatchServer interface { diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 6bf7f873..13821a92 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -75,7 +75,6 @@ func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) if err != nil { return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) } - return &lbConfig{childBuilder: builder, childConfig: cfg}, nil } diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 45d5e50e..73bb4c4e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -169,7 +169,6 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper { func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() - gsbCfg, ok := state.BalancerConfig.(*lbConfig) if ok { // Switch to the child in the config unless it is already active. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index e8456a77..aa4505a8 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -65,7 +65,7 @@ type TruncatingMethodLogger struct { callID uint64 idWithinCallGen *callIDGenerator - sink Sink // TODO(blog): make this plugable. + sink Sink // TODO(blog): make this pluggable. } // NewTruncatingMethodLogger returns a new truncating method logger. @@ -80,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { callID: idGen.next(), idWithinCallGen: &callIDGenerator{}, - sink: DefaultSink, // TODO(blog): make it plugable. + sink: DefaultSink, // TODO(blog): make it pluggable. 
} } @@ -397,7 +397,7 @@ func metadataKeyOmit(key string) bool { switch key { case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": return true - case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users. + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. return false } return strings.HasPrefix(key, "grpc-") diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 685a3cb4..9c915d9e 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -28,9 +28,6 @@ import ( var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) - // AdvertiseCompressors is set if registered compressor should be advertised - // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) // RingHashCap indicates the maximum ring size which defaults to 4096 // entries but may be overridden by setting the environment variable // "GRPC_RING_HASH_CAP". This does not override the default bounds diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go index 9f409096..e8d86698 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -20,8 +20,6 @@ package grpcutil import ( "strings" - - "google.golang.org/grpc/internal/envconfig" ) // RegisteredCompressorNames holds names of the registered compressors. @@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool { // RegisteredCompressors returns a string of registered compressor names // separated by comma. func RegisteredCompressors() string { - if !envconfig.AdvertiseCompressors { - return "" - } return strings.Join(RegisteredCompressorNames, ",") } diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index abab35e2..f3f52a59 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -41,18 +41,24 @@ import ( "google.golang.org/grpc/serviceconfig" ) -// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB -// addresses from SRV records. Must not be changed after init time. -var EnableSRVLookups = false - -// ResolvingTimeout specifies the maximum duration for a DNS resolution request. -// If the timeout expires before a response is received, the request will be canceled. -// -// It is recommended to set this value at application startup. Avoid modifying this variable -// after initialization as it's not thread-safe for concurrent modification. -var ResolvingTimeout = 30 * time.Second - -var logger = grpclog.Component("dns") +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false + + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. 
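+	// (Editorial: this exported variable replaces internal.MinResolutionRate,
+	// which the same change deletes from internal/internal.go below; like
+	// ResolvingTimeout it is not safe for concurrent modification, so set it
+	// only at startup, e.g. MinResolutionInterval = time.Minute in a test.)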
+ MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. + ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") +) func init() { resolver.Register(NewBuilder()) @@ -208,7 +214,7 @@ func (d *dnsResolver) watcher() { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - waitTime = internal.MinResolutionRate + waitTime = MinResolutionInterval select { case <-d.ctx.Done(): return diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go index c7fc557d..a7ecaf8d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -28,7 +28,7 @@ import ( // NetResolver groups the methods on net.Resolver that are used by the DNS // resolver implementation. This allows the default net.Resolver instance to be -// overidden from tests. +// overridden from tests. type NetResolver interface { LookupHost(ctx context.Context, host string) (addrs []string, err error) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) @@ -50,10 +50,6 @@ var ( // The following vars are overridden from tests. var ( - // MinResolutionRate is the minimum rate at which re-resolutions are - // allowed. This helps to prevent excessive re-resolution. - MinResolutionRate = 30 * time.Second - // TimeAfterFunc is used by the DNS resolver to wait for the given duration // to elapse. In non-test code, this is implemented by time.After. 
In test // code, this can be used to control the amount of time the resolver is diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 83c38298..3deadfb4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -193,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn error // if set, loopyWriter will exit, resulting in conn closure + closeConn error // if set, loopyWriter will exit with this error } func (*goAway) isTransportResponseFrame() bool { return false } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -344,7 +344,7 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err return false, c.err } if f != nil { - if !f(it) { // f wasn't successful + if !f() { // f wasn't successful c.mu.Unlock() return false, nil } @@ -495,21 +495,22 @@ type loopyWriter struct { ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ - side: s, - cbuf: cbuf, - sendQuota: defaultWindowSize, - oiws: defaultWindowSize, - estdStreams: make(map[uint32]*outStream), - activeStreams: newOutStreamList(), - framer: fr, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - bdpEst: bdpEst, - conn: conn, - logger: logger, + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, } return l } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index deba0c4d..3c63c706 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -114,11 +114,11 @@ type http2Client struct { streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 - nextID uint32 registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables + nextID uint32 state transportState activeStreams map[uint32]*Stream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. @@ -408,10 +408,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerErrCh := make(chan error, 1) go t.reader(readerErrCh) defer func() { - if err == nil { - err = <-readerErrCh - } if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. 
+ close(t.writerDone) t.Close(err) } }() @@ -458,8 +458,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if err := t.framer.writer.Flush(); err != nil { return nil, err } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -517,6 +521,17 @@ func (t *http2Client) getPeer() *peer.Peer { } } +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + return false, g.closeConn +} + func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ @@ -781,7 +796,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it any) bool { + checkForStreamQuota := func() bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -793,23 +808,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, t.waitingStreams-- } t.streamQuota-- - h := it.(*headerFrame) - h.streamID = t.nextID - t.nextID += 2 - - // Drain client transport if nextID > MaxStreamID which signals gRPC that - // the connection is closed and a new one must be created for subsequent RPCs. - transportDrainRequired = t.nextID > MaxStreamID - s.id = h.streamID - s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
+ transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.activeStreams[s.id] = s t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { select { case t.streamsQuotaAvailable <- struct{}{}: @@ -819,13 +835,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it any) bool { + checkForHeaderListSize := func() bool { if t.maxSendHeaderListSize == nil { return true } - hdrFrame := it.(*headerFrame) var sz int64 - for _, f := range hdrFrame.hf { + for _, f := range hdr.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) return false @@ -834,8 +849,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it any) bool { - return checkForHeaderListSize(it) && checkForStreamQuota(it) + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() }, hdr) if err != nil { // Connection closed. @@ -946,7 +961,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(any) bool { + addBackStreamQuota := func() bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -966,7 +981,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be -// accessed any more. +// accessed anymore. func (t *http2Client) Close(err error) { t.mu.Lock() // Make sure we only close once. @@ -991,7 +1006,10 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() - t.controlBuf.finish() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + <-t.writerDone t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1099,7 +1117,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(any) bool { + updateIWS := func() bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1252,7 +1270,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index d582e047..cab0e2d3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -330,8 +330,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) - t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -860,7 +859,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(any) bool { + t.controlBuf.executeAndPut(func() bool { for _, f := range updateFuncs { f() } @@ -1014,12 +1013,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ + hf := &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, onWrite: t.setResetPingStrikes, - }) + } + success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf) if !success { if err != nil { return err @@ -1208,7 +1208,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) return } if !outstandingPing { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 0d2a6e47..4b39c0ad 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -304,7 +304,7 @@ func (s *Stream) isHeaderSent() bool { } // updateHeaderSent updates headerSent and returns true -// if it was alreay set. It is valid only on server-side. +// if it was already set. It is valid only on server-side. func (s *Stream) updateHeaderSent() bool { return atomic.SwapUint32(&s.headerSent, 1) == 1 } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd..6c01a9b3 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -90,6 +90,21 @@ func Pairs(kv ...string) MD { return md } +// String implements the Stringer interface for pretty-printing a MD. +// Ordering of the values is non-deterministic as it ranges over a map. 
+func (md MD) String() string {
+	var sb strings.Builder
+	fmt.Fprintf(&sb, "MD{")
+	for k, v := range md {
+		if sb.Len() > 3 {
+			fmt.Fprintf(&sb, ", ")
+		}
+		fmt.Fprintf(&sb, "%s=[%s]", k, strings.Join(v, ", "))
+	}
+	fmt.Fprintf(&sb, "}")
+	return sb.String()
+}
+
 // Len returns the number of items in md.
 func (md MD) Len() int {
 	return len(md)
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index a821ff9b..499a49c8 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -22,7 +22,9 @@ package peer
 
 import (
 	"context"
+	"fmt"
 	"net"
+	"strings"
 
 	"google.golang.org/grpc/credentials"
 )
@@ -39,6 +41,34 @@ type Peer struct {
 	AuthInfo credentials.AuthInfo
 }
 
+// String ensures the Peer type implements the Stringer interface so that a
+// context carrying a peerKey value can be printed effectively.
+func (p *Peer) String() string {
+	if p == nil {
+		return "Peer<nil>"
+	}
+	sb := &strings.Builder{}
+	sb.WriteString("Peer{")
+	if p.Addr != nil {
+		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+	} else {
+		fmt.Fprintf(sb, "Addr: <nil>, ")
+	}
+	if p.LocalAddr != nil {
+		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+	} else {
+		fmt.Fprintf(sb, "LocalAddr: <nil>, ")
+	}
+	if p.AuthInfo != nil {
+		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+	} else {
+		fmt.Fprintf(sb, "AuthInfo: <nil>")
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
 type peerKey struct{}
 
 // NewContext creates a new context with peer information attached.
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index bf56faa7..56e8aba7 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,6 +20,7 @@ package grpc
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"sync"
@@ -117,7 +118,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			if lastPickErr != nil {
 				errStr = "latest balancer error: " + lastPickErr.Error()
 			} else {
-				errStr = ctx.Err().Error()
+				errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
 			}
 			switch ctx.Err() {
 			case context.DeadlineExceeded:
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index e3ea42ba..88536266 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -54,7 +54,7 @@ type pfConfig struct {
 	serviceconfig.LoadBalancingConfig `json:"-"`
 
 	// If set to true, instructs the LB policy to shuffle the order of the list
-	// of addresses received from the name resolver before attempting to
+	// of endpoints received from the name resolver before attempting to
 	// connect to them.
 	ShuffleAddressList bool `json:"shuffleAddressList"`
 }
@@ -94,8 +94,7 @@ func (b *pickfirstBalancer) ResolverError(err error) {
 }
 
 func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
-	addrs := state.ResolverState.Addresses
-	if len(addrs) == 0 {
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
 		// The resolver reported an empty address list. Treat it like an error by
 		// calling b.ResolverError.
		if b.subConn != nil {
@@ -107,22 +106,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
 		b.ResolverError(errors.New("produced zero addresses"))
 		return balancer.ErrBadResolverState
 	}
-
 	// We don't have to guard this block with the env var because ParseConfig
 	// already does so.
 	cfg, ok := state.BalancerConfig.(pfConfig)
 	if state.BalancerConfig != nil && !ok {
 		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
 	}
-	if cfg.ShuffleAddressList {
-		addrs = append([]resolver.Address{}, addrs...)
-		grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
-	}
 	if b.logger.V(2) {
 		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
 	}
+
+	var addrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling will
+		// change the order of endpoints but not touch the order of the addresses
+		// within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for each
+		// of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8304 section 4." - A61
+			// TODO: support the above language.
+			addrs = append(addrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints, however some balancers such as weighted
+		// target do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, this else conditional can be deleted.
+		addrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			addrs = append([]resolver.Address{}, addrs...)
+			grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+		}
+	}
+
 	if b.subConn != nil {
 		b.cc.UpdateAddresses(b.subConn, addrs)
 		return nil
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
index a6f26c8a..3edca296 100644
--- a/vendor/google.golang.org/grpc/regenerate.sh
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -63,7 +63,7 @@ LEGACY_SOURCES=(
 
 # Generates only the new gRPC Service symbols
 SOURCES=(
-  $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$')
+  $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
   ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
   ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
   ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
@@ -93,7 +93,7 @@ Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
 for src in ${SOURCES[@]}; do
   echo "protoc ${src}"
-  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \
+  protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
    -I"."
\ -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ @@ -118,6 +118,6 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # grpc_testing_not_regenerate/*.pb.go are not re-generated, # see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index b54a3a32..ef3d6ed6 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -18,9 +18,6 @@ // Package dns implements a dns resolver to be installed as the default resolver // in grpc. -// -// Deprecated: this package is imported by grpc and should not need to be -// imported directly by users. package dns import ( @@ -52,3 +49,12 @@ func SetResolvingTimeout(timeout time.Duration) { func NewBuilder() resolver.Builder { return dns.NewBuilder() } + +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 998e251d..fdd49e6e 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -964,7 +964,7 @@ func setCallInfoCodec(c *callInfo) error { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 7. +// support package version is 9. // // Older versions are kept for compatibility. // @@ -976,6 +976,7 @@ const ( SupportPackageIsVersion6 = true SupportPackageIsVersion7 = true SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index fd4558da..89f8e479 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -527,12 +527,22 @@ func ConnectionTimeout(d time.Duration) ServerOption { }) } +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size // of header list that the server is prepared to accept. 
 func MaxHeaderListSize(s uint32) ServerOption {
-	return newFuncServerOption(func(o *serverOptions) {
-		o.maxHeaderListSize = &s
-	})
+	return MaxHeaderListSizeServerOption{
+		MaxHeaderListSize: s,
+	}
 }
 
 // HeaderTableSize returns a ServerOption that sets the size of dynamic
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 2b35c5d2..9da8fc80 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -172,7 +172,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
-		logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+		logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 		return &serviceconfig.ParseResult{Err: err}
 	}
 	sc := ServiceConfig{
@@ -219,7 +219,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 			Timeout: (*time.Duration)(m.Timeout),
 		}
 		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
-			logger.Warningf("grpc: unmarshaling service config %s: %v", js, err)
+			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 			return &serviceconfig.ParseResult{Err: err}
 		}
 		if m.MaxRequestMessageBytes != nil {
@@ -239,13 +239,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
 		for i, n := range *m.Name {
 			path, err := n.generatePath()
 			if err != nil {
-				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
 				return &serviceconfig.ParseResult{Err: err}
 			}
 
 			if _, ok := paths[path]; ok {
 				err = errDuplicatedName
-				logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err)
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
 				return &serviceconfig.ParseResult{Err: err}
 			}
 			paths[path] = struct{}{}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 4ab70e2d..fdb0bd65 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -73,9 +73,12 @@ func (*PickerUpdated) isRPCStats() {}
 type InPayload struct {
 	// Client is true if this InPayload is from client side.
 	Client bool
-	// Payload is the payload with original type.
+	// Payload is the payload with original type. This may be modified after
+	// the call to HandleRPC, which provides the InPayload, returns; it must be
+	// copied if needed later.
 	Payload any
 	// Data is the serialized message payload.
+	// Deprecated: Data will be removed in the next release.
 	Data []byte
 
 	// Length is the size of the uncompressed payload data. Does not include any
@@ -143,9 +146,12 @@ func (s *InTrailer) isRPCStats() {}
 type OutPayload struct {
 	// Client is true if this OutPayload is from client side.
 	Client bool
-	// Payload is the payload with original type.
+	// Payload is the payload with original type. This may be modified after
+	// the call to HandleRPC, which provides the OutPayload, returns; it must be
+	// copied if needed later.
 	Payload any
 	// Data is the serialized message payload.
+	// Deprecated: Data will be removed in the next release.
 	Data []byte
 	// Length is the size of the uncompressed payload data. Does not include any
 	// framing (gRPC or HTTP/2).
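The stats.InPayload and stats.OutPayload changes above deprecate the Data field and note that Payload may be mutated once the HandleRPC call returns, so stats handlers should read the size fields rather than retain the serialized bytes. A minimal sketch of such a handler — the sizeLogger type, target address, and log format are illustrative, not part of this patch:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// sizeLogger records payload sizes from the Length and WireLength fields
// instead of the deprecated Data slice or the mutable Payload field.
type sizeLogger struct{}

func (sizeLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }

func (sizeLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		log.Printf("recv: %d bytes uncompressed, %d on the wire", p.Length, p.WireLength)
	case *stats.OutPayload:
		log.Printf("sent: %d bytes uncompressed, %d on the wire", p.Length, p.WireLength)
	}
}

func (sizeLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }

func (sizeLogger) HandleConn(context.Context, stats.ConnStats) {}

var _ stats.Handler = sizeLogger{}

func main() {
	// Attach the handler to a client connection; grpc.StatsHandler does the
	// same for servers.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(sizeLogger{}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```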
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index d939ffc6..b54563e8 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -516,6 +516,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s + a.ctx = s.Context() a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 00000000..8b813529 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,152 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + Recv() (*Res, error) + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingServer[Res any] interface { + Send(*Res) error + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + Send(*Req) error + CloseAndRecv() (*Res, error) + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +type ClientStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + SendAndClose(*Res) error + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + Send(*Req) error + Recv() (*Res, error) + ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingServer[Req any, Res any] interface { + Recv() (*Req, error) + Send(*Res) error + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. 
It is used in generated code.
+type GenericClientStream[Req any, Res any] struct {
+	ClientStream
+}
+
+var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
+var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+
+// Send pushes one message into the stream of requests to be consumed by the
+// server. The type of message which can be sent is determined by the Req type
+// parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of responses generated by the server.
+// The type of the message returned is determined by the Res type parameter
+// of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
+	m := new(Res)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// CloseAndRecv closes the sending side of the stream, then receives the unary
+// response from the server. The type of message which it returns is determined
+// by the Res type parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(Res)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
+// and BidiStreamingServer interfaces. It is used in generated code.
+type GenericServerStream[Req any, Res any] struct {
+	ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+	m := new(Req)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 2556f758..e1806e76 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.63.2" +const Version = "1.64.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100644 index 7e6b92e4..00000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -# not makes sure the command passed to it does not exit with a return code of 0. -not() { - # This is required instead of the earlier (! $COMMAND) because subshells and - # pipefail don't work the same on Darwin as in Linux. - ! "$@" -} - -die() { - echo "$@" >&2 - exit 1 -} - -fail_on_output() { - tee /dev/stderr | not read -} - -# Check to make sure it's safe to modify the user's git repo. -git status --porcelain | fail_on_output - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" -go version - -if [[ "$1" = "-install" ]]; then - # Install the pinned versions as defined in module tools. - pushd ./test/tools - go install \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell - popd - if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=25.2 # a.k.a. v4.22.0 in pb.go files. - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/runner/go - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif not which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - make proto && git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - -if [[ -n "${VET_ONLY_PROTO}" ]]; then - exit 0 -fi - -# - Ensure all source files contain a copyright message. -# (Done in two parts because Darwin "git grep" has broken support for compound -# exclusion matches.) -(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output - -# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -not grep 'func Test[^(]' *_test.go -not grep 'func Test[^(]' test/*.go - -# - Check for typos in test function names -git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' -git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' - -# - Do not import x/net/context. -not git grep -l 'x/net/context' -- "*.go" - -# - Do not use time.After except in tests. It has the potential to leak the -# timer since there is no way to stop it early. -git grep -l 'time.After(' -- "*.go" | not grep -v '_test.go\|test_utils\|testutils' - -# - Do not import math/rand for real library code. Use internal/grpcrand for -# thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' - -# - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' - -# - Do not call grpclog directly. Use grpclog.Component instead. 
-git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' - -# - Ensure all ptypes proto packages are renamed when importing. -not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" - -# - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" - -# - Ensure all xds proto imports are renamed to *pb or *grpc. -git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' - -misspell -error . - -# - gofmt, goimports, go vet, go mod tidy. -# Perform these checks on each module inside gRPC. -for MOD_FILE in $(find . -name 'go.mod'); do - MOD_DIR=$(dirname ${MOD_FILE}) - pushd ${MOD_DIR} - go vet -all ./... | fail_on_output - gofmt -s -d -l . 2>&1 | fail_on_output - goimports -l . 2>&1 | not grep -vE "\.pb\.go" - - go mod tidy -compat=1.19 - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) - popd -done - -# - Collection of static analysis checks -SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true - -# Error for anything other than checks that need exclusions. -grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" - -# Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' - -# Error for duplicate imports not including grpc protos. -grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -channelz/grpc_channelz_v1" -go-control-plane/envoy -grpclb/grpc_lb_v1" -health/grpc_health_v1" -interop/grpc_testing" -orca/v3" -proto/grpc_gcp" -proto/grpc_lookup_v1" -reflection/grpc_reflection_v1" -reflection/grpc_reflection_v1alpha" -XXXXX PleaseIgnoreUnused' - -# Error for any package comments not in generated code. -grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" - -# Only ignore the following deprecated types/fields/functions and exclude -# generated code. -grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused -XXXXX Protobuf related deprecation errors: -"github.com/golang/protobuf -.pb.go: -grpc_testing_not_regenerate -: ptypes. -proto.RegisterType -XXXXX gRPC internal usage deprecation errors: -"google.golang.org/grpc -: grpc. -: v1alpha. -: v1alphareflectionpb. -BalancerAttributes is deprecated: -CredsBundle is deprecated: -Metadata is deprecated: use Attributes instead. -NewSubConn is deprecated: -OverrideServerName is deprecated: -RemoveSubConn is deprecated: -SecurityVersion is deprecated: -Target is deprecated: Use the Target field in the BuildOptions instead. 
-UpdateAddresses is deprecated:
-UpdateSubConnState is deprecated:
-balancer.ErrTransientFailure is deprecated:
-grpc/reflection/v1alpha/reflection.proto
-SwitchTo is deprecated:
-XXXXX xDS deprecated fields we support
-.ExactMatch
-.PrefixMatch
-.SafeRegexMatch
-.SuffixMatch
-GetContainsMatch
-GetExactMatch
-GetMatchSubjectAltNames
-GetPrefixMatch
-GetSafeRegexMatch
-GetSuffixMatch
-GetTlsCertificateCertificateProviderInstance
-GetValidationContextCertificateProviderInstance
-XXXXX PleaseIgnoreUnused'
-
-echo SUCCESS
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 3f75098b..29846df2 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -25,15 +25,17 @@ const defaultIndent = "  "
 
 // Format formats the message as a multiline string.
 // This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
 func Format(m proto.Message) string {
 	return MarshalOptions{Multiline: true}.Format(m)
 }
 
 // Marshal writes the given [proto.Message] in JSON format using default options.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
 func Marshal(m proto.Message) ([]byte, error) {
 	return MarshalOptions{}.Marshal(m)
}
@@ -110,8 +112,9 @@ type MarshalOptions struct {
 
 // Format formats the message as a string.
 // This method is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
 func (o MarshalOptions) Format(m proto.Message) string {
 	if m == nil || !m.ProtoReflect().IsValid() {
 		return "" // invalid syntax, but okay since this is for debugging
@@ -122,8 +125,9 @@
 }
 
 // Marshal marshals the given [proto.Message] in the JSON format using options in
-// MarshalOptions. Do not depend on the output being stable. It may change over
-// time across different versions of the program.
+// MarshalOptions. Do not depend on the output being stable. Its output will
+// change across different builds of your program, even when using the same
+// version of the protobuf module.
 func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
 	return o.marshal(nil, m)
 }
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 95967e81..1f57e661 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -27,15 +27,17 @@ const defaultIndent = "  "
 
 // Format formats the message as a multiline string.
 // This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } // Marshal writes the given [proto.Message] in textproto format using default -// options. Do not depend on the output being stable. It may change over time -// across different versions of the program. +// options. Do not depend on the output being stable. Its output will change +// across different builds of your program, even when using the same version of +// the protobuf module. func Marshal(m proto.Message) ([]byte, error) { return MarshalOptions{}.Marshal(m) } @@ -84,8 +86,9 @@ type MarshalOptions struct { // Format formats the message as a string. // This method is only intended for human consumption and ignores errors. -// Do not depend on the output being stable. It may change over time across -// different versions of the program. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. func (o MarshalOptions) Format(m proto.Message) string { if m == nil || !m.ProtoReflect().IsValid() { return "" // invalid syntax, but okay since this is for debugging @@ -98,8 +101,9 @@ func (o MarshalOptions) Format(m proto.Message) string { } // Marshal writes the given [proto.Message] in textproto format using options in -// MarshalOptions object. Do not depend on the output being stable. It may -// change over time across different versions of the program. +// MarshalOptions object. Do not depend on the output being stable. Its output +// will change across different builds of your program, even when using the +// same version of the protobuf module. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return o.marshal(nil, m) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index a45625c8..87e46bd4 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -252,6 +252,7 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record fu {rv.MethodByName("Values"), "Values"}, {rv.MethodByName("ReservedNames"), "ReservedNames"}, {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("IsClosed"), "IsClosed"}, }...) 
case protoreflect.EnumValueDescriptor: diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 18f07568..ff6a3836 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 373d2083..7e87c760 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -32,6 +32,7 @@ var byteType = reflect.TypeOf(byte(0)) func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { f := new(filedesc.Field) f.L0.ParentFile = filedesc.SurrogateProto2 + f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -107,8 +108,7 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri f.L1.StringName.InitJSON(jsonName) } case s == "packed": - f.L1.HasPacked = true - f.L1.IsPacked = true + f.L1.EditionFeatures.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index 20c17b35..d9671982 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -87,3 +87,18 @@ func InvalidUTF8(name string) error { func RequiredNotSet(name string) error { return New("required field %v not set", name) } + +type SizeMismatchError struct { + Calculated, Measured int +} + +func (e *SizeMismatchError) Error() string { + return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured) +} + +func MismatchedSizeCalculation(calculated, measured int) error { + return &SizeMismatchError{ + Calculated: calculated, + Measured: measured, + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 8826bcf4..ece53bea 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -7,6 +7,7 @@ package filedesc import ( "bytes" "fmt" + "strings" "sync" "sync/atomic" @@ -108,9 +109,12 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } func (fd *File) Parent() protoreflect.Descriptor { return nil } func (fd *File) Index() int { return 0 } func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } -func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } + +// Not exported and just used to reconstruct the original FileDescriptor proto +func (fd *File) Edition() int32 { return int32(fd.L1.Edition) } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() 
bool { return false } func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() @@ -202,6 +206,9 @@ func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } +func (ed *Enum) IsClosed() bool { + return !ed.L1.EditionFeatures.IsOpenEnum +} func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { @@ -251,10 +258,6 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -331,8 +334,7 @@ func (fd *Field) HasPresence() bool { if fd.L1.Cardinality == protoreflect.Repeated { return false } - explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence - return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil } func (fd *Field) HasOptionalKeyword() bool { return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional @@ -345,14 +347,7 @@ func (fd *Field) IsPacked() bool { case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: return false } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsPacked - } - if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 { - // proto3 repeated fields are packed by default. - return !fd.L1.HasPacked || fd.L1.IsPacked - } - return fd.L1.IsPacked + return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } @@ -399,13 +394,7 @@ func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // WARNING: This method is exempt from the compatibility promise and may be // removed in the future without warning. 
func (fd *Field) EnforceUTF8() bool { - if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { - return fd.L1.EditionFeatures.IsUTF8Validated - } - if fd.L1.HasEnforceUTF8 { - return fd.L1.EnforceUTF8 - } - return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 + return fd.L1.EditionFeatures.IsUTF8Validated } func (od *Oneof) IsSynthetic() bool { @@ -438,7 +427,6 @@ type ( Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor @@ -461,7 +449,16 @@ func (xd *Extension) HasPresence() bool { return xd.L1.Cardi func (xd *Extension) HasOptionalKeyword() bool { return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional } -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsPacked() bool { + if xd.L1.Cardinality != protoreflect.Repeated { + return false + } + switch xd.L1.Kind { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return xd.L1.EditionFeatures.IsPacked +} func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } @@ -542,8 +539,9 @@ func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files are can be used to create standalone descriptors // where the syntax is only information derived from the parent file. var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} + SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}} ) type ( @@ -585,6 +583,34 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } +// Returns true if this field is structured like the synthetic field of a proto2 +// group. This allows us to expand our treatment of delimited fields without +// breaking proto2 files that have been upgraded to editions. +func isGroupLike(fd protoreflect.FieldDescriptor) bool { + // Groups are always group types. + if fd.Kind() != protoreflect.GroupKind { + return false + } + + // Group fields are always the lowercase type name. + if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) { + return false + } + + // Groups could only be defined in the same file they're used. + if fd.Message().ParentFile() != fd.ParentFile() { + return false + } + + // Group messages are always defined in the same scope as the field. File + // level extensions will compare NULL == NULL here, which is why the file + // comparison above is necessary to ensure both come from the same file. + if fd.IsExtension() { + return fd.Parent() == fd.Message().Parent() + } + return fd.ContainingMessage() == fd.Message().Parent() +} + func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { @@ -605,7 +631,7 @@ func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == protoreflect.GroupKind { + if isGroupLike(fd) { s.nameText = string(fd.Message().Name()) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 237e64fd..3bc3b1cd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -113,8 +113,10 @@ func (fd *File) unmarshalSeed(b []byte) { switch string(v) { case "proto2": fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 case "proto3": fd.L1.Syntax = protoreflect.Proto3 + fd.L1.Edition = EditionProto3 case "editions": fd.L1.Syntax = protoreflect.Editions default: @@ -177,11 +179,10 @@ func (fd *File) unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { fd.L1.Syntax = protoreflect.Proto2 + fd.L1.Edition = EditionProto2 } - if fd.L1.Syntax == protoreflect.Editions { - fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) - } + fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition) // Parse editions features from options if any if options != nil { @@ -267,6 +268,7 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i + ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent()) var numValues int for b := b; len(b) > 0; { @@ -443,6 +445,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i + xd.L1.EditionFeatures = featuresFromParentDesc(pd) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -467,6 +470,38 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd prot xd.L0.FullName = appendFullName(sb, pd.FullName(), v) case genid.FieldDescriptorProto_Extendee_field_number: xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { + xd.L1.Kind = protoreflect.GroupKind + } +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldOptions_Features_field_number: + xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) } default: m := protowire.ConsumeFieldValue(num, typ, b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 482a61cc..570181eb 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -466,10 +466,10 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref b = b[m:] } } - if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded { + if fd.L1.Kind == protoreflect.MessageKind && 
fd.L1.EditionFeatures.IsDelimitedEncoded { fd.L1.Kind = protoreflect.GroupKind } - if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired { + if fd.L1.EditionFeatures.IsLegacyRequired { fd.L1.Cardinality = protoreflect.Required } if rawTypeName != nil { @@ -496,13 +496,11 @@ func (fd *Field) unmarshalOptions(b []byte) { b = b[m:] switch num { case genid.FieldOptions_Packed_field_number: - fd.L1.HasPacked = true - fd.L1.IsPacked = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: - fd.L1.HasEnforceUTF8 = true - fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -548,7 +546,6 @@ func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoref func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { var rawTypeName []byte var rawOptions []byte - xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee) xd.L2 = new(ExtensionL2) for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) @@ -572,7 +569,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: - xd.unmarshalOptions(v) rawOptions = appendOptions(rawOptions, v) } default: @@ -580,12 +576,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { b = b[m:] } } - if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded { - xd.L1.Kind = protoreflect.GroupKind - } - if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired { - xd.L1.Cardinality = protoreflect.Required - } if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { @@ -598,32 +588,6 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) } -func (xd *Extension) unmarshalOptions(b []byte) { - for len(b) > 0 { - num, typ, n := protowire.ConsumeTag(b) - b = b[n:] - switch typ { - case protowire.VarintType: - v, m := protowire.ConsumeVarint(b) - b = b[m:] - switch num { - case genid.FieldOptions_Packed_field_number: - xd.L2.IsPacked = protowire.DecodeBool(v) - } - case protowire.BytesType: - v, m := protowire.ConsumeBytes(b) - b = b[m:] - switch num { - case genid.FieldOptions_Features_field_number: - xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures) - } - default: - m := protowire.ConsumeFieldValue(num, typ, b) - b = b[m:] - } - } -} - func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { var rawMethods [][]byte var rawOptions []byte diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go index 30db19fd..f4107c05 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -8,6 +8,7 @@ package filedesc import ( "fmt" + "strings" "sync" "google.golang.org/protobuf/internal/descfmt" @@ -198,6 +199,16 @@ func (p *Fields) lazyInit() *Fields { if _, ok := p.byText[d.TextName()]; !ok { p.byText[d.TextName()] = d } + if isGroupLike(d) { + lowerJSONName := strings.ToLower(d.JSONName()) 
+ if _, ok := p.byJSON[lowerJSONName]; !ok { + p.byJSON[lowerJSONName] = d + } + lowerTextName := strings.ToLower(d.TextName()) + if _, ok := p.byText[lowerTextName]; !ok { + p.byText[lowerTextName] = d + } + } if _, ok := p.byNum[d.Number()]; !ok { p.byNum[d.Number()] = d } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 0375a49d..11f5f356 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -14,9 +14,13 @@ import ( ) var defaultsCache = make(map[Edition]EditionFeatures) +var defaultsKeys = []Edition{} func init() { unmarshalEditionDefaults(editiondefaults.Defaults) + SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2) + SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3) + SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023) } func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { @@ -104,12 +108,15 @@ func unmarshalEditionDefault(b []byte) { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number: + case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number: + fs = unmarshalFeatureSet(v, fs) + case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number: fs = unmarshalFeatureSet(v, fs) } } } defaultsCache[ed] = fs + defaultsKeys = append(defaultsKeys, ed) } func unmarshalEditionDefaults(b []byte) { @@ -135,8 +142,15 @@ func unmarshalEditionDefaults(b []byte) { } func getFeaturesFor(ed Edition) EditionFeatures { - if def, ok := defaultsCache[ed]; ok { - return def + match := EditionUnknown + for _, key := range defaultsKeys { + if key > ed { + break + } + match = key + } + if match == EditionUnknown { + panic(fmt.Sprintf("unsupported edition: %v", ed)) } - panic(fmt.Sprintf("unsupported edition: %v", ed)) + return defaultsCache[match] } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index 28240ebc..bfb3b841 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -63,6 +63,7 @@ func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return des func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) IsClosed() bool { return false } func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 40272c89..1447a119 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -21,6 +21,7 @@ const ( // Enum values for google.protobuf.Edition. 
const ( Edition_EDITION_UNKNOWN_enum_value = 0 + Edition_EDITION_LEGACY_enum_value = 900 Edition_EDITION_PROTO2_enum_value = 998 Edition_EDITION_PROTO3_enum_value = 999 Edition_EDITION_2023_enum_value = 1000 @@ -653,6 +654,7 @@ const ( FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" FieldOptions_Features_field_name protoreflect.Name = "features" + FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -667,6 +669,7 @@ const ( FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" + FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -684,6 +687,7 @@ const ( FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 FieldOptions_Features_field_number protoreflect.FieldNumber = 21 + FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -767,6 +771,33 @@ const ( FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport" + FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport" +) + +// Field names for google.protobuf.FieldOptions.FeatureSupport. +const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed" + + FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced" + FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated" + FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning" + FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed" +) + +// Field numbers for google.protobuf.FieldOptions.FeatureSupport. 
+const ( + FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1 + FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2 + FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3 + FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -1110,17 +1141,20 @@ const ( // Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features" - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features" + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features" ) // Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. const ( - FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 - FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5 ) // Names for google.protobuf.SourceCodeInfo. diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index fd9015e8..9a652a2b 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -10,7 +10,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" ) -const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto" +const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" // Names for google.protobuf.GoFeatures. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 3fadd241..78ee47e4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -233,9 +233,15 @@ func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { } func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + calculatedSize := f.mi.sizePointer(p.Elem(), opts) b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) - return f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -262,14 +268,21 @@ func isInitMessageInfo(p pointer, f *coderFieldInfo) error { return f.mi.checkInitializedPointer(p.Elem()) } -func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { - return protowire.SizeBytes(proto.Size(m)) + tagsize +func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int { + return protowire.SizeBytes(opts.Options().Size(m)) + tagsize } func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() + calculatedSize := mopts.Size(m) b = protowire.AppendVarint(b, wiretag) - b = protowire.AppendVarint(b, uint64(proto.Size(m))) - return opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := mopts.MarshalAppend(b, m) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err } func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { @@ -405,8 +418,8 @@ func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInf return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) } -func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { - return 2*tagsize + proto.Size(m) +func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int { + return 2*tagsize + opts.Options().Size(m) } func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { @@ -482,10 +495,14 @@ func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshal b = protowire.AppendVarint(b, f.wiretag) siz := f.mi.sizePointer(v, opts) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) b, err = f.mi.marshalAppendPointer(b, v, opts) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -520,28 +537,34 @@ func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { return nil } -func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int 
{ + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + mopts := opts.Options() s := p.PointerSlice() var err error for _, v := range s { m := asMessage(v.AsValueOf(goType.Elem())) b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) - b, err = opts.Options().MarshalAppend(b, m) + before := len(b) + b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -582,11 +605,12 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += protowire.SizeBytes(proto.Size(m)) + tagsize + n += protowire.SizeBytes(mopts.Size(m)) + tagsize } return n } @@ -597,13 +621,17 @@ func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() b = protowire.AppendVarint(b, wiretag) - siz := proto.Size(m) + siz := mopts.Size(m) b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) var err error b, err = mopts.MarshalAppend(b, m) if err != nil { return b, err } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } } return b, nil } @@ -651,11 +679,12 @@ var coderMessageSliceValue = valueCoderFuncs{ } func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { + mopts := opts.Options() list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } @@ -738,12 +767,13 @@ func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) } } -func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int { + mopts := opts.Options() s := p.PointerSlice() n := 0 for _, v := range s { m := asMessage(v.AsValueOf(messageType.Elem())) - n += 2*tagsize + proto.Size(m) + n += 2*tagsize + mopts.Size(m) } return n } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index 111b9d16..fb35f0ba 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -9,6 +9,7 @@ import ( "sort" "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" ) @@ -240,11 +241,16 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) size += mapi.valFuncs.size(val, mapValTagSize, opts) b = 
protowire.AppendVarint(b, uint64(size)) + before := len(b) b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) if err != nil { return nil, err } - return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + if measuredSize := len(b) - before; size != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(size, measuredSize) + } + return b, err } else { key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() val := pointerOfValue(valrv) @@ -259,7 +265,12 @@ func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coder } b = protowire.AppendVarint(b, mapi.valWiretag) b = protowire.AppendVarint(b, uint64(valSize)) - return f.mi.marshalAppendPointer(b, val, opts) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, val, opts) + if measuredSize := len(b) - before; valSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(valSize, measuredSize) + } + return b, err } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c2a803bb..c1c33d00 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -167,6 +167,7 @@ func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) // TODO: Use the presence of a UnmarshalJSON method to determine proto2? 
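The hunks above in codec_field.go and codec_map.go (and in proto/messageset.go further down) all add the same guard: remember the size written into the length prefix, marshal, then compare against the bytes actually appended and fail with errors.MismatchedSizeCalculation on any disagreement. A minimal standalone sketch of that guard follows; sizeItem and appendItem are hypothetical stand-ins for the paired size/marshal funcs in the real codec, not part of the protobuf-go API.

package main

import "fmt"

// sizeItem plays the role of a codec's size func: it must predict exactly
// how many bytes the matching append func will emit.
func sizeItem(s string) int { return len(s) }

// appendItem mirrors the diff's pattern: write the length prefix computed up
// front, marshal, then verify the measured byte count matches the prediction.
func appendItem(b []byte, s string) ([]byte, error) {
	calculatedSize := sizeItem(s)
	b = append(b, byte(calculatedSize)) // length prefix (a varint in protowire)
	before := len(b)
	b = append(b, s...) // the actual marshal step
	if measuredSize := len(b) - before; calculatedSize != measuredSize {
		// Mirrors errors.MismatchedSizeCalculation: fail loudly rather than
		// emit a corrupt length-prefixed field.
		return nil, fmt.Errorf("size mismatch: calculated %d, measured %d", calculatedSize, measuredSize)
	}
	return b, nil
}

func main() {
	b, err := appendItem(nil, "hello")
	fmt.Println(b, err) // [5 104 101 108 108 111] <nil>
}

The check matters because the prefix is written before the payload: if a message changes between the size pass and the marshal pass (for example, under concurrent mutation), the old code silently produced a wire stream whose length prefix disagreed with its contents.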
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 87b30d05..6e8677ee 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -118,7 +118,7 @@ func (xi *ExtensionInfo) initFromLegacy() { xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind - xd.L2.IsPacked = fd.L1.IsPacked + xd.L1.EditionFeatures = fd.L1.EditionFeatures xd.L2.Default = fd.L1.Default xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) xd.L2.Enum = ed diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go index 9ab09108..b649f112 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -7,7 +7,7 @@ package impl import ( "bytes" "compress/gzip" - "io/ioutil" + "io" "sync" "google.golang.org/protobuf/internal/filedesc" @@ -51,7 +51,7 @@ func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { if err != nil { panic(err) } - b2, err := ioutil.ReadAll(zr) + b2, err := io.ReadAll(zr) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 2ab2c629..950e9a1f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -204,6 +204,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } } + md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures // Obtain a list of oneof wrapper types. 
var oneofWrappers []reflect.Type methods := make([]reflect.Method, 0, 2) @@ -250,6 +251,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName od := &md.L2.Oneofs.List[n] od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile + od.L1.EditionFeatures = md.L1.EditionFeatures od.L0.Parent = md od.L0.Index = n @@ -260,6 +262,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName aberrantAppendField(md, f.Type, tag, "", "") fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] fd.L1.ContainingOneof = od + fd.L1.EditionFeatures = od.L1.EditionFeatures od.L1.Fields.List = append(od.L1.Fields.List, fd) } } @@ -307,14 +310,14 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.HasPacked { + if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) } - if fd.L1.HasPacked { - opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + if fd.L1.EditionFeatures.IsPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } return opts.Interface() } @@ -344,6 +347,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n + md2.L1.EditionFeatures = md.L1.EditionFeatures md2.L1.IsMapEntry = true md2.L2.Options = func() protoreflect.ProtoMessage { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index d9ea010b..a6f0dbda 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -247,11 +247,10 @@ func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } } } -func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if m == nil { return false } - xd := xt.TypeDescriptor() x, ok := (*m)[int32(xd.Number())] if !ok { return false @@ -261,25 +260,22 @@ func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 - case xd.Message() != nil: - return x.Value().Message().IsValid() } return true } -func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { - delete(*m, int32(xt.TypeDescriptor().Number())) +func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) { + delete(*m, int32(xd.Number())) } -func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } } - return xt.Zero() + return xd.Type().Zero() } -func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { - xd := xt.TypeDescriptor() +func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) { + xt := xd.Type() isValid := true switch { case !xt.IsValidValue(v): @@ -292,7 +288,7 @@ func (m 
*extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) isValid = v.Message().IsValid() } if !isValid { - panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName())) } if *m == nil { @@ -302,16 +298,15 @@ func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { - xd := xt.TypeDescriptor() +func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value { if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { return x.Value() } - v := xt.New() - m.Set(xt, v) + v := xd.Type().New() + m.Set(xd, v) return v } @@ -428,7 +423,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. -func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -457,7 +452,7 @@ func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } - return nil, xtd.Type() + return nil, xtd } panic(fmt.Sprintf("field %v is invalid", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 741d6e5b..29ba6bd3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -27,8 +27,9 @@ func (m *messageState) protoUnwrap() interface{} { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -41,8 +42,9 @@ func (m *messageState) ProtoMessageInfo() *MessageInfo { } func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -52,77 +54,86 @@ func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.V } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } 
else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageState) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + return mi.getUnknown(m.pointer()) } func (m *messageState) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageState) IsValid() bool { return !m.pointer().IsNil() @@ -147,8 +158,9 @@ func (m *messageReflectWrapper) protoUnwrap() interface{} { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { - m.messageInfo().init() - return &m.messageInfo().methods + mi := m.messageInfo() + mi.init() + return &mi.methods } // ProtoMessageInfo is a pseudo-internal API for allowing the v1 code @@ -161,8 +173,9 @@ func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { } func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { - 
m.messageInfo().init() - for _, ri := range m.messageInfo().rangeInfos { + mi := m.messageInfo() + mi.init() + for _, ri := range mi.rangeInfos { switch ri := ri.(type) { case *fieldInfo: if ri.has(m.pointer()) { @@ -172,77 +185,86 @@ func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, proto } case *oneofInfo: if n := ri.which(m.pointer()); n > 0 { - fi := m.messageInfo().fields[n] + fi := mi.fields[n] if !f(fi.fieldDesc, fi.get(m.pointer())) { return } } } } - m.messageInfo().extensionMap(m.pointer()).Range(f) + mi.extensionMap(m.pointer()).Range(f) } func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.has(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Has(xt) + return mi.extensionMap(m.pointer()).Has(xd) } } func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.clear(m.pointer()) } else { - m.messageInfo().extensionMap(m.pointer()).Clear(xt) + mi.extensionMap(m.pointer()).Clear(xd) } } func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.get(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Get(xt) + return mi.extensionMap(m.pointer()).Get(xd) } } func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { fi.set(m.pointer(), v) } else { - m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + mi.extensionMap(m.pointer()).Set(xd, v) } } func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.mutable(m.pointer()) } else { - return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + return mi.extensionMap(m.pointer()).Mutable(xd) } } func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { - m.messageInfo().init() - if fi, xt := m.messageInfo().checkField(fd); fi != nil { + mi := m.messageInfo() + mi.init() + if fi, xd := mi.checkField(fd); fi != nil { return fi.newField() } else { - return xt.New() + return xd.Type().New() } } func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { - m.messageInfo().init() - if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + mi := m.messageInfo() + mi.init() + if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { return od.Fields().ByNumber(oi.which(m.pointer())) } panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) } func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { - m.messageInfo().init() - return m.messageInfo().getUnknown(m.pointer()) + mi := m.messageInfo() + mi.init() + 
return mi.getUnknown(m.pointer()) } func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { - m.messageInfo().init() - m.messageInfo().setUnknown(m.pointer(), b) + mi := m.messageInfo() + mi.init() + mi.setUnknown(m.pointer(), b) } func (m *messageReflectWrapper) IsValid() bool { return !m.pointer().IsNil() diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a50fcfb4..a3cba508 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 33 - Patch = 0 + Minor = 34 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e5b03b56..d75a6534 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -51,6 +51,8 @@ type UnmarshalOptions struct { // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the [UnmarshalOptions] type if you need more control. func Unmarshal(b []byte, m Message) error { _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index 4fed202f..1f847bcc 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -5,12 +5,17 @@ package proto import ( + "errors" + "fmt" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" + + protoerrors "google.golang.org/protobuf/internal/errors" ) // MarshalOptions configures the marshaler. @@ -70,7 +75,32 @@ type MarshalOptions struct { UseCachedSize bool } +// flags turns the specified MarshalOptions (user-facing) into +// protoiface.MarshalInputFlags (used internally by the marshaler). +// +// See impl.marshalOptions.Options for the inverse operation. +func (o MarshalOptions) flags() protoiface.MarshalInputFlags { + var flags protoiface.MarshalInputFlags + + // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal, + // which is why it is not a part of MarshalInputFlags. + + if o.Deterministic { + flags |= protoiface.MarshalDeterministic + } + + if o.UseCachedSize { + flags |= protoiface.MarshalUseCachedSize + } + + return flags +} + // Marshal returns the wire-format encoding of m. +// +// This is the most common entry point for encoding a Protobuf message. +// +// See the [MarshalOptions] type if you need more control. func Marshal(m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to output. if m == nil { @@ -116,6 +146,9 @@ func emptyBytesForMessage(m Message) []byte { // MarshalAppend appends the wire-format encoding of m to b, // returning the result. +// +// This is a less common entry point than [Marshal]; it is only needed when you +// want to supply your own buffers for performance reasons.
func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // Treat nil message interface as an empty message; nothing to append. if m == nil { @@ -145,12 +178,7 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac in := protoiface.MarshalInput{ Message: m, Buf: b, - } - if o.Deterministic { - in.Flags |= protoiface.MarshalDeterministic - } - if o.UseCachedSize { - in.Flags |= protoiface.MarshalUseCachedSize + Flags: o.flags(), } if methods.Size != nil { sout := methods.Size(protoiface.SizeInput{ @@ -168,6 +196,10 @@ func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoifac out.Buf, err = o.marshalMessageSlow(b, m) } if err != nil { + var mismatch *protoerrors.SizeMismatchError + if errors.As(err, &mismatch) { + return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err) + } return out, err } if allowPartial { diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 17899a3a..c9c8721a 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -11,18 +11,21 @@ import ( // HasExtension reports whether an extension field is populated. // It returns false if m is invalid or if xt does not extend m. func HasExtension(m Message, xt protoreflect.ExtensionType) bool { - // Treat nil message interface as an empty message; no populated fields. - if m == nil { + // Treat nil message interface or descriptor as an empty message; no populated + // fields. + if m == nil || xt == nil { return false } // As a special case, we report invalid or mismatching descriptors // as always not being populated (since they aren't). - if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + mr := m.ProtoReflect() + xd := xt.TypeDescriptor() + if mr.Descriptor() != xd.ContainingMessage() { return false } - return m.ProtoReflect().Has(xt.TypeDescriptor()) + return mr.Has(xd) } // ClearExtension clears an extension field such that subsequent diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go index 312d5d45..575d1483 100644 --- a/vendor/google.golang.org/protobuf/proto/messageset.go +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -47,11 +47,16 @@ func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]b func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { b = messageset.AppendFieldStart(b, fd.Number()) b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) - b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + calculatedSize := o.Size(value.Message().Interface()) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) b, err := o.marshalMessage(b, value.Message()) if err != nil { return b, err } + if measuredSize := len(b) - before; calculatedSize != measuredSize { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } b = messageset.AppendFieldEnd(b) return b, nil } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index f1692b49..052fb5ae 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -34,6 +34,7 @@ func (o MarshalOptions) size(m protoreflect.Message)
(size int) { if methods != nil && methods.Size != nil { out := methods.Size(protoiface.SizeInput{ Message: m, + Flags: o.flags(), }) return out.Size } @@ -42,6 +43,7 @@ func (o MarshalOptions) size(m protoreflect.Message) (size int) { // This case is mainly used for legacy types with a Marshal method. out, _ := methods.Marshal(protoiface.MarshalInput{ Message: m, + Flags: o.flags(), }) return len(out.Buf) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 00b01fbd..c85bfaa5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -161,7 +161,7 @@ const ( // IsValid reports whether the syntax is valid. func (s Syntax) IsValid() bool { switch s { - case Proto2, Proto3: + case Proto2, Proto3, Editions: return true default: return false diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 7dcc2ff0..00102d31 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -373,6 +373,8 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) case 21: b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 22: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -519,6 +521,23 @@ func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { return b } +func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "edition_introduced", nil) + case 2: + b = p.appendSingularField(b, "edition_deprecated", nil) + case 3: + b = p.appendSingularField(b, "deprecation_warning", nil) + case 4: + b = p.appendSingularField(b, "edition_removed", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 60ff62b4..5b80afe5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -544,6 +544,12 @@ type EnumDescriptor interface { // ReservedRanges is a list of reserved ranges of enum numbers. ReservedRanges() EnumRanges + // IsClosed reports whether this enum uses closed semantics. + // See https://protobuf.dev/programming-guides/enum/#definitions. + // Note: the Go protobuf implementation is not spec compliant and treats + // all enums as open enums. 
+ IsClosed() bool + isEnumDescriptor } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } diff --git a/vendor/modules.txt b/vendor/modules.txt index d38f6b6b..632b6be1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -75,7 +75,7 @@ github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/internal/endpoints github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types -# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 +# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.3 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/dynamodb github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations @@ -87,7 +87,7 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding # github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.9 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 +# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.8 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery # github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 @@ -179,8 +179,8 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 -## explicit; go 1.19 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 +## explicit; go 1.20 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities @@ -199,7 +199,7 @@ github.com/lithammer/dedent # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure -# github.com/prometheus/client_golang v1.19.0 +# github.com/prometheus/client_golang v1.19.1 ## explicit; go 1.20 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -207,37 +207,40 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.52.3 +# github.com/prometheus/common v0.53.0 ## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.13.0 -## explicit; go 1.19 +# github.com/prometheus/procfs v0.15.0 +## explicit; go 1.21 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util # github.com/sethvargo/go-envconfig v1.0.3 ## explicit; go 1.20 github.com/sethvargo/go-envconfig -# go.opentelemetry.io/contrib/detectors/aws/lambda v0.51.0 +# go.opentelemetry.io/contrib/bridges/prometheus v0.52.0 +## explicit; go 1.21 +go.opentelemetry.io/contrib/bridges/prometheus +# go.opentelemetry.io/contrib/detectors/aws/lambda v0.52.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/detectors/aws/lambda -# go.opentelemetry.io/contrib/exporters/autoexport v0.51.0 +# go.opentelemetry.io/contrib/exporters/autoexport v0.52.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/exporters/autoexport -# go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 +# 
go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/contrib/propagators/b3 v1.26.0 +# go.opentelemetry.io/contrib/propagators/b3 v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/propagators/b3 -# go.opentelemetry.io/otel v1.26.0 +# go.opentelemetry.io/otel v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -250,8 +253,8 @@ go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 -go.opentelemetry.io/otel/semconv/v1.24.0 -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.26.0 +go.opentelemetry.io/otel/semconv/v1.25.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal @@ -259,7 +262,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envco go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal @@ -267,39 +270,39 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envco go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 ## explicit; go 1.21 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry -# go.opentelemetry.io/otel/exporters/prometheus v0.48.0 +# go.opentelemetry.io/otel/exporters/prometheus v0.49.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/prometheus -# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.26.0 +# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric -# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.26.0 +# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/exporters/stdout/stdouttrace -# go.opentelemetry.io/otel/metric v1.26.0 +# go.opentelemetry.io/otel/metric v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.26.0 +# go.opentelemetry.io/otel/sdk v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation @@ -308,7 +311,7 @@ go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace go.opentelemetry.io/otel/sdk/trace/tracetest -# go.opentelemetry.io/otel/sdk/metric v1.26.0 +# go.opentelemetry.io/otel/sdk/metric v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/internal @@ -316,7 +319,7 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate go.opentelemetry.io/otel/sdk/metric/internal/exemplar go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.26.0 +# go.opentelemetry.io/otel/trace v1.27.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -329,7 +332,7 @@ go.opentelemetry.io/proto/otlp/common/v1 go.opentelemetry.io/proto/otlp/metrics/v1 go.opentelemetry.io/proto/otlp/resource/v1 go.opentelemetry.io/proto/otlp/trace/v1 -# golang.org/x/net v0.24.0 +# golang.org/x/net v0.25.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -345,20 +348,20 @@ golang.org/x/sync/errgroup golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.15.0 ## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# google.golang.org/genproto/googleapis/api v0.0.0-20240415151819-79826c84ba32 -## explicit; go 1.19 +# google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 +## explicit; go 1.20 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240415151819-79826c84ba32 -## explicit; go 1.19 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 +## explicit; go 1.20 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.63.2 +# google.golang.org/grpc v1.64.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -413,7 +416,7 @@ 
google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.33.0 +# google.golang.org/protobuf v1.34.1 ## explicit; go 1.17 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson
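Taken together, the protobuf bump from v1.33.0 to v1.34.1 is the substantive part of this change; the remaining module updates are mechanical version alignment. Below is a short usage sketch of two of the API changes visible above, the new protoreflect EnumDescriptor.IsClosed method and the MarshalOptions flags now forwarded to Size. It is written as a standalone program against the upgraded library, not against this repository's code.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// IsClosed is new on EnumDescriptor in this release; descriptor.proto is
	// a proto2 file, so its enums report closed semantics (even though, per
	// the doc comment in the diff, the Go runtime still treats all enums as
	// open).
	ed := descriptorpb.FieldDescriptorProto_TYPE_BOOL.Descriptor()
	fmt.Println("closed enum:", ed.IsClosed())

	// Deterministic (and UseCachedSize) are now translated into
	// protoiface.MarshalInputFlags for Size as well as Marshal via the shared
	// flags() helper, so both passes observe the same options.
	opts := proto.MarshalOptions{Deterministic: true}
	m := durationpb.New(1)
	b, err := opts.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println("size matches marshal:", opts.Size(m) == len(b))
}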